Diffstat (limited to 'drivers/scsi')
430 files changed, 81868 insertions, 36769 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 5e1e12c0cf4..0a7325361d2 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2025,7 +2025,8 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= twa_host_attrs,
-	.emulated		= 1
+	.emulated		= 1,
+	.no_write_same		= 1,
 };
 
 /* This function will probe and initialize a card */
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index c845bdbeb6c..4de346017e9 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1600,7 +1600,8 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= twl_host_attrs,
-	.emulated		= 1
+	.emulated		= 1,
+	.no_write_same		= 1,
 };
 
 /* This function will probe and initialize a card */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 56662ae03de..752624e6bc0 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -216,6 +216,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
 #include "3w-xxxx.h"
 
 /* Globals */
@@ -2009,7 +2010,8 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
 		printk(KERN_NOTICE "3w-xxxx: scsi%d: Unknown scsi opcode: 0x%x\n", tw_dev->host->host_no, *command);
 		tw_dev->state[request_id] = TW_S_COMPLETED;
 		tw_state_request_finish(tw_dev, request_id);
-		SCpnt->result = (DID_BAD_TARGET << 16);
+		SCpnt->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+		scsi_build_sense_buffer(1, SCpnt->sense_buffer, ILLEGAL_REQUEST, 0x20, 0);
 		done(SCpnt);
 		retval = 0;
 	}
@@ -2277,7 +2279,8 @@ static struct scsi_host_template driver_template = {
 	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= tw_host_attrs,
-	.emulated		= 1
+	.emulated		= 1,
+	.no_write_same		= 1,
 };
 
 /* This function will probe and initialize a card */
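The 3w-xxxx.c hunk above does more than touch the host template: an unknown opcode no longer comes back as a DID_BAD_TARGET host byte, but as CHECK CONDITION status with real sense data (ILLEGAL REQUEST, ASC 0x20, "invalid command operation code"), which is why <scsi/scsi_eh.h> is newly included. The same pattern in isolation, as a minimal sketch against the 3.x-era SCSI API used in this diff (demo_reject_opcode() is a hypothetical helper, not code from this patch):

	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_eh.h>

	/* Hypothetical helper: complete one command as "unsupported opcode".
	 * scsi_build_sense_buffer(1, ...) fills descriptor-format sense data;
	 * passing 0 as the first argument would emit fixed-format sense. */
	static void demo_reject_opcode(struct scsi_cmnd *cmd,
				       void (*done)(struct scsi_cmnd *))
	{
		/* CHECK CONDITION, with valid sense in cmd->sense_buffer */
		cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
		/* ILLEGAL REQUEST, ASC 0x20/ASCQ 0x00 = invalid opcode */
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x20, 0);
		done(cmd);
	}

The .no_write_same = 1 entries added to all three host templates likewise tell the SCSI disk layer up front that these emulated RAID LUNs do not handle WRITE SAME, instead of letting such commands be issued and fail.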
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index d7ca247efa3..972f8176665 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -26,8 +26,8 @@
 
 */
 
-#define BusLogic_DriverVersion "2.1.16"
-#define BusLogic_DriverDate "18 July 2002"
+#define blogic_drvr_version "2.1.17"
+#define blogic_drvr_date "12 September 2013"
 
 #include <linux/module.h>
 #include <linux/init.h>
@@ -60,24 +60,24 @@
 #define FAILURE (-1)
 #endif
 
-static struct scsi_host_template Bus_Logic_template;
+static struct scsi_host_template blogic_template;
 
 /*
-  BusLogic_DriverOptionsCount is a count of the number of BusLogic Driver
+  blogic_drvr_options_count is a count of the number of BusLogic Driver
   Options specifications provided via the Linux Kernel Command Line or via
   the Loadable Kernel Module Installation Facility.
 */
 
-static int BusLogic_DriverOptionsCount;
+static int blogic_drvr_options_count;
 
 /*
-  BusLogic_DriverOptions is an array of Driver Options structures representing
+  blogic_drvr_options is an array of Driver Options structures representing
   BusLogic Driver Options specifications provided via the Linux Kernel Command
   Line or via the Loadable Kernel Module Installation Facility.
 */
 
-static struct BusLogic_DriverOptions BusLogic_DriverOptions[BusLogic_MaxHostAdapters];
+static struct blogic_drvr_options blogic_drvr_options[BLOGIC_MAX_ADAPTERS];
 
 /*
@@ -92,241 +92,253 @@ module_param(BusLogic, charp, 0);
 
 /*
-  BusLogic_ProbeOptions is a set of Probe Options to be applied across
+  blogic_probe_options is a set of Probe Options to be applied across
   all BusLogic Host Adapters.
 */
 
-static struct BusLogic_ProbeOptions BusLogic_ProbeOptions;
+static struct blogic_probe_options blogic_probe_options;
 
 /*
-  BusLogic_GlobalOptions is a set of Global Options to be applied across
+  blogic_global_options is a set of Global Options to be applied across
   all BusLogic Host Adapters.
 */
 
-static struct BusLogic_GlobalOptions BusLogic_GlobalOptions;
+static struct blogic_global_options blogic_global_options;
 
-static LIST_HEAD(BusLogic_host_list);
+static LIST_HEAD(blogic_host_list);
 
 /*
-  BusLogic_ProbeInfoCount is the number of entries in BusLogic_ProbeInfoList.
+  blogic_probeinfo_count is the number of entries in blogic_probeinfo_list.
 */
 
-static int BusLogic_ProbeInfoCount;
+static int blogic_probeinfo_count;
 
 /*
-  BusLogic_ProbeInfoList is the list of I/O Addresses and Bus Probe Information
+  blogic_probeinfo_list is the list of I/O Addresses and Bus Probe Information
   to be checked for potential BusLogic Host Adapters.  It is initialized by
   interrogating the PCI Configuration Space on PCI machines as well as from the
   list of standard BusLogic I/O Addresses.
 */
 
-static struct BusLogic_ProbeInfo *BusLogic_ProbeInfoList;
+static struct blogic_probeinfo *blogic_probeinfo_list;
 
 /*
-  BusLogic_CommandFailureReason holds a string identifying the reason why a
-  call to BusLogic_Command failed.  It is only non-NULL when BusLogic_Command
+  blogic_cmd_failure_reason holds a string identifying the reason why a
+  call to blogic_cmd failed.  It is only non-NULL when blogic_cmd
   returns a failure code.
 */
 
-static char *BusLogic_CommandFailureReason;
+static char *blogic_cmd_failure_reason;
 
 /*
-  BusLogic_AnnounceDriver announces the Driver Version and Date, Author's
+  blogic_announce_drvr announces the Driver Version and Date, Author's
   Name, Copyright Notice, and Electronic Mail Address.
 */
 
-static void BusLogic_AnnounceDriver(struct BusLogic_HostAdapter *HostAdapter)
+static void blogic_announce_drvr(struct blogic_adapter *adapter)
 {
-	BusLogic_Announce("***** BusLogic SCSI Driver Version " BusLogic_DriverVersion " of " BusLogic_DriverDate " *****\n", HostAdapter);
-	BusLogic_Announce("Copyright 1995-1998 by Leonard N. Zubkoff " "<lnz@dandelion.com>\n", HostAdapter);
+	blogic_announce("***** BusLogic SCSI Driver Version " blogic_drvr_version " of " blogic_drvr_date " *****\n", adapter);
+	blogic_announce("Copyright 1995-1998 by Leonard N. Zubkoff " "<lnz@dandelion.com>\n", adapter);
 }
 
 /*
-  BusLogic_DriverInfo returns the Host Adapter Name to identify this SCSI
+  blogic_drvr_info returns the Host Adapter Name to identify this SCSI
   Driver and Host Adapter.
 */
 
-static const char *BusLogic_DriverInfo(struct Scsi_Host *Host)
+static const char *blogic_drvr_info(struct Scsi_Host *host)
 {
-	struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) Host->hostdata;
-	return HostAdapter->FullModelName;
+	struct blogic_adapter *adapter =
+		(struct blogic_adapter *) host->hostdata;
+	return adapter->full_model;
 }
 
 /*
-  BusLogic_InitializeCCBs initializes a group of Command Control Blocks (CCBs)
-  for Host Adapter from the BlockSize bytes located at BlockPointer.  The newly
+  blogic_init_ccbs initializes a group of Command Control Blocks (CCBs)
+  for Host Adapter from the blk_size bytes located at blk_pointer.  The newly
   created CCBs are added to Host Adapter's free list.
 */
 
-static void BusLogic_InitializeCCBs(struct BusLogic_HostAdapter *HostAdapter, void *BlockPointer, int BlockSize, dma_addr_t BlockPointerHandle)
+static void blogic_init_ccbs(struct blogic_adapter *adapter, void *blk_pointer,
+				int blk_size, dma_addr_t blkp)
 {
-	struct BusLogic_CCB *CCB = (struct BusLogic_CCB *) BlockPointer;
+	struct blogic_ccb *ccb = (struct blogic_ccb *) blk_pointer;
 	unsigned int offset = 0;
-	memset(BlockPointer, 0, BlockSize);
-	CCB->AllocationGroupHead = BlockPointerHandle;
-	CCB->AllocationGroupSize = BlockSize;
-	while ((BlockSize -= sizeof(struct BusLogic_CCB)) >= 0) {
-		CCB->Status = BusLogic_CCB_Free;
-		CCB->HostAdapter = HostAdapter;
-		CCB->DMA_Handle = (u32) BlockPointerHandle + offset;
-		if (BusLogic_FlashPointHostAdapterP(HostAdapter)) {
-			CCB->CallbackFunction = BusLogic_QueueCompletedCCB;
-			CCB->BaseAddress = HostAdapter->FlashPointInfo.BaseAddress;
+	memset(blk_pointer, 0, blk_size);
+	ccb->allocgrp_head = blkp;
+	ccb->allocgrp_size = blk_size;
+	while ((blk_size -= sizeof(struct blogic_ccb)) >= 0) {
+		ccb->status = BLOGIC_CCB_FREE;
+		ccb->adapter = adapter;
+		ccb->dma_handle = (u32) blkp + offset;
+		if (blogic_flashpoint_type(adapter)) {
+			ccb->callback = blogic_qcompleted_ccb;
+			ccb->base_addr = adapter->fpinfo.base_addr;
 		}
-		CCB->Next = HostAdapter->Free_CCBs;
-		CCB->NextAll = HostAdapter->All_CCBs;
-		HostAdapter->Free_CCBs = CCB;
-		HostAdapter->All_CCBs = CCB;
-		HostAdapter->AllocatedCCBs++;
-		CCB++;
-		offset += sizeof(struct BusLogic_CCB);
+		ccb->next = adapter->free_ccbs;
+		ccb->next_all = adapter->all_ccbs;
+		adapter->free_ccbs = ccb;
+		adapter->all_ccbs = ccb;
+		adapter->alloc_ccbs++;
+		ccb++;
+		offset += sizeof(struct blogic_ccb);
 	}
 }
 
 /*
-  BusLogic_CreateInitialCCBs allocates the initial CCBs for Host Adapter.
+  blogic_create_initccbs allocates the initial CCBs for Host Adapter.
 */
 
-static bool __init BusLogic_CreateInitialCCBs(struct BusLogic_HostAdapter *HostAdapter)
+static bool __init blogic_create_initccbs(struct blogic_adapter *adapter)
 {
-	int BlockSize = BusLogic_CCB_AllocationGroupSize * sizeof(struct BusLogic_CCB);
-	void *BlockPointer;
-	dma_addr_t BlockPointerHandle;
-	while (HostAdapter->AllocatedCCBs < HostAdapter->InitialCCBs) {
-		BlockPointer = pci_alloc_consistent(HostAdapter->PCI_Device, BlockSize, &BlockPointerHandle);
-		if (BlockPointer == NULL) {
-			BusLogic_Error("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n", HostAdapter);
+	int blk_size = BLOGIC_CCB_GRP_ALLOCSIZE * sizeof(struct blogic_ccb);
+	void *blk_pointer;
+	dma_addr_t blkp;
+
+	while (adapter->alloc_ccbs < adapter->initccbs) {
+		blk_pointer = pci_alloc_consistent(adapter->pci_device,
+					blk_size, &blkp);
+		if (blk_pointer == NULL) {
+			blogic_err("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n",
+					adapter);
 			return false;
 		}
-		BusLogic_InitializeCCBs(HostAdapter, BlockPointer, BlockSize, BlockPointerHandle);
+		blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp);
 	}
 	return true;
 }
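blogic_init_ccbs(), just renamed above, carves one pci_alloc_consistent() block into as many fixed-size CCBs as fit, pushing each onto the adapter's free list, while the first CCB in the block remembers the group's DMA handle and size for the eventual pci_free_consistent(). The carving loop reduced to a standalone sketch (plain C with illustrative stand-in types, not code from this patch):

	#include <string.h>

	struct node {			/* stand-in for struct blogic_ccb */
		struct node *next;
	};

	struct pool {			/* stand-in for the adapter free list */
		struct node *free_list;
		int count;
	};

	/* Carve blk_size bytes at blk into nodes and push each one onto the
	 * free list -- the same loop shape as blogic_init_ccbs(). */
	static void pool_carve(struct pool *p, void *blk, int blk_size)
	{
		struct node *n = blk;

		memset(blk, 0, blk_size);
		while ((blk_size -= (int) sizeof(struct node)) >= 0) {
			n->next = p->free_list;
			p->free_list = n;
			p->count++;
			n++;
		}
	}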
 
 /*
-  BusLogic_DestroyCCBs deallocates the CCBs for Host Adapter.
+  blogic_destroy_ccbs deallocates the CCBs for Host Adapter.
 */
 
-static void BusLogic_DestroyCCBs(struct BusLogic_HostAdapter *HostAdapter)
+static void blogic_destroy_ccbs(struct blogic_adapter *adapter)
 {
-	struct BusLogic_CCB *NextCCB = HostAdapter->All_CCBs, *CCB, *Last_CCB = NULL;
-	HostAdapter->All_CCBs = NULL;
-	HostAdapter->Free_CCBs = NULL;
-	while ((CCB = NextCCB) != NULL) {
-		NextCCB = CCB->NextAll;
-		if (CCB->AllocationGroupHead) {
-			if (Last_CCB)
-				pci_free_consistent(HostAdapter->PCI_Device, Last_CCB->AllocationGroupSize, Last_CCB, Last_CCB->AllocationGroupHead);
-			Last_CCB = CCB;
+	struct blogic_ccb *next_ccb = adapter->all_ccbs, *ccb, *lastccb = NULL;
+	adapter->all_ccbs = NULL;
+	adapter->free_ccbs = NULL;
+	while ((ccb = next_ccb) != NULL) {
+		next_ccb = ccb->next_all;
+		if (ccb->allocgrp_head) {
+			if (lastccb)
+				pci_free_consistent(adapter->pci_device,
+					lastccb->allocgrp_size, lastccb,
+					lastccb->allocgrp_head);
+			lastccb = ccb;
 		}
 	}
-	if (Last_CCB)
-		pci_free_consistent(HostAdapter->PCI_Device, Last_CCB->AllocationGroupSize, Last_CCB, Last_CCB->AllocationGroupHead);
+	if (lastccb)
+		pci_free_consistent(adapter->pci_device, lastccb->allocgrp_size,
+					lastccb, lastccb->allocgrp_head);
 }
 
 /*
-  BusLogic_CreateAdditionalCCBs allocates Additional CCBs for Host Adapter.  If
+  blogic_create_addlccbs allocates Additional CCBs for Host Adapter.  If
   allocation fails and there are no remaining CCBs available, the Driver Queue
   Depth is decreased to a known safe value to avoid potential deadlocks when
   multiple host adapters share the same IRQ Channel.
 */
 
-static void BusLogic_CreateAdditionalCCBs(struct BusLogic_HostAdapter *HostAdapter, int AdditionalCCBs, bool SuccessMessageP)
+static void blogic_create_addlccbs(struct blogic_adapter *adapter,
+					int addl_ccbs, bool print_success)
 {
-	int BlockSize = BusLogic_CCB_AllocationGroupSize * sizeof(struct BusLogic_CCB);
-	int PreviouslyAllocated = HostAdapter->AllocatedCCBs;
-	void *BlockPointer;
-	dma_addr_t BlockPointerHandle;
-	if (AdditionalCCBs <= 0)
+	int blk_size = BLOGIC_CCB_GRP_ALLOCSIZE * sizeof(struct blogic_ccb);
+	int prev_alloc = adapter->alloc_ccbs;
+	void *blk_pointer;
+	dma_addr_t blkp;
+	if (addl_ccbs <= 0)
 		return;
-	while (HostAdapter->AllocatedCCBs - PreviouslyAllocated < AdditionalCCBs) {
-		BlockPointer = pci_alloc_consistent(HostAdapter->PCI_Device, BlockSize, &BlockPointerHandle);
-		if (BlockPointer == NULL)
+	while (adapter->alloc_ccbs - prev_alloc < addl_ccbs) {
+		blk_pointer = pci_alloc_consistent(adapter->pci_device,
+					blk_size, &blkp);
+		if (blk_pointer == NULL)
 			break;
-		BusLogic_InitializeCCBs(HostAdapter, BlockPointer, BlockSize, BlockPointerHandle);
+		blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp);
 	}
-	if (HostAdapter->AllocatedCCBs > PreviouslyAllocated) {
-		if (SuccessMessageP)
-			BusLogic_Notice("Allocated %d additional CCBs (total now %d)\n", HostAdapter, HostAdapter->AllocatedCCBs - PreviouslyAllocated, HostAdapter->AllocatedCCBs);
+	if (adapter->alloc_ccbs > prev_alloc) {
+		if (print_success)
+			blogic_notice("Allocated %d additional CCBs (total now %d)\n", adapter, adapter->alloc_ccbs - prev_alloc, adapter->alloc_ccbs);
 		return;
 	}
-	BusLogic_Notice("Failed to allocate additional CCBs\n", HostAdapter);
-	if (HostAdapter->DriverQueueDepth > HostAdapter->AllocatedCCBs - HostAdapter->TargetDeviceCount) {
-		HostAdapter->DriverQueueDepth = HostAdapter->AllocatedCCBs - HostAdapter->TargetDeviceCount;
-		HostAdapter->SCSI_Host->can_queue = HostAdapter->DriverQueueDepth;
+	blogic_notice("Failed to allocate additional CCBs\n", adapter);
+	if (adapter->drvr_qdepth > adapter->alloc_ccbs - adapter->tgt_count) {
+		adapter->drvr_qdepth = adapter->alloc_ccbs - adapter->tgt_count;
+		adapter->scsi_host->can_queue = adapter->drvr_qdepth;
 	}
 }
 
 /*
-  BusLogic_AllocateCCB allocates a CCB from Host Adapter's free list,
+  blogic_alloc_ccb allocates a CCB from Host Adapter's free list,
   allocating more memory from the Kernel if necessary.  The Host Adapter's
   Lock should already have been acquired by the caller.
 */
 
-static struct BusLogic_CCB *BusLogic_AllocateCCB(struct BusLogic_HostAdapter
-						 *HostAdapter)
+static struct blogic_ccb *blogic_alloc_ccb(struct blogic_adapter *adapter)
 {
-	static unsigned long SerialNumber = 0;
-	struct BusLogic_CCB *CCB;
-	CCB = HostAdapter->Free_CCBs;
-	if (CCB != NULL) {
-		CCB->SerialNumber = ++SerialNumber;
-		HostAdapter->Free_CCBs = CCB->Next;
-		CCB->Next = NULL;
-		if (HostAdapter->Free_CCBs == NULL)
-			BusLogic_CreateAdditionalCCBs(HostAdapter, HostAdapter->IncrementalCCBs, true);
-		return CCB;
-	}
-	BusLogic_CreateAdditionalCCBs(HostAdapter, HostAdapter->IncrementalCCBs, true);
-	CCB = HostAdapter->Free_CCBs;
-	if (CCB == NULL)
+	static unsigned long serial;
+	struct blogic_ccb *ccb;
+	ccb = adapter->free_ccbs;
+	if (ccb != NULL) {
+		ccb->serial = ++serial;
+		adapter->free_ccbs = ccb->next;
+		ccb->next = NULL;
+		if (adapter->free_ccbs == NULL)
+			blogic_create_addlccbs(adapter, adapter->inc_ccbs,
+						true);
+		return ccb;
+	}
+	blogic_create_addlccbs(adapter, adapter->inc_ccbs, true);
+	ccb = adapter->free_ccbs;
+	if (ccb == NULL)
 		return NULL;
-	CCB->SerialNumber = ++SerialNumber;
-	HostAdapter->Free_CCBs = CCB->Next;
-	CCB->Next = NULL;
-	return CCB;
+	ccb->serial = ++serial;
+	adapter->free_ccbs = ccb->next;
+	ccb->next = NULL;
+	return ccb;
 }
 
 /*
-  BusLogic_DeallocateCCB deallocates a CCB, returning it to the Host Adapter's
+  blogic_dealloc_ccb deallocates a CCB, returning it to the Host Adapter's
   free list.  The Host Adapter's Lock should already have been acquired by the
   caller.
 */
 
-static void BusLogic_DeallocateCCB(struct BusLogic_CCB *CCB)
+static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
 {
-	struct BusLogic_HostAdapter *HostAdapter = CCB->HostAdapter;
-
-	scsi_dma_unmap(CCB->Command);
-	pci_unmap_single(HostAdapter->PCI_Device, CCB->SenseDataPointer,
-			 CCB->SenseDataLength, PCI_DMA_FROMDEVICE);
-
-	CCB->Command = NULL;
-	CCB->Status = BusLogic_CCB_Free;
-	CCB->Next = HostAdapter->Free_CCBs;
-	HostAdapter->Free_CCBs = CCB;
+	struct blogic_adapter *adapter = ccb->adapter;
+
+	if (ccb->command != NULL)
+		scsi_dma_unmap(ccb->command);
+	if (dma_unmap)
+		pci_unmap_single(adapter->pci_device, ccb->sensedata,
+				 ccb->sense_datalen, PCI_DMA_FROMDEVICE);
+
+	ccb->command = NULL;
+	ccb->status = BLOGIC_CCB_FREE;
+	ccb->next = adapter->free_ccbs;
+	adapter->free_ccbs = ccb;
}
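blogic_alloc_ccb() above is a free-list pop with eager replenishment: when it hands out the last CCB it immediately asks blogic_create_addlccbs() for inc_ccbs more, and when the list is already empty it grows first and retries once. The control flow in isolation, continuing the illustrative pool types from the earlier sketch (pool_grow() stands in for blogic_create_addlccbs() and is not part of this patch):

	static void pool_grow(struct pool *p);	/* stand-in, may fail silently */

	static struct node *pool_alloc(struct pool *p)
	{
		struct node *n = p->free_list;

		if (n != NULL) {
			p->free_list = n->next;
			n->next = NULL;
			if (p->free_list == NULL)	/* handed out the last one */
				pool_grow(p);		/* top up for next time */
			return n;
		}
		pool_grow(p);				/* empty: grow, then retry */
		n = p->free_list;
		if (n == NULL)
			return NULL;
		p->free_list = n->next;
		n->next = NULL;
		return n;
	}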
 
 /*
-  BusLogic_Command sends the command OperationCode to HostAdapter, optionally
-  providing ParameterLength bytes of ParameterData and receiving at most
-  ReplyLength bytes of ReplyData; any excess reply data is received but
+  blogic_cmd sends the command opcode to adapter, optionally
+  providing paramlen bytes of param and receiving at most
+  replylen bytes of reply; any excess reply data is received but
   discarded.  On success, this function returns the number of reply bytes
   read from the Host Adapter (including any discarded data); on failure, it
   returns -1 if the command was invalid, or -2 if a timeout occurred.
 
-  BusLogic_Command is called exclusively during host adapter detection and
+  blogic_cmd is called exclusively during host adapter detection and
   initialization, so performance and latency are not critical, and exclusive
   access to the Host Adapter hardware is assumed.  Once the host adapter and
   driver are initialized, the only Host Adapter command that is issued is the
@@ -334,255 +346,274 @@ static void BusLogic_DeallocateCCB(struct BusLogic_CCB *CCB)
   waiting for the Host Adapter Ready bit to be set in the Status Register.
 */
 
-static int BusLogic_Command(struct BusLogic_HostAdapter *HostAdapter, enum BusLogic_OperationCode OperationCode, void *ParameterData, int ParameterLength, void *ReplyData, int ReplyLength)
+static int blogic_cmd(struct blogic_adapter *adapter, enum blogic_opcode opcode,
+			void *param, int paramlen, void *reply, int replylen)
 {
-	unsigned char *ParameterPointer = (unsigned char *) ParameterData;
-	unsigned char *ReplyPointer = (unsigned char *) ReplyData;
-	union BusLogic_StatusRegister StatusRegister;
-	union BusLogic_InterruptRegister InterruptRegister;
-	unsigned long ProcessorFlags = 0;
-	int ReplyBytes = 0, Result;
-	long TimeoutCounter;
+	unsigned char *param_p = (unsigned char *) param;
+	unsigned char *reply_p = (unsigned char *) reply;
+	union blogic_stat_reg statusreg;
+	union blogic_int_reg intreg;
+	unsigned long processor_flag = 0;
+	int reply_b = 0, result;
+	long timeout;
 	/*
 	   Clear out the Reply Data if provided.
 	 */
-	if (ReplyLength > 0)
-		memset(ReplyData, 0, ReplyLength);
+	if (replylen > 0)
+		memset(reply, 0, replylen);
 	/*
-	   If the IRQ Channel has not yet been acquired, then interrupts must be
-	   disabled while issuing host adapter commands since a Command Complete
-	   interrupt could occur if the IRQ Channel was previously enabled by another
-	   BusLogic Host Adapter or another driver sharing the same IRQ Channel.
+	   If the IRQ Channel has not yet been acquired, then interrupts
+	   must be disabled while issuing host adapter commands since a
+	   Command Complete interrupt could occur if the IRQ Channel was
+	   previously enabled by another BusLogic Host Adapter or another
+	   driver sharing the same IRQ Channel.
 	 */
-	if (!HostAdapter->IRQ_ChannelAcquired)
-		local_irq_save(ProcessorFlags);
+	if (!adapter->irq_acquired)
+		local_irq_save(processor_flag);
 	/*
-	   Wait for the Host Adapter Ready bit to be set and the Command/Parameter
-	   Register Busy bit to be reset in the Status Register.
+	   Wait for the Host Adapter Ready bit to be set and the
+	   Command/Parameter Register Busy bit to be reset in the Status
+	   Register.
 	 */
-	TimeoutCounter = 10000;
-	while (--TimeoutCounter >= 0) {
-		StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
-		if (StatusRegister.sr.HostAdapterReady && !StatusRegister.sr.CommandParameterRegisterBusy)
+	timeout = 10000;
+	while (--timeout >= 0) {
+		statusreg.all = blogic_rdstatus(adapter);
+		if (statusreg.sr.adapter_ready && !statusreg.sr.cmd_param_busy)
 			break;
 		udelay(100);
 	}
-	if (TimeoutCounter < 0) {
-		BusLogic_CommandFailureReason = "Timeout waiting for Host Adapter Ready";
-		Result = -2;
-		goto Done;
+	if (timeout < 0) {
+		blogic_cmd_failure_reason =
+				"Timeout waiting for Host Adapter Ready";
+		result = -2;
+		goto done;
 	}
 	/*
-	   Write the OperationCode to the Command/Parameter Register.
+	   Write the opcode to the Command/Parameter Register.
 	 */
-	HostAdapter->HostAdapterCommandCompleted = false;
-	BusLogic_WriteCommandParameterRegister(HostAdapter, OperationCode);
+	adapter->adapter_cmd_complete = false;
+	blogic_setcmdparam(adapter, opcode);
 	/*
 	   Write any additional Parameter Bytes.
 	 */
-	TimeoutCounter = 10000;
-	while (ParameterLength > 0 && --TimeoutCounter >= 0) {
+	timeout = 10000;
+	while (paramlen > 0 && --timeout >= 0) {
 		/*
-		   Wait 100 microseconds to give the Host Adapter enough time to determine
-		   whether the last value written to the Command/Parameter Register was
-		   valid or not.  If the Command Complete bit is set in the Interrupt
-		   Register, then the Command Invalid bit in the Status Register will be
-		   reset if the Operation Code or Parameter was valid and the command
-		   has completed, or set if the Operation Code or Parameter was invalid.
-		   If the Data In Register Ready bit is set in the Status Register, then
-		   the Operation Code was valid, and data is waiting to be read back
-		   from the Host Adapter.  Otherwise, wait for the Command/Parameter
-		   Register Busy bit in the Status Register to be reset.
+		   Wait 100 microseconds to give the Host Adapter enough
+		   time to determine whether the last value written to the
+		   Command/Parameter Register was valid or not.  If the
+		   Command Complete bit is set in the Interrupt Register,
+		   then the Command Invalid bit in the Status Register will
+		   be reset if the Operation Code or Parameter was valid
+		   and the command has completed, or set if the Operation
+		   Code or Parameter was invalid.  If the Data In Register
+		   Ready bit is set in the Status Register, then the
+		   Operation Code was valid, and data is waiting to be read
+		   back from the Host Adapter.  Otherwise, wait for the
+		   Command/Parameter Register Busy bit in the Status
+		   Register to be reset.
 		 */
 		udelay(100);
-		InterruptRegister.All = BusLogic_ReadInterruptRegister(HostAdapter);
-		StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
-		if (InterruptRegister.ir.CommandComplete)
+		intreg.all = blogic_rdint(adapter);
+		statusreg.all = blogic_rdstatus(adapter);
+		if (intreg.ir.cmd_complete)
 			break;
-		if (HostAdapter->HostAdapterCommandCompleted)
+		if (adapter->adapter_cmd_complete)
 			break;
-		if (StatusRegister.sr.DataInRegisterReady)
+		if (statusreg.sr.datain_ready)
 			break;
-		if (StatusRegister.sr.CommandParameterRegisterBusy)
+		if (statusreg.sr.cmd_param_busy)
 			continue;
-		BusLogic_WriteCommandParameterRegister(HostAdapter, *ParameterPointer++);
-		ParameterLength--;
-	}
-	if (TimeoutCounter < 0) {
-		BusLogic_CommandFailureReason = "Timeout waiting for Parameter Acceptance";
-		Result = -2;
-		goto Done;
-	}
-	/*
-	   The Modify I/O Address command does not cause a Command Complete Interrupt.
-	 */
-	if (OperationCode == BusLogic_ModifyIOAddress) {
-		StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
-		if (StatusRegister.sr.CommandInvalid) {
-			BusLogic_CommandFailureReason = "Modify I/O Address Invalid";
-			Result = -1;
-			goto Done;
+		blogic_setcmdparam(adapter, *param_p++);
+		paramlen--;
+	}
+	if (timeout < 0) {
+		blogic_cmd_failure_reason =
+				"Timeout waiting for Parameter Acceptance";
+		result = -2;
+		goto done;
+	}
+	/*
+	   The Modify I/O Address command does not cause a Command Complete
+	   Interrupt.
+	 */
+	if (opcode == BLOGIC_MOD_IOADDR) {
+		statusreg.all = blogic_rdstatus(adapter);
+		if (statusreg.sr.cmd_invalid) {
+			blogic_cmd_failure_reason =
+					"Modify I/O Address Invalid";
+			result = -1;
+			goto done;
 		}
-		if (BusLogic_GlobalOptions.TraceConfiguration)
-			BusLogic_Notice("BusLogic_Command(%02X) Status = %02X: " "(Modify I/O Address)\n", HostAdapter, OperationCode, StatusRegister.All);
-		Result = 0;
-		goto Done;
+		if (blogic_global_options.trace_config)
+			blogic_notice("blogic_cmd(%02X) Status = %02X: " "(Modify I/O Address)\n", adapter, opcode, statusreg.all);
+		result = 0;
+		goto done;
 	}
 	/*
 	   Select an appropriate timeout value for awaiting command completion.
 	 */
-	switch (OperationCode) {
-	case BusLogic_InquireInstalledDevicesID0to7:
-	case BusLogic_InquireInstalledDevicesID8to15:
-	case BusLogic_InquireTargetDevices:
+	switch (opcode) {
+	case BLOGIC_INQ_DEV0TO7:
+	case BLOGIC_INQ_DEV8TO15:
+	case BLOGIC_INQ_DEV:
 		/* Approximately 60 seconds. */
-		TimeoutCounter = 60 * 10000;
+		timeout = 60 * 10000;
 		break;
 	default:
 		/* Approximately 1 second. */
-		TimeoutCounter = 10000;
+		timeout = 10000;
 		break;
 	}
 	/*
-	   Receive any Reply Bytes, waiting for either the Command Complete bit to
-	   be set in the Interrupt Register, or for the Interrupt Handler to set the
-	   Host Adapter Command Completed bit in the Host Adapter structure.
+	   Receive any Reply Bytes, waiting for either the Command
+	   Complete bit to be set in the Interrupt Register, or for the
+	   Interrupt Handler to set the Host Adapter Command Completed
+	   bit in the Host Adapter structure.
 	 */
-	while (--TimeoutCounter >= 0) {
-		InterruptRegister.All = BusLogic_ReadInterruptRegister(HostAdapter);
-		StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
-		if (InterruptRegister.ir.CommandComplete)
+	while (--timeout >= 0) {
+		intreg.all = blogic_rdint(adapter);
+		statusreg.all = blogic_rdstatus(adapter);
+		if (intreg.ir.cmd_complete)
 			break;
-		if (HostAdapter->HostAdapterCommandCompleted)
+		if (adapter->adapter_cmd_complete)
 			break;
-		if (StatusRegister.sr.DataInRegisterReady) {
-			if (++ReplyBytes <= ReplyLength)
-				*ReplyPointer++ = BusLogic_ReadDataInRegister(HostAdapter);
+		if (statusreg.sr.datain_ready) {
+			if (++reply_b <= replylen)
+				*reply_p++ = blogic_rddatain(adapter);
 			else
-				BusLogic_ReadDataInRegister(HostAdapter);
+				blogic_rddatain(adapter);
 		}
-		if (OperationCode == BusLogic_FetchHostAdapterLocalRAM && StatusRegister.sr.HostAdapterReady)
+		if (opcode == BLOGIC_FETCH_LOCALRAM &&
+				statusreg.sr.adapter_ready)
 			break;
 		udelay(100);
 	}
-	if (TimeoutCounter < 0) {
-		BusLogic_CommandFailureReason = "Timeout waiting for Command Complete";
-		Result = -2;
-		goto Done;
+	if (timeout < 0) {
+		blogic_cmd_failure_reason =
+				"Timeout waiting for Command Complete";
+		result = -2;
+		goto done;
 	}
 	/*
 	   Clear any pending Command Complete Interrupt.
 	 */
-	BusLogic_InterruptReset(HostAdapter);
+	blogic_intreset(adapter);
 	/*
 	   Provide tracing information if requested.
 	 */
-	if (BusLogic_GlobalOptions.TraceConfiguration) {
+	if (blogic_global_options.trace_config) {
 		int i;
-		BusLogic_Notice("BusLogic_Command(%02X) Status = %02X: %2d ==> %2d:", HostAdapter, OperationCode, StatusRegister.All, ReplyLength, ReplyBytes);
-		if (ReplyLength > ReplyBytes)
-			ReplyLength = ReplyBytes;
-		for (i = 0; i < ReplyLength; i++)
-			BusLogic_Notice(" %02X", HostAdapter, ((unsigned char *) ReplyData)[i]);
-		BusLogic_Notice("\n", HostAdapter);
+		blogic_notice("blogic_cmd(%02X) Status = %02X: %2d ==> %2d:",
+				adapter, opcode, statusreg.all, replylen,
+				reply_b);
+		if (replylen > reply_b)
+			replylen = reply_b;
+		for (i = 0; i < replylen; i++)
+			blogic_notice(" %02X", adapter,
+					((unsigned char *) reply)[i]);
+		blogic_notice("\n", adapter);
 	}
 	/*
 	   Process Command Invalid conditions.
 	 */
-	if (StatusRegister.sr.CommandInvalid) {
+	if (statusreg.sr.cmd_invalid) {
 		/*
-		   Some early BusLogic Host Adapters may not recover properly from
-		   a Command Invalid condition, so if this appears to be the case,
-		   a Soft Reset is issued to the Host Adapter.  Potentially invalid
-		   commands are never attempted after Mailbox Initialization is
-		   performed, so there should be no Host Adapter state lost by a
+		   Some early BusLogic Host Adapters may not recover
+		   properly from a Command Invalid condition, so if this
+		   appears to be the case, a Soft Reset is issued to the
+		   Host Adapter.  Potentially invalid commands are never
+		   attempted after Mailbox Initialization is performed,
+		   so there should be no Host Adapter state lost by a
 		   Soft Reset in response to a Command Invalid condition.
 		 */
 		udelay(1000);
-		StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter);
-		if (StatusRegister.sr.CommandInvalid ||
-		    StatusRegister.sr.Reserved ||
-		    StatusRegister.sr.DataInRegisterReady ||
-		    StatusRegister.sr.CommandParameterRegisterBusy || !StatusRegister.sr.HostAdapterReady || !StatusRegister.sr.InitializationRequired || StatusRegister.sr.DiagnosticActive || StatusRegister.sr.DiagnosticFailure) {
-			BusLogic_SoftReset(HostAdapter);
+		statusreg.all = blogic_rdstatus(adapter);
+		if (statusreg.sr.cmd_invalid || statusreg.sr.rsvd ||
+				statusreg.sr.datain_ready ||
+				statusreg.sr.cmd_param_busy ||
+				!statusreg.sr.adapter_ready ||
+				!statusreg.sr.init_reqd ||
+				statusreg.sr.diag_active ||
+				statusreg.sr.diag_failed) {
+			blogic_softreset(adapter);
 			udelay(1000);
 		}
-		BusLogic_CommandFailureReason = "Command Invalid";
-		Result = -1;
-		goto Done;
+		blogic_cmd_failure_reason = "Command Invalid";
+		result = -1;
+		goto done;
 	}
 	/*
 	   Handle Excess Parameters Supplied conditions.
 	 */
-	if (ParameterLength > 0) {
-		BusLogic_CommandFailureReason = "Excess Parameters Supplied";
-		Result = -1;
-		goto Done;
+	if (paramlen > 0) {
+		blogic_cmd_failure_reason = "Excess Parameters Supplied";
+		result = -1;
+		goto done;
 	}
 	/*
 	   Indicate the command completed successfully.
 	 */
-	BusLogic_CommandFailureReason = NULL;
-	Result = ReplyBytes;
+	blogic_cmd_failure_reason = NULL;
+	result = reply_b;
 	/*
 	   Restore the interrupt status if necessary and return.
 	 */
-      Done:
-	if (!HostAdapter->IRQ_ChannelAcquired)
-		local_irq_restore(ProcessorFlags);
-	return Result;
+done:
+	if (!adapter->irq_acquired)
+		local_irq_restore(processor_flag);
+	return result;
 }
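Everything in this file that talks to the board's command/parameter register now funnels through blogic_cmd(adapter, opcode, param, paramlen, reply, replylen), which per the comment above returns the reply byte count on success, -1 for an invalid command, and -2 on a timeout. A representative call, modeled on the Inquire Board ID use later in this same patch (a condensed sketch, not a verbatim excerpt):

	struct blogic_board_id id;

	/* success is defined as "exactly the expected reply length" */
	if (blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
			&id, sizeof(id)) != sizeof(id))
		blogic_err("INQUIRE BOARD ID failed\n", adapter);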
 
 /*
-  BusLogic_AppendProbeAddressISA appends a single ISA I/O Address to the list
+  blogic_add_probeaddr_isa appends a single ISA I/O Address to the list
   of I/O Address and Bus Probe Information to be checked for potential
   BusLogic Host Adapters.
 */
 
-static void __init BusLogic_AppendProbeAddressISA(unsigned long IO_Address)
+static void __init blogic_add_probeaddr_isa(unsigned long io_addr)
 {
-	struct BusLogic_ProbeInfo *ProbeInfo;
-	if (BusLogic_ProbeInfoCount >= BusLogic_MaxHostAdapters)
+	struct blogic_probeinfo *probeinfo;
+	if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
 		return;
-	ProbeInfo = &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++];
-	ProbeInfo->HostAdapterType = BusLogic_MultiMaster;
-	ProbeInfo->HostAdapterBusType = BusLogic_ISA_Bus;
-	ProbeInfo->IO_Address = IO_Address;
-	ProbeInfo->PCI_Device = NULL;
+	probeinfo = &blogic_probeinfo_list[blogic_probeinfo_count++];
+	probeinfo->adapter_type = BLOGIC_MULTIMASTER;
+	probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
+	probeinfo->io_addr = io_addr;
+	probeinfo->pci_device = NULL;
 }
 
 /*
-  BusLogic_InitializeProbeInfoListISA initializes the list of I/O Address and
+  blogic_init_probeinfo_isa initializes the list of I/O Address and
   Bus Probe Information to be checked for potential BusLogic SCSI Host
   Adapters only from the list of standard BusLogic MultiMaster ISA I/O
   Addresses.
 */
 
-static void __init BusLogic_InitializeProbeInfoListISA(struct BusLogic_HostAdapter
-						       *PrototypeHostAdapter)
+static void __init blogic_init_probeinfo_isa(struct blogic_adapter *adapter)
 {
 	/*
-	   If BusLogic Driver Options specifications requested that ISA Bus Probes
-	   be inhibited, do not proceed further.
+	   If BusLogic Driver Options specifications requested that ISA
+	   Bus Probes be inhibited, do not proceed further.
 	 */
-	if (BusLogic_ProbeOptions.NoProbeISA)
+	if (blogic_probe_options.noprobe_isa)
 		return;
 	/*
 	   Append the list of standard BusLogic MultiMaster ISA I/O Addresses.
 	 */
-	if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe330)
-		BusLogic_AppendProbeAddressISA(0x330);
-	if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe334)
-		BusLogic_AppendProbeAddressISA(0x334);
-	if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe230)
-		BusLogic_AppendProbeAddressISA(0x230);
-	if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe234)
-		BusLogic_AppendProbeAddressISA(0x234);
-	if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe130)
-		BusLogic_AppendProbeAddressISA(0x130);
-	if (!BusLogic_ProbeOptions.LimitedProbeISA || BusLogic_ProbeOptions.Probe134)
-		BusLogic_AppendProbeAddressISA(0x134);
+	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe330)
+		blogic_add_probeaddr_isa(0x330);
+	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe334)
+		blogic_add_probeaddr_isa(0x334);
+	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe230)
+		blogic_add_probeaddr_isa(0x230);
+	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe234)
+		blogic_add_probeaddr_isa(0x234);
+	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe130)
+		blogic_add_probeaddr_isa(0x130);
+	if (!blogic_probe_options.limited_isa || blogic_probe_options.probe134)
+		blogic_add_probeaddr_isa(0x134);
 }
 
@@ -590,25 +621,35 @@
 
 /*
-  BusLogic_SortProbeInfo sorts a section of BusLogic_ProbeInfoList in order
+  blogic_sort_probeinfo sorts a section of blogic_probeinfo_list in order
   of increasing PCI Bus and Device Number.
 */
 
-static void __init BusLogic_SortProbeInfo(struct BusLogic_ProbeInfo *ProbeInfoList, int ProbeInfoCount)
+static void __init blogic_sort_probeinfo(struct blogic_probeinfo
+					*probeinfo_list, int probeinfo_cnt)
 {
-	int LastInterchange = ProbeInfoCount - 1, Bound, j;
-	while (LastInterchange > 0) {
-		Bound = LastInterchange;
-		LastInterchange = 0;
-		for (j = 0; j < Bound; j++) {
-			struct BusLogic_ProbeInfo *ProbeInfo1 = &ProbeInfoList[j];
-			struct BusLogic_ProbeInfo *ProbeInfo2 = &ProbeInfoList[j + 1];
-			if (ProbeInfo1->Bus > ProbeInfo2->Bus || (ProbeInfo1->Bus == ProbeInfo2->Bus && (ProbeInfo1->Device > ProbeInfo2->Device))) {
-				struct BusLogic_ProbeInfo TempProbeInfo;
-				memcpy(&TempProbeInfo, ProbeInfo1, sizeof(struct BusLogic_ProbeInfo));
-				memcpy(ProbeInfo1, ProbeInfo2, sizeof(struct BusLogic_ProbeInfo));
-				memcpy(ProbeInfo2, &TempProbeInfo, sizeof(struct BusLogic_ProbeInfo));
-				LastInterchange = j;
+	int last_exchange = probeinfo_cnt - 1, bound, j;
+
+	while (last_exchange > 0) {
+		bound = last_exchange;
+		last_exchange = 0;
+		for (j = 0; j < bound; j++) {
+			struct blogic_probeinfo *probeinfo1 =
+							&probeinfo_list[j];
+			struct blogic_probeinfo *probeinfo2 =
+							&probeinfo_list[j + 1];
+			if (probeinfo1->bus > probeinfo2->bus ||
+				(probeinfo1->bus == probeinfo2->bus &&
+				(probeinfo1->dev > probeinfo2->dev))) {
+				struct blogic_probeinfo tmp_probeinfo;
+
+				memcpy(&tmp_probeinfo, probeinfo1,
+					sizeof(struct blogic_probeinfo));
+				memcpy(probeinfo1, probeinfo2,
+					sizeof(struct blogic_probeinfo));
+				memcpy(probeinfo2, &tmp_probeinfo,
+					sizeof(struct blogic_probeinfo));
+				last_exchange = j;
 			}
 		}
 	}
 }
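blogic_sort_probeinfo() keeps the driver's original hand-rolled bubble sort and only renames its locals. For comparison, the same (bus, dev) ordering could be expressed with the kernel's generic sort() helper from <linux/sort.h>; this is purely an illustrative alternative, not part of the patch (sort() is heapsort and not stable, but (bus, dev) pairs are unique here, so the result is identical):

	#include <linux/sort.h>

	/* Compare two probe entries by PCI bus, then device number. */
	static int blogic_cmp_probeinfo(const void *a, const void *b)
	{
		const struct blogic_probeinfo *p1 = a, *p2 = b;

		if (p1->bus != p2->bus)
			return p1->bus - p2->bus;
		return p1->dev - p2->dev;
	}

	static void __init blogic_sort_probeinfo(struct blogic_probeinfo
						*probeinfo_list, int probeinfo_cnt)
	{
		/* NULL swap_func selects the generic byte-wise swap */
		sort(probeinfo_list, probeinfo_cnt, sizeof(*probeinfo_list),
		     blogic_cmp_probeinfo, NULL);
	}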
 
@@ -616,84 +657,88 @@
 
 /*
-  BusLogic_InitializeMultiMasterProbeInfo initializes the list of I/O Address
+  blogic_init_mm_probeinfo initializes the list of I/O Address
   and Bus Probe Information to be checked for potential BusLogic MultiMaster
   SCSI Host Adapters by interrogating the PCI Configuration Space on PCI
   machines as well as from the list of standard BusLogic MultiMaster ISA I/O
   Addresses.  It returns the number of PCI MultiMaster Host Adapters found.
 */
 
-static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAdapter
-							  *PrototypeHostAdapter)
+static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 {
-	struct BusLogic_ProbeInfo *PrimaryProbeInfo = &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount];
-	int NonPrimaryPCIMultiMasterIndex = BusLogic_ProbeInfoCount + 1;
-	int NonPrimaryPCIMultiMasterCount = 0, PCIMultiMasterCount = 0;
-	bool ForceBusDeviceScanningOrder = false;
-	bool ForceBusDeviceScanningOrderChecked = false;
-	bool StandardAddressSeen[6];
-	struct pci_dev *PCI_Device = NULL;
+	struct blogic_probeinfo *pr_probeinfo =
+		&blogic_probeinfo_list[blogic_probeinfo_count];
+	int nonpr_mmindex = blogic_probeinfo_count + 1;
+	int nonpr_mmcount = 0, mmcount = 0;
+	bool force_scan_order = false;
+	bool force_scan_order_checked = false;
+	bool addr_seen[6];
+	struct pci_dev *pci_device = NULL;
 	int i;
-	if (BusLogic_ProbeInfoCount >= BusLogic_MaxHostAdapters)
+	if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
 		return 0;
-	BusLogic_ProbeInfoCount++;
+	blogic_probeinfo_count++;
 	for (i = 0; i < 6; i++)
-		StandardAddressSeen[i] = false;
-	/*
-	   Iterate over the MultiMaster PCI Host Adapters.  For each enumerated host
-	   adapter, determine whether its ISA Compatible I/O Port is enabled and if
-	   so, whether it is assigned the Primary I/O Address.  A host adapter that is
-	   assigned the Primary I/O Address will always be the preferred boot device.
-	   The MultiMaster BIOS will first recognize a host adapter at the Primary I/O
-	   Address, then any other PCI host adapters, and finally any host adapters
-	   located at the remaining standard ISA I/O Addresses.  When a PCI host
-	   adapter is found with its ISA Compatible I/O Port enabled, a command is
-	   issued to disable the ISA Compatible I/O Port, and it is noted that the
+		addr_seen[i] = false;
+	/*
+	   Iterate over the MultiMaster PCI Host Adapters.  For each
+	   enumerated host adapter, determine whether its ISA Compatible
+	   I/O Port is enabled and if so, whether it is assigned the
+	   Primary I/O Address.  A host adapter that is assigned the
+	   Primary I/O Address will always be the preferred boot device.
+	   The MultiMaster BIOS will first recognize a host adapter at
+	   the Primary I/O Address, then any other PCI host adapters,
+	   and finally any host adapters located at the remaining
+	   standard ISA I/O Addresses.  When a PCI host adapter is found
+	   with its ISA Compatible I/O Port enabled, a command is issued
+	   to disable the ISA Compatible I/O Port, and it is noted that the
 	   particular standard ISA I/O Address need not be probed.
 	 */
-	PrimaryProbeInfo->IO_Address = 0;
-	while ((PCI_Device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, PCI_Device)) != NULL) {
-		struct BusLogic_HostAdapter *HostAdapter = PrototypeHostAdapter;
-		struct BusLogic_PCIHostAdapterInformation PCIHostAdapterInformation;
-		enum BusLogic_ISACompatibleIOPort ModifyIOAddressRequest;
-		unsigned char Bus;
-		unsigned char Device;
-		unsigned int IRQ_Channel;
-		unsigned long BaseAddress0;
-		unsigned long BaseAddress1;
-		unsigned long IO_Address;
-		unsigned long PCI_Address;
-
-		if (pci_enable_device(PCI_Device))
+	pr_probeinfo->io_addr = 0;
+	while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
+					PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
+					pci_device)) != NULL) {
+		struct blogic_adapter *host_adapter = adapter;
+		struct blogic_adapter_info adapter_info;
+		enum blogic_isa_ioport mod_ioaddr_req;
+		unsigned char bus;
+		unsigned char device;
+		unsigned int irq_ch;
+		unsigned long base_addr0;
+		unsigned long base_addr1;
+		unsigned long io_addr;
+		unsigned long pci_addr;
+
+		if (pci_enable_device(pci_device))
 			continue;
-		if (pci_set_dma_mask(PCI_Device, DMA_BIT_MASK(32) ))
+		if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
 			continue;
-		Bus = PCI_Device->bus->number;
-		Device = PCI_Device->devfn >> 3;
-		IRQ_Channel = PCI_Device->irq;
-		IO_Address = BaseAddress0 = pci_resource_start(PCI_Device, 0);
-		PCI_Address = BaseAddress1 = pci_resource_start(PCI_Device, 1);
+		bus = pci_device->bus->number;
+		device = pci_device->devfn >> 3;
+		irq_ch = pci_device->irq;
+		io_addr = base_addr0 = pci_resource_start(pci_device, 0);
+		pci_addr = base_addr1 = pci_resource_start(pci_device, 1);
 
-		if (pci_resource_flags(PCI_Device, 0) & IORESOURCE_MEM) {
-			BusLogic_Error("BusLogic: Base Address0 0x%X not I/O for " "MultiMaster Host Adapter\n", NULL, BaseAddress0);
-			BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, Bus, Device, IO_Address);
+		if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) {
+			blogic_err("BusLogic: Base Address0 0x%X not I/O for " "MultiMaster Host Adapter\n", NULL, base_addr0);
+			blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
 			continue;
 		}
-		if (pci_resource_flags(PCI_Device, 1) & IORESOURCE_IO) {
for " "MultiMaster Host Adapter\n", NULL, BaseAddress1); - BusLogic_Error("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, Bus, Device, PCI_Address); + if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) { + blogic_err("BusLogic: Base Address1 0x%X not Memory for " "MultiMaster Host Adapter\n", NULL, base_addr1); + blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr); continue; } - if (IRQ_Channel == 0) { - BusLogic_Error("BusLogic: IRQ Channel %d invalid for " "MultiMaster Host Adapter\n", NULL, IRQ_Channel); - BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, Bus, Device, IO_Address); + if (irq_ch == 0) { + blogic_err("BusLogic: IRQ Channel %d invalid for " "MultiMaster Host Adapter\n", NULL, irq_ch); + blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr); continue; } - if (BusLogic_GlobalOptions.TraceProbe) { - BusLogic_Notice("BusLogic: PCI MultiMaster Host Adapter " "detected at\n", NULL); - BusLogic_Notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, Bus, Device, IO_Address, PCI_Address); + if (blogic_global_options.trace_probe) { + blogic_notice("BusLogic: PCI MultiMaster Host Adapter " "detected at\n", NULL); + blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr); } /* Issue the Inquire PCI Host Adapter Information command to determine @@ -701,238 +746,258 @@ static int __init BusLogic_InitializeMultiMasterProbeInfo(struct BusLogic_HostAd known and enabled, note that the particular Standard ISA I/O Address should not be probed. */ - HostAdapter->IO_Address = IO_Address; - BusLogic_InterruptReset(HostAdapter); - if (BusLogic_Command(HostAdapter, BusLogic_InquirePCIHostAdapterInformation, NULL, 0, &PCIHostAdapterInformation, sizeof(PCIHostAdapterInformation)) - == sizeof(PCIHostAdapterInformation)) { - if (PCIHostAdapterInformation.ISACompatibleIOPort < 6) - StandardAddressSeen[PCIHostAdapterInformation.ISACompatibleIOPort] = true; + host_adapter->io_addr = io_addr; + blogic_intreset(host_adapter); + if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0, + &adapter_info, sizeof(adapter_info)) == + sizeof(adapter_info)) { + if (adapter_info.isa_port < 6) + addr_seen[adapter_info.isa_port] = true; } else - PCIHostAdapterInformation.ISACompatibleIOPort = BusLogic_IO_Disable; + adapter_info.isa_port = BLOGIC_IO_DISABLE; /* - * Issue the Modify I/O Address command to disable the ISA Compatible - * I/O Port. On PCI Host Adapters, the Modify I/O Address command - * allows modification of the ISA compatible I/O Address that the Host - * Adapter responds to; it does not affect the PCI compliant I/O Address - * assigned at system initialization. + Issue the Modify I/O Address command to disable the + ISA Compatible I/O Port. On PCI Host Adapters, the + Modify I/O Address command allows modification of the + ISA compatible I/O Address that the Host Adapter + responds to; it does not affect the PCI compliant + I/O Address assigned at system initialization. 
 		 */
-		ModifyIOAddressRequest = BusLogic_IO_Disable;
-		BusLogic_Command(HostAdapter, BusLogic_ModifyIOAddress, &ModifyIOAddressRequest, sizeof(ModifyIOAddressRequest), NULL, 0);
+		mod_ioaddr_req = BLOGIC_IO_DISABLE;
+		blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
+				sizeof(mod_ioaddr_req), NULL, 0);
 		/*
-		   For the first MultiMaster Host Adapter enumerated, issue the Fetch
-		   Host Adapter Local RAM command to read byte 45 of the AutoSCSI area,
-		   for the setting of the "Use Bus And Device # For PCI Scanning Seq."
-		   option.  Issue the Inquire Board ID command since this option is
+		   For the first MultiMaster Host Adapter enumerated,
+		   issue the Fetch Host Adapter Local RAM command to read
+		   byte 45 of the AutoSCSI area, for the setting of the
+		   "Use Bus And Device # For PCI Scanning Seq." option.
+		   Issue the Inquire Board ID command since this option is
 		   only valid for the BT-948/958/958D.
 		 */
-		if (!ForceBusDeviceScanningOrderChecked) {
-			struct BusLogic_FetchHostAdapterLocalRAMRequest FetchHostAdapterLocalRAMRequest;
-			struct BusLogic_AutoSCSIByte45 AutoSCSIByte45;
-			struct BusLogic_BoardID BoardID;
-			FetchHostAdapterLocalRAMRequest.ByteOffset = BusLogic_AutoSCSI_BaseOffset + 45;
-			FetchHostAdapterLocalRAMRequest.ByteCount = sizeof(AutoSCSIByte45);
-			BusLogic_Command(HostAdapter, BusLogic_FetchHostAdapterLocalRAM, &FetchHostAdapterLocalRAMRequest, sizeof(FetchHostAdapterLocalRAMRequest), &AutoSCSIByte45, sizeof(AutoSCSIByte45));
-			BusLogic_Command(HostAdapter, BusLogic_InquireBoardID, NULL, 0, &BoardID, sizeof(BoardID));
-			if (BoardID.FirmwareVersion1stDigit == '5')
-				ForceBusDeviceScanningOrder = AutoSCSIByte45.ForceBusDeviceScanningOrder;
-			ForceBusDeviceScanningOrderChecked = true;
+		if (!force_scan_order_checked) {
+			struct blogic_fetch_localram fetch_localram;
+			struct blogic_autoscsi_byte45 autoscsi_byte45;
+			struct blogic_board_id id;
+
+			fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
+			fetch_localram.count = sizeof(autoscsi_byte45);
+			blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,
+					&fetch_localram, sizeof(fetch_localram),
+					&autoscsi_byte45,
+					sizeof(autoscsi_byte45));
+			blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
+					&id, sizeof(id));
+			if (id.fw_ver_digit1 == '5')
+				force_scan_order =
+					autoscsi_byte45.force_scan_order;
+			force_scan_order_checked = true;
 		}
 		/*
-		   Determine whether this MultiMaster Host Adapter has its ISA
-		   Compatible I/O Port enabled and is assigned the Primary I/O Address.
-		   If it does, then it is the Primary MultiMaster Host Adapter and must
-		   be recognized first.  If it does not, then it is added to the list
-		   for probing after any Primary MultiMaster Host Adapter is probed.
+		   Determine whether this MultiMaster Host Adapter has its
+		   ISA Compatible I/O Port enabled and is assigned the
+		   Primary I/O Address.  If it does, then it is the Primary
+		   MultiMaster Host Adapter and must be recognized first.
+		   If it does not, then it is added to the list for probing
+		   after any Primary MultiMaster Host Adapter is probed.
 		 */
-		if (PCIHostAdapterInformation.ISACompatibleIOPort == BusLogic_IO_330) {
-			PrimaryProbeInfo->HostAdapterType = BusLogic_MultiMaster;
-			PrimaryProbeInfo->HostAdapterBusType = BusLogic_PCI_Bus;
-			PrimaryProbeInfo->IO_Address = IO_Address;
-			PrimaryProbeInfo->PCI_Address = PCI_Address;
-			PrimaryProbeInfo->Bus = Bus;
-			PrimaryProbeInfo->Device = Device;
-			PrimaryProbeInfo->IRQ_Channel = IRQ_Channel;
-			PrimaryProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
-			PCIMultiMasterCount++;
-		} else if (BusLogic_ProbeInfoCount < BusLogic_MaxHostAdapters) {
-			struct BusLogic_ProbeInfo *ProbeInfo = &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++];
-			ProbeInfo->HostAdapterType = BusLogic_MultiMaster;
-			ProbeInfo->HostAdapterBusType = BusLogic_PCI_Bus;
-			ProbeInfo->IO_Address = IO_Address;
-			ProbeInfo->PCI_Address = PCI_Address;
-			ProbeInfo->Bus = Bus;
-			ProbeInfo->Device = Device;
-			ProbeInfo->IRQ_Channel = IRQ_Channel;
-			ProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
-			NonPrimaryPCIMultiMasterCount++;
-			PCIMultiMasterCount++;
+		if (adapter_info.isa_port == BLOGIC_IO_330) {
+			pr_probeinfo->adapter_type = BLOGIC_MULTIMASTER;
+			pr_probeinfo->adapter_bus_type = BLOGIC_PCI_BUS;
+			pr_probeinfo->io_addr = io_addr;
+			pr_probeinfo->pci_addr = pci_addr;
+			pr_probeinfo->bus = bus;
+			pr_probeinfo->dev = device;
+			pr_probeinfo->irq_ch = irq_ch;
+			pr_probeinfo->pci_device = pci_dev_get(pci_device);
+			mmcount++;
+		} else if (blogic_probeinfo_count < BLOGIC_MAX_ADAPTERS) {
+			struct blogic_probeinfo *probeinfo =
+				&blogic_probeinfo_list[blogic_probeinfo_count++];
+			probeinfo->adapter_type = BLOGIC_MULTIMASTER;
+			probeinfo->adapter_bus_type = BLOGIC_PCI_BUS;
+			probeinfo->io_addr = io_addr;
+			probeinfo->pci_addr = pci_addr;
+			probeinfo->bus = bus;
+			probeinfo->dev = device;
+			probeinfo->irq_ch = irq_ch;
+			probeinfo->pci_device = pci_dev_get(pci_device);
+			nonpr_mmcount++;
+			mmcount++;
 		} else
-			BusLogic_Warning("BusLogic: Too many Host Adapters " "detected\n", NULL);
-	}
-	/*
-	   If the AutoSCSI "Use Bus And Device # For PCI Scanning Seq." option is ON
-	   for the first enumerated MultiMaster Host Adapter, and if that host adapter
-	   is a BT-948/958/958D, then the MultiMaster BIOS will recognize MultiMaster
-	   Host Adapters in the order of increasing PCI Bus and Device Number.  In
-	   that case, sort the probe information into the same order the BIOS uses.
-	   If this option is OFF, then the MultiMaster BIOS will recognize MultiMaster
-	   Host Adapters in the order they are enumerated by the PCI BIOS, and hence
-	   no sorting is necessary.
-	 */
-	if (ForceBusDeviceScanningOrder)
-		BusLogic_SortProbeInfo(&BusLogic_ProbeInfoList[NonPrimaryPCIMultiMasterIndex], NonPrimaryPCIMultiMasterCount);
-	/*
-	   If no PCI MultiMaster Host Adapter is assigned the Primary I/O Address,
-	   then the Primary I/O Address must be probed explicitly before any PCI
-	   host adapters are probed.
-	 */
-	if (!BusLogic_ProbeOptions.NoProbeISA)
-		if (PrimaryProbeInfo->IO_Address == 0 &&
-				(!BusLogic_ProbeOptions.LimitedProbeISA ||
-				 BusLogic_ProbeOptions.Probe330)) {
-			PrimaryProbeInfo->HostAdapterType = BusLogic_MultiMaster;
-			PrimaryProbeInfo->HostAdapterBusType = BusLogic_ISA_Bus;
-			PrimaryProbeInfo->IO_Address = 0x330;
+			blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL);
	}
	/*
	   If the AutoSCSI "Use Bus And Device # For PCI Scanning Seq."
+	   option is ON for the first enumerated MultiMaster Host Adapter,
+	   and if that host adapter is a BT-948/958/958D, then the
+	   MultiMaster BIOS will recognize MultiMaster Host Adapters in
+	   the order of increasing PCI Bus and Device Number.  In that case,
+	   sort the probe information into the same order the BIOS uses.
+	   If this option is OFF, then the MultiMaster BIOS will recognize
+	   MultiMaster Host Adapters in the order they are enumerated by
+	   the PCI BIOS, and hence no sorting is necessary.
+	 */
+	if (force_scan_order)
+		blogic_sort_probeinfo(&blogic_probeinfo_list[nonpr_mmindex],
+					nonpr_mmcount);
+	/*
+	   If no PCI MultiMaster Host Adapter is assigned the Primary
+	   I/O Address, then the Primary I/O Address must be probed
+	   explicitly before any PCI host adapters are probed.
+	 */
+	if (!blogic_probe_options.noprobe_isa)
+		if (pr_probeinfo->io_addr == 0 &&
+				(!blogic_probe_options.limited_isa ||
+				 blogic_probe_options.probe330)) {
+			pr_probeinfo->adapter_type = BLOGIC_MULTIMASTER;
+			pr_probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
+			pr_probeinfo->io_addr = 0x330;
 		}
 	/*
 	   Append the list of standard BusLogic MultiMaster ISA I/O Addresses,
 	   omitting the Primary I/O Address which has already been handled.
 	 */
-	if (!BusLogic_ProbeOptions.NoProbeISA) {
-		if (!StandardAddressSeen[1] &&
-		    (!BusLogic_ProbeOptions.LimitedProbeISA ||
-		     BusLogic_ProbeOptions.Probe334))
-			BusLogic_AppendProbeAddressISA(0x334);
-		if (!StandardAddressSeen[2] &&
-		    (!BusLogic_ProbeOptions.LimitedProbeISA ||
-		     BusLogic_ProbeOptions.Probe230))
-			BusLogic_AppendProbeAddressISA(0x230);
-		if (!StandardAddressSeen[3] &&
-		    (!BusLogic_ProbeOptions.LimitedProbeISA ||
-		     BusLogic_ProbeOptions.Probe234))
-			BusLogic_AppendProbeAddressISA(0x234);
-		if (!StandardAddressSeen[4] &&
-		    (!BusLogic_ProbeOptions.LimitedProbeISA ||
-		     BusLogic_ProbeOptions.Probe130))
-			BusLogic_AppendProbeAddressISA(0x130);
-		if (!StandardAddressSeen[5] &&
-		    (!BusLogic_ProbeOptions.LimitedProbeISA ||
-		     BusLogic_ProbeOptions.Probe134))
-			BusLogic_AppendProbeAddressISA(0x134);
+	if (!blogic_probe_options.noprobe_isa) {
+		if (!addr_seen[1] &&
+				(!blogic_probe_options.limited_isa ||
+				 blogic_probe_options.probe334))
+			blogic_add_probeaddr_isa(0x334);
+		if (!addr_seen[2] &&
+				(!blogic_probe_options.limited_isa ||
+				 blogic_probe_options.probe230))
+			blogic_add_probeaddr_isa(0x230);
+		if (!addr_seen[3] &&
+				(!blogic_probe_options.limited_isa ||
+				 blogic_probe_options.probe234))
+			blogic_add_probeaddr_isa(0x234);
+		if (!addr_seen[4] &&
+				(!blogic_probe_options.limited_isa ||
+				 blogic_probe_options.probe130))
+			blogic_add_probeaddr_isa(0x130);
+		if (!addr_seen[5] &&
+				(!blogic_probe_options.limited_isa ||
+				 blogic_probe_options.probe134))
+			blogic_add_probeaddr_isa(0x134);
 	}
 	/*
 	   Iterate over the older non-compliant MultiMaster PCI Host Adapters,
 	   noting the PCI bus location and assigned IRQ Channel.
 	 */
-	PCI_Device = NULL;
-	while ((PCI_Device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC, PCI_Device)) != NULL) {
-		unsigned char Bus;
-		unsigned char Device;
-		unsigned int IRQ_Channel;
-		unsigned long IO_Address;
+	pci_device = NULL;
+	while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
+					PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC,
+					pci_device)) != NULL) {
+		unsigned char bus;
+		unsigned char device;
+		unsigned int irq_ch;
+		unsigned long io_addr;
 
-		if (pci_enable_device(PCI_Device))
+		if (pci_enable_device(pci_device))
 			continue;
-		if (pci_set_dma_mask(PCI_Device, DMA_BIT_MASK(32)))
+		if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
 			continue;
-		Bus = PCI_Device->bus->number;
-		Device = PCI_Device->devfn >> 3;
-		IRQ_Channel = PCI_Device->irq;
-		IO_Address = pci_resource_start(PCI_Device, 0);
+		bus = pci_device->bus->number;
+		device = pci_device->devfn >> 3;
+		irq_ch = pci_device->irq;
+		io_addr = pci_resource_start(pci_device, 0);
 
-		if (IO_Address == 0 || IRQ_Channel == 0)
+		if (io_addr == 0 || irq_ch == 0)
 			continue;
-		for (i = 0; i < BusLogic_ProbeInfoCount; i++) {
-			struct BusLogic_ProbeInfo *ProbeInfo = &BusLogic_ProbeInfoList[i];
-			if (ProbeInfo->IO_Address == IO_Address && ProbeInfo->HostAdapterType == BusLogic_MultiMaster) {
-				ProbeInfo->HostAdapterBusType = BusLogic_PCI_Bus;
-				ProbeInfo->PCI_Address = 0;
-				ProbeInfo->Bus = Bus;
-				ProbeInfo->Device = Device;
-				ProbeInfo->IRQ_Channel = IRQ_Channel;
-				ProbeInfo->PCI_Device = pci_dev_get(PCI_Device);
+		for (i = 0; i < blogic_probeinfo_count; i++) {
+			struct blogic_probeinfo *probeinfo =
+				&blogic_probeinfo_list[i];
+			if (probeinfo->io_addr == io_addr &&
+				probeinfo->adapter_type == BLOGIC_MULTIMASTER) {
+				probeinfo->adapter_bus_type = BLOGIC_PCI_BUS;
+				probeinfo->pci_addr = 0;
+				probeinfo->bus = bus;
+				probeinfo->dev = device;
+				probeinfo->irq_ch = irq_ch;
+				probeinfo->pci_device = pci_dev_get(pci_device);
 				break;
 			}
 		}
 	}
-	return PCIMultiMasterCount;
+	return mmcount;
 }
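Both probe routines rely on the standard pci_get_device() idiom visible above: passing the previous device back in advances the scan and drops that device's reference, so the returned pointer is the only one the loop may use, and anything stored past the loop body (as the probeinfo entries are) needs its own pci_dev_get(). The bare shape of the loop, using this driver's vendor/device IDs (a sketch; the printout is illustrative):

	#include <linux/pci.h>

	struct pci_dev *pci_device = NULL;

	/* pci_get_device() puts the reference of the device passed in
	 * and returns the next match with a fresh reference held. */
	while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
				PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
				pci_device)) != NULL) {
		if (pci_enable_device(pci_device))
			continue;
		pr_info("BusLogic HBA at %02x:%02x.%d\n",
			pci_device->bus->number,
			PCI_SLOT(pci_device->devfn),
			PCI_FUNC(pci_device->devfn));
		/* to keep the device past this iteration:
		 * kept = pci_dev_get(pci_device); */
	}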
 
 /*
-  BusLogic_InitializeFlashPointProbeInfo initializes the list of I/O Address
+  blogic_init_fp_probeinfo initializes the list of I/O Address
   and Bus Probe Information to be checked for potential BusLogic FlashPoint
   Host Adapters by interrogating the PCI Configuration Space.  It returns the
   number of FlashPoint Host Adapters found.
 */
 
-static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAdapter
-							 *PrototypeHostAdapter)
+static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
 {
-	int FlashPointIndex = BusLogic_ProbeInfoCount, FlashPointCount = 0;
-	struct pci_dev *PCI_Device = NULL;
+	int fpindex = blogic_probeinfo_count, fpcount = 0;
+	struct pci_dev *pci_device = NULL;
 	/*
 	   Interrogate PCI Configuration Space for any FlashPoint Host Adapters.
 	 */
-	while ((PCI_Device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT, PCI_Device)) != NULL) {
-		unsigned char Bus;
-		unsigned char Device;
-		unsigned int IRQ_Channel;
-		unsigned long BaseAddress0;
-		unsigned long BaseAddress1;
-		unsigned long IO_Address;
-		unsigned long PCI_Address;
-
-		if (pci_enable_device(PCI_Device))
+	while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
+					PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT,
+					pci_device)) != NULL) {
+		unsigned char bus;
+		unsigned char device;
+		unsigned int irq_ch;
+		unsigned long base_addr0;
+		unsigned long base_addr1;
+		unsigned long io_addr;
+		unsigned long pci_addr;
+
+		if (pci_enable_device(pci_device))
 			continue;
-		if (pci_set_dma_mask(PCI_Device, DMA_BIT_MASK(32)))
+		if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
 			continue;
-		Bus = PCI_Device->bus->number;
-		Device = PCI_Device->devfn >> 3;
-		IRQ_Channel = PCI_Device->irq;
-		IO_Address = BaseAddress0 = pci_resource_start(PCI_Device, 0);
-		PCI_Address = BaseAddress1 = pci_resource_start(PCI_Device, 1);
+		bus = pci_device->bus->number;
+		device = pci_device->devfn >> 3;
+		irq_ch = pci_device->irq;
+		io_addr = base_addr0 = pci_resource_start(pci_device, 0);
+		pci_addr = base_addr1 = pci_resource_start(pci_device, 1);
 #ifdef CONFIG_SCSI_FLASHPOINT
-		if (pci_resource_flags(PCI_Device, 0) & IORESOURCE_MEM) {
-			BusLogic_Error("BusLogic: Base Address0 0x%X not I/O for " "FlashPoint Host Adapter\n", NULL, BaseAddress0);
-			BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, Bus, Device, IO_Address);
+		if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) {
+			blogic_err("BusLogic: Base Address0 0x%X not I/O for " "FlashPoint Host Adapter\n", NULL, base_addr0);
+			blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
 			continue;
 		}
-		if (pci_resource_flags(PCI_Device, 1) & IORESOURCE_IO) {
-			BusLogic_Error("BusLogic: Base Address1 0x%X not Memory for " "FlashPoint Host Adapter\n", NULL, BaseAddress1);
-			BusLogic_Error("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, Bus, Device, PCI_Address);
+		if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) {
+			blogic_err("BusLogic: Base Address1 0x%X not Memory for " "FlashPoint Host Adapter\n", NULL, base_addr1);
+			blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr);
 			continue;
 		}
-		if (IRQ_Channel == 0) {
-			BusLogic_Error("BusLogic: IRQ Channel %d invalid for " "FlashPoint Host Adapter\n", NULL, IRQ_Channel);
-			BusLogic_Error("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, Bus, Device, IO_Address);
+		if (irq_ch == 0) {
+			blogic_err("BusLogic: IRQ Channel %d invalid for " "FlashPoint Host Adapter\n", NULL, irq_ch);
+			blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
 			continue;
 		}
-		if (BusLogic_GlobalOptions.TraceProbe) {
-			BusLogic_Notice("BusLogic: FlashPoint Host Adapter " "detected at\n", NULL);
-			BusLogic_Notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, Bus, Device, IO_Address, PCI_Address);
+		if (blogic_global_options.trace_probe) {
+			blogic_notice("BusLogic: FlashPoint Host Adapter " "detected at\n", NULL);
+			blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr);
 		}
-		if (BusLogic_ProbeInfoCount < BusLogic_MaxHostAdapters) {
-			struct BusLogic_ProbeInfo *ProbeInfo = &BusLogic_ProbeInfoList[BusLogic_ProbeInfoCount++];
-			ProbeInfo->HostAdapterType = BusLogic_FlashPoint;
ProbeInfo->HostAdapterBusType = BusLogic_PCI_Bus; - ProbeInfo->IO_Address = IO_Address; - ProbeInfo->PCI_Address = PCI_Address; - ProbeInfo->Bus = Bus; - ProbeInfo->Device = Device; - ProbeInfo->IRQ_Channel = IRQ_Channel; - ProbeInfo->PCI_Device = pci_dev_get(PCI_Device); - FlashPointCount++; + if (blogic_probeinfo_count < BLOGIC_MAX_ADAPTERS) { + struct blogic_probeinfo *probeinfo = + &blogic_probeinfo_list[blogic_probeinfo_count++]; + probeinfo->adapter_type = BLOGIC_FLASHPOINT; + probeinfo->adapter_bus_type = BLOGIC_PCI_BUS; + probeinfo->io_addr = io_addr; + probeinfo->pci_addr = pci_addr; + probeinfo->bus = bus; + probeinfo->dev = device; + probeinfo->irq_ch = irq_ch; + probeinfo->pci_device = pci_dev_get(pci_device); + fpcount++; } else - BusLogic_Warning("BusLogic: Too many Host Adapters " "detected\n", NULL); + blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL); #else - BusLogic_Error("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", NULL, Bus, Device); - BusLogic_Error("BusLogic: I/O Address 0x%X PCI Address 0x%X, irq %d, " "but FlashPoint\n", NULL, IO_Address, PCI_Address, IRQ_Channel); - BusLogic_Error("BusLogic: support was omitted in this kernel " "configuration.\n", NULL); + blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", NULL, bus, device); + blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, irq %d, " "but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch); + blogic_err("BusLogic: support was omitted in this kernel " "configuration.\n", NULL); #endif } /* @@ -940,13 +1005,13 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda increasing PCI Bus and Device Number, so sort the probe information into the same order the BIOS uses. */ - BusLogic_SortProbeInfo(&BusLogic_ProbeInfoList[FlashPointIndex], FlashPointCount); - return FlashPointCount; + blogic_sort_probeinfo(&blogic_probeinfo_list[fpindex], fpcount); + return fpcount; } /* - BusLogic_InitializeProbeInfoList initializes the list of I/O Address and Bus + blogic_init_probeinfo_list initializes the list of I/O Address and Bus Probe Information to be checked for potential BusLogic SCSI Host Adapters by interrogating the PCI Configuration Space on PCI machines as well as from the list of standard BusLogic MultiMaster ISA I/O Addresses. By default, if both @@ -958,104 +1023,125 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda a particular probe order. */ -static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter - *PrototypeHostAdapter) +static void __init blogic_init_probeinfo_list(struct blogic_adapter *adapter) { /* - If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint - Host Adapters; otherwise, default to the standard ISA MultiMaster probe. - */ - if (!BusLogic_ProbeOptions.NoProbePCI) { - if (BusLogic_ProbeOptions.MultiMasterFirst) { - BusLogic_InitializeMultiMasterProbeInfo(PrototypeHostAdapter); - BusLogic_InitializeFlashPointProbeInfo(PrototypeHostAdapter); - } else if (BusLogic_ProbeOptions.FlashPointFirst) { - BusLogic_InitializeFlashPointProbeInfo(PrototypeHostAdapter); - BusLogic_InitializeMultiMasterProbeInfo(PrototypeHostAdapter); + If a PCI BIOS is present, interrogate it for MultiMaster and + FlashPoint Host Adapters; otherwise, default to the standard + ISA MultiMaster probe. 
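blogic_sort_probeinfo() (its body is not in this hunk) keeps the probe list in the order the BIOS enumerates boards, that is, ascending PCI bus number and then device number. The same ordering expressed as a standalone qsort() comparator over a reduced, illustrative struct:

#include <stdlib.h>

struct probeinfo {
        unsigned char bus;
        unsigned char dev;
};

static int by_bus_then_dev(const void *a, const void *b)
{
        const struct probeinfo *pa = a, *pb = b;

        if (pa->bus != pb->bus)
                return pa->bus - pb->bus;
        return pa->dev - pb->dev;
}

/* usage: qsort(list, count, sizeof(*list), by_bus_then_dev); */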
+ */ + if (!blogic_probe_options.noprobe_pci) { + if (blogic_probe_options.multimaster_first) { + blogic_init_mm_probeinfo(adapter); + blogic_init_fp_probeinfo(adapter); + } else if (blogic_probe_options.flashpoint_first) { + blogic_init_fp_probeinfo(adapter); + blogic_init_mm_probeinfo(adapter); } else { - int FlashPointCount = BusLogic_InitializeFlashPointProbeInfo(PrototypeHostAdapter); - int PCIMultiMasterCount = BusLogic_InitializeMultiMasterProbeInfo(PrototypeHostAdapter); - if (FlashPointCount > 0 && PCIMultiMasterCount > 0) { - struct BusLogic_ProbeInfo *ProbeInfo = &BusLogic_ProbeInfoList[FlashPointCount]; - struct BusLogic_HostAdapter *HostAdapter = PrototypeHostAdapter; - struct BusLogic_FetchHostAdapterLocalRAMRequest FetchHostAdapterLocalRAMRequest; - struct BusLogic_BIOSDriveMapByte Drive0MapByte; - while (ProbeInfo->HostAdapterBusType != BusLogic_PCI_Bus) - ProbeInfo++; - HostAdapter->IO_Address = ProbeInfo->IO_Address; - FetchHostAdapterLocalRAMRequest.ByteOffset = BusLogic_BIOS_BaseOffset + BusLogic_BIOS_DriveMapOffset + 0; - FetchHostAdapterLocalRAMRequest.ByteCount = sizeof(Drive0MapByte); - BusLogic_Command(HostAdapter, BusLogic_FetchHostAdapterLocalRAM, &FetchHostAdapterLocalRAMRequest, sizeof(FetchHostAdapterLocalRAMRequest), &Drive0MapByte, sizeof(Drive0MapByte)); + int fpcount = blogic_init_fp_probeinfo(adapter); + int mmcount = blogic_init_mm_probeinfo(adapter); + if (fpcount > 0 && mmcount > 0) { + struct blogic_probeinfo *probeinfo = + &blogic_probeinfo_list[fpcount]; + struct blogic_adapter *myadapter = adapter; + struct blogic_fetch_localram fetch_localram; + struct blogic_bios_drvmap d0_mapbyte; + + while (probeinfo->adapter_bus_type != + BLOGIC_PCI_BUS) + probeinfo++; + myadapter->io_addr = probeinfo->io_addr; + fetch_localram.offset = + BLOGIC_BIOS_BASE + BLOGIC_BIOS_DRVMAP; + fetch_localram.count = sizeof(d0_mapbyte); + blogic_cmd(myadapter, BLOGIC_FETCH_LOCALRAM, + &fetch_localram, + sizeof(fetch_localram), + &d0_mapbyte, + sizeof(d0_mapbyte)); /* - If the Map Byte for BIOS Drive 0 indicates that BIOS Drive 0 - is controlled by this PCI MultiMaster Host Adapter, then - reverse the probe order so that MultiMaster Host Adapters are - probed before FlashPoint Host Adapters. + If the Map Byte for BIOS Drive 0 indicates + that BIOS Drive 0 is controlled by this + PCI MultiMaster Host Adapter, then reverse + the probe order so that MultiMaster Host + Adapters are probed before FlashPoint Host + Adapters. 
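The Drive 0 map byte is obtained with the Fetch Host Adapter Local RAM command, whose parameter block is nothing more than an (offset, count) window into adapter-local RAM. A sketch of that framing; the exact field widths are an assumption here, inferred from how the request is filled in above rather than from the header:

struct fetch_localram {
        unsigned short offset;  /* byte offset into adapter local RAM */
        unsigned char count;    /* number of bytes to return */
} __attribute__((packed));

/* as above: offset = BLOGIC_BIOS_BASE + BLOGIC_BIOS_DRVMAP,
 * count = sizeof(struct blogic_bios_drvmap), then issue
 * BLOGIC_FETCH_LOCALRAM and read the map byte back. */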
*/ - if (Drive0MapByte.DiskGeometry != BusLogic_BIOS_Disk_Not_Installed) { - struct BusLogic_ProbeInfo SavedProbeInfo[BusLogic_MaxHostAdapters]; - int MultiMasterCount = BusLogic_ProbeInfoCount - FlashPointCount; - memcpy(SavedProbeInfo, BusLogic_ProbeInfoList, BusLogic_ProbeInfoCount * sizeof(struct BusLogic_ProbeInfo)); - memcpy(&BusLogic_ProbeInfoList[0], &SavedProbeInfo[FlashPointCount], MultiMasterCount * sizeof(struct BusLogic_ProbeInfo)); - memcpy(&BusLogic_ProbeInfoList[MultiMasterCount], &SavedProbeInfo[0], FlashPointCount * sizeof(struct BusLogic_ProbeInfo)); + if (d0_mapbyte.diskgeom != BLOGIC_BIOS_NODISK) { + struct blogic_probeinfo saved_probeinfo[BLOGIC_MAX_ADAPTERS]; + int mmcount = blogic_probeinfo_count - fpcount; + + memcpy(saved_probeinfo, + blogic_probeinfo_list, + blogic_probeinfo_count * sizeof(struct blogic_probeinfo)); + memcpy(&blogic_probeinfo_list[0], + &saved_probeinfo[fpcount], + mmcount * sizeof(struct blogic_probeinfo)); + memcpy(&blogic_probeinfo_list[mmcount], + &saved_probeinfo[0], + fpcount * sizeof(struct blogic_probeinfo)); } } } - } else - BusLogic_InitializeProbeInfoListISA(PrototypeHostAdapter); + } else { + blogic_init_probeinfo_isa(adapter); + } } #else -#define BusLogic_InitializeProbeInfoList(adapter) \ - BusLogic_InitializeProbeInfoListISA(adapter) +#define blogic_init_probeinfo_list(adapter) \ + blogic_init_probeinfo_isa(adapter) #endif /* CONFIG_PCI */ /* - BusLogic_Failure prints a standardized error message, and then returns false. + blogic_failure prints a standardized error message, and then returns false. */ -static bool BusLogic_Failure(struct BusLogic_HostAdapter *HostAdapter, char *ErrorMessage) +static bool blogic_failure(struct blogic_adapter *adapter, char *msg) { - BusLogic_AnnounceDriver(HostAdapter); - if (HostAdapter->HostAdapterBusType == BusLogic_PCI_Bus) { - BusLogic_Error("While configuring BusLogic PCI Host Adapter at\n", HostAdapter); - BusLogic_Error("Bus %d Device %d I/O Address 0x%X PCI Address 0x%X:\n", HostAdapter, HostAdapter->Bus, HostAdapter->Device, HostAdapter->IO_Address, HostAdapter->PCI_Address); + blogic_announce_drvr(adapter); + if (adapter->adapter_bus_type == BLOGIC_PCI_BUS) { + blogic_err("While configuring BusLogic PCI Host Adapter at\n", + adapter); + blogic_err("Bus %d Device %d I/O Address 0x%X PCI Address 0x%X:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr); } else - BusLogic_Error("While configuring BusLogic Host Adapter at " "I/O Address 0x%X:\n", HostAdapter, HostAdapter->IO_Address); - BusLogic_Error("%s FAILED - DETACHING\n", HostAdapter, ErrorMessage); - if (BusLogic_CommandFailureReason != NULL) - BusLogic_Error("ADDITIONAL FAILURE INFO - %s\n", HostAdapter, BusLogic_CommandFailureReason); + blogic_err("While configuring BusLogic Host Adapter at " "I/O Address 0x%X:\n", adapter, adapter->io_addr); + blogic_err("%s FAILED - DETACHING\n", adapter, msg); + if (blogic_cmd_failure_reason != NULL) + blogic_err("ADDITIONAL FAILURE INFO - %s\n", adapter, + blogic_cmd_failure_reason); return false; } /* - BusLogic_ProbeHostAdapter probes for a BusLogic Host Adapter. + blogic_probe probes for a BusLogic Host Adapter. 
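The probe-order reversal above is a plain block rotation: the MultiMaster tail of the list is moved in front of the FlashPoint head through a scratch copy, in exactly three memcpy() calls. A self-contained userspace rendering of the same move, with a reduced struct for illustration:

#include <string.h>

#define MAX_ADAPTERS 16

struct probeinfo { unsigned long io_addr; /* ... */ };

static void rotate_mm_first(struct probeinfo *list, int fpcount, int total)
{
        struct probeinfo saved[MAX_ADAPTERS];
        int mmcount = total - fpcount;

        memcpy(saved, list, total * sizeof(*list));
        memcpy(&list[0], &saved[fpcount], mmcount * sizeof(*list));
        memcpy(&list[mmcount], &saved[0], fpcount * sizeof(*list));
}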
*/ -static bool __init BusLogic_ProbeHostAdapter(struct BusLogic_HostAdapter *HostAdapter) +static bool __init blogic_probe(struct blogic_adapter *adapter) { - union BusLogic_StatusRegister StatusRegister; - union BusLogic_InterruptRegister InterruptRegister; - union BusLogic_GeometryRegister GeometryRegister; + union blogic_stat_reg statusreg; + union blogic_int_reg intreg; + union blogic_geo_reg georeg; /* FlashPoint Host Adapters are Probed by the FlashPoint SCCB Manager. */ - if (BusLogic_FlashPointHostAdapterP(HostAdapter)) { - struct FlashPoint_Info *FlashPointInfo = &HostAdapter->FlashPointInfo; - FlashPointInfo->BaseAddress = (u32) HostAdapter->IO_Address; - FlashPointInfo->IRQ_Channel = HostAdapter->IRQ_Channel; - FlashPointInfo->Present = false; - if (!(FlashPoint_ProbeHostAdapter(FlashPointInfo) == 0 && FlashPointInfo->Present)) { - BusLogic_Error("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", HostAdapter, HostAdapter->Bus, HostAdapter->Device); - BusLogic_Error("BusLogic: I/O Address 0x%X PCI Address 0x%X, " "but FlashPoint\n", HostAdapter, HostAdapter->IO_Address, HostAdapter->PCI_Address); - BusLogic_Error("BusLogic: Probe Function failed to validate it.\n", HostAdapter); + if (blogic_flashpoint_type(adapter)) { + struct fpoint_info *fpinfo = &adapter->fpinfo; + fpinfo->base_addr = (u32) adapter->io_addr; + fpinfo->irq_ch = adapter->irq_ch; + fpinfo->present = false; + if (!(FlashPoint_ProbeHostAdapter(fpinfo) == 0 && + fpinfo->present)) { + blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev); + blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, " "but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr); + blogic_err("BusLogic: Probe Function failed to validate it.\n", adapter); return false; } - if (BusLogic_GlobalOptions.TraceProbe) - BusLogic_Notice("BusLogic_Probe(0x%X): FlashPoint Found\n", HostAdapter, HostAdapter->IO_Address); + if (blogic_global_options.trace_probe) + blogic_notice("BusLogic_Probe(0x%X): FlashPoint Found\n", adapter, adapter->io_addr); /* Indicate the Host Adapter Probe completed successfully. */ @@ -1068,28 +1154,32 @@ static bool __init BusLogic_ProbeHostAdapter(struct BusLogic_HostAdapter *HostAd case there is definitely no BusLogic Host Adapter at this base I/O Address. The test here is a subset of that used by the BusLogic Host Adapter BIOS. 
*/ - StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter); - InterruptRegister.All = BusLogic_ReadInterruptRegister(HostAdapter); - GeometryRegister.All = BusLogic_ReadGeometryRegister(HostAdapter); - if (BusLogic_GlobalOptions.TraceProbe) - BusLogic_Notice("BusLogic_Probe(0x%X): Status 0x%02X, Interrupt 0x%02X, " "Geometry 0x%02X\n", HostAdapter, HostAdapter->IO_Address, StatusRegister.All, InterruptRegister.All, GeometryRegister.All); - if (StatusRegister.All == 0 || StatusRegister.sr.DiagnosticActive || StatusRegister.sr.CommandParameterRegisterBusy || StatusRegister.sr.Reserved || StatusRegister.sr.CommandInvalid || InterruptRegister.ir.Reserved != 0) + statusreg.all = blogic_rdstatus(adapter); + intreg.all = blogic_rdint(adapter); + georeg.all = blogic_rdgeom(adapter); + if (blogic_global_options.trace_probe) + blogic_notice("BusLogic_Probe(0x%X): Status 0x%02X, Interrupt 0x%02X, " "Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all); + if (statusreg.all == 0 || statusreg.sr.diag_active || + statusreg.sr.cmd_param_busy || statusreg.sr.rsvd || + statusreg.sr.cmd_invalid || intreg.ir.rsvd != 0) return false; /* - Check the undocumented Geometry Register to test if there is an I/O port - that responded. Adaptec Host Adapters do not implement the Geometry - Register, so this test helps serve to avoid incorrectly recognizing an - Adaptec 1542A or 1542B as a BusLogic. Unfortunately, the Adaptec 1542C - series does respond to the Geometry Register I/O port, but it will be - rejected later when the Inquire Extended Setup Information command is - issued in BusLogic_CheckHostAdapter. The AMI FastDisk Host Adapter is a - BusLogic clone that implements the same interface as earlier BusLogic - Host Adapters, including the undocumented commands, and is therefore - supported by this driver. However, the AMI FastDisk always returns 0x00 - upon reading the Geometry Register, so the extended translation option - should always be left disabled on the AMI FastDisk. - */ - if (GeometryRegister.All == 0xFF) + Check the undocumented Geometry Register to test if there is + an I/O port that responded. Adaptec Host Adapters do not + implement the Geometry Register, so this test helps serve to + avoid incorrectly recognizing an Adaptec 1542A or 1542B as a + BusLogic. Unfortunately, the Adaptec 1542C series does respond + to the Geometry Register I/O port, but it will be rejected + later when the Inquire Extended Setup Information command is + issued in blogic_checkadapter. The AMI FastDisk Host Adapter + is a BusLogic clone that implements the same interface as + earlier BusLogic Host Adapters, including the undocumented + commands, and is therefore supported by this driver. However, + the AMI FastDisk always returns 0x00 upon reading the Geometry + Register, so the extended translation option should always be + left disabled on the AMI FastDisk. + */ + if (georeg.all == 0xFF) return false; /* Indicate the Host Adapter Probe completed successfully. @@ -1099,27 +1189,28 @@ static bool __init BusLogic_ProbeHostAdapter(struct BusLogic_HostAdapter *HostAd /* - BusLogic_HardwareResetHostAdapter issues a Hardware Reset to the Host Adapter - and waits for Host Adapter Diagnostics to complete. If HardReset is true, a + blogic_hwreset issues a Hardware Reset to the Host Adapter + and waits for Host Adapter Diagnostics to complete. If hard_reset is true, a Hard Reset is performed which also initiates a SCSI Bus Reset. 
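The port signature test reads three registers through unions that overlay a bitfield view on the raw byte. A userspace sketch of the pattern; the bit order follows the status-byte layout declared in BusLogic.h (not visible in this hunk), so treat it as indicative rather than authoritative:

#include <stdbool.h>
#include <stdint.h>

union stat_reg {
        uint8_t all;
        struct {                        /* bit order per BusLogic.h */
                uint8_t cmd_invalid:1;
                uint8_t rsvd:1;
                uint8_t datain_ready:1;
                uint8_t cmd_param_busy:1;
                uint8_t adapter_ready:1;
                uint8_t init_reqd:1;
                uint8_t diag_failed:1;
                uint8_t diag_active:1;
        } sr;
};

static bool signature_ok(uint8_t status, uint8_t geometry)
{
        union stat_reg reg = { .all = status };

        if (reg.all == 0 || reg.sr.diag_active || reg.sr.cmd_param_busy ||
            reg.sr.rsvd || reg.sr.cmd_invalid)
                return false;   /* not a MultiMaster status signature */
        return geometry != 0xFF;        /* 0xFF: nothing answered the port */
}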
Otherwise, a Soft Reset is performed which only resets the Host Adapter without forcing a SCSI Bus Reset. */ -static bool BusLogic_HardwareResetHostAdapter(struct BusLogic_HostAdapter - *HostAdapter, bool HardReset) +static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset) { - union BusLogic_StatusRegister StatusRegister; - int TimeoutCounter; + union blogic_stat_reg statusreg; + int timeout; /* - FlashPoint Host Adapters are Hard Reset by the FlashPoint SCCB Manager. + FlashPoint Host Adapters are Hard Reset by the FlashPoint + SCCB Manager. */ - if (BusLogic_FlashPointHostAdapterP(HostAdapter)) { - struct FlashPoint_Info *FlashPointInfo = &HostAdapter->FlashPointInfo; - FlashPointInfo->HostSoftReset = !HardReset; - FlashPointInfo->ReportDataUnderrun = true; - HostAdapter->CardHandle = FlashPoint_HardwareResetHostAdapter(FlashPointInfo); - if (HostAdapter->CardHandle == FlashPoint_BadCardHandle) + if (blogic_flashpoint_type(adapter)) { + struct fpoint_info *fpinfo = &adapter->fpinfo; + fpinfo->softreset = !hard_reset; + fpinfo->report_underrun = true; + adapter->cardhandle = + FlashPoint_HardwareResetHostAdapter(fpinfo); + if (adapter->cardhandle == (void *)FPOINT_BADCARD_HANDLE) return false; /* Indicate the Host Adapter Hard Reset completed successfully. @@ -1127,26 +1218,27 @@ static bool BusLogic_HardwareResetHostAdapter(struct BusLogic_HostAdapter return true; } /* - Issue a Hard Reset or Soft Reset Command to the Host Adapter. The Host - Adapter should respond by setting Diagnostic Active in the Status Register. + Issue a Hard Reset or Soft Reset Command to the Host Adapter. + The Host Adapter should respond by setting Diagnostic Active in + the Status Register. */ - if (HardReset) - BusLogic_HardReset(HostAdapter); + if (hard_reset) + blogic_hardreset(adapter); else - BusLogic_SoftReset(HostAdapter); + blogic_softreset(adapter); /* Wait until Diagnostic Active is set in the Status Register. */ - TimeoutCounter = 5 * 10000; - while (--TimeoutCounter >= 0) { - StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter); - if (StatusRegister.sr.DiagnosticActive) + timeout = 5 * 10000; + while (--timeout >= 0) { + statusreg.all = blogic_rdstatus(adapter); + if (statusreg.sr.diag_active) break; udelay(100); } - if (BusLogic_GlobalOptions.TraceHardwareReset) - BusLogic_Notice("BusLogic_HardwareReset(0x%X): Diagnostic Active, " "Status 0x%02X\n", HostAdapter, HostAdapter->IO_Address, StatusRegister.All); - if (TimeoutCounter < 0) + if (blogic_global_options.trace_hw_reset) + blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Active, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); + if (timeout < 0) return false; /* Wait 100 microseconds to allow completion of any initial diagnostic @@ -1157,45 +1249,47 @@ static bool BusLogic_HardwareResetHostAdapter(struct BusLogic_HostAdapter /* Wait until Diagnostic Active is reset in the Status Register. 
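All three reset-handshake waits in blogic_hwreset() follow one pattern: a bounded spin that samples the status register every 100 microseconds and gives up when the counter runs out, so a wedged board can never hang the probe. The pattern reduced to a userspace helper; cond() and usleep() stand in for the register test and udelay():

#include <stdbool.h>
#include <unistd.h>

static bool poll_100us(bool (*cond)(void), int tries)
{
        while (--tries >= 0) {
                if (cond())
                        return true;    /* condition met within budget */
                usleep(100);            /* 100us between samples */
        }
        return false;                   /* timed out */
}

/* e.g. the diag-active wait above: poll_100us(diag_active_set, 5 * 10000); */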
*/ - TimeoutCounter = 10 * 10000; - while (--TimeoutCounter >= 0) { - StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter); - if (!StatusRegister.sr.DiagnosticActive) + timeout = 10 * 10000; + while (--timeout >= 0) { + statusreg.all = blogic_rdstatus(adapter); + if (!statusreg.sr.diag_active) break; udelay(100); } - if (BusLogic_GlobalOptions.TraceHardwareReset) - BusLogic_Notice("BusLogic_HardwareReset(0x%X): Diagnostic Completed, " "Status 0x%02X\n", HostAdapter, HostAdapter->IO_Address, StatusRegister.All); - if (TimeoutCounter < 0) + if (blogic_global_options.trace_hw_reset) + blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Completed, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); + if (timeout < 0) return false; /* - Wait until at least one of the Diagnostic Failure, Host Adapter Ready, - or Data In Register Ready bits is set in the Status Register. + Wait until at least one of the Diagnostic Failure, Host Adapter + Ready, or Data In Register Ready bits is set in the Status Register. */ - TimeoutCounter = 10000; - while (--TimeoutCounter >= 0) { - StatusRegister.All = BusLogic_ReadStatusRegister(HostAdapter); - if (StatusRegister.sr.DiagnosticFailure || StatusRegister.sr.HostAdapterReady || StatusRegister.sr.DataInRegisterReady) + timeout = 10000; + while (--timeout >= 0) { + statusreg.all = blogic_rdstatus(adapter); + if (statusreg.sr.diag_failed || statusreg.sr.adapter_ready || + statusreg.sr.datain_ready) break; udelay(100); } - if (BusLogic_GlobalOptions.TraceHardwareReset) - BusLogic_Notice("BusLogic_HardwareReset(0x%X): Host Adapter Ready, " "Status 0x%02X\n", HostAdapter, HostAdapter->IO_Address, StatusRegister.All); - if (TimeoutCounter < 0) + if (blogic_global_options.trace_hw_reset) + blogic_notice("BusLogic_HardwareReset(0x%X): Host Adapter Ready, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all); + if (timeout < 0) return false; /* - If Diagnostic Failure is set or Host Adapter Ready is reset, then an - error occurred during the Host Adapter diagnostics. If Data In Register - Ready is set, then there is an Error Code available. - */ - if (StatusRegister.sr.DiagnosticFailure || !StatusRegister.sr.HostAdapterReady) { - BusLogic_CommandFailureReason = NULL; - BusLogic_Failure(HostAdapter, "HARD RESET DIAGNOSTICS"); - BusLogic_Error("HOST ADAPTER STATUS REGISTER = %02X\n", HostAdapter, StatusRegister.All); - if (StatusRegister.sr.DataInRegisterReady) { - unsigned char ErrorCode = BusLogic_ReadDataInRegister(HostAdapter); - BusLogic_Error("HOST ADAPTER ERROR CODE = %d\n", HostAdapter, ErrorCode); - } + If Diagnostic Failure is set or Host Adapter Ready is reset, + then an error occurred during the Host Adapter diagnostics. + If Data In Register Ready is set, then there is an Error Code + available. + */ + if (statusreg.sr.diag_failed || !statusreg.sr.adapter_ready) { + blogic_cmd_failure_reason = NULL; + blogic_failure(adapter, "HARD RESET DIAGNOSTICS"); + blogic_err("HOST ADAPTER STATUS REGISTER = %02X\n", adapter, + statusreg.all); + if (statusreg.sr.datain_ready) + blogic_err("HOST ADAPTER ERROR CODE = %d\n", adapter, + blogic_rddatain(adapter)); return false; } /* @@ -1206,161 +1300,175 @@ static bool BusLogic_HardwareResetHostAdapter(struct BusLogic_HostAdapter /* - BusLogic_CheckHostAdapter checks to be sure this really is a BusLogic + blogic_checkadapter checks to be sure this really is a BusLogic Host Adapter. 
*/ -static bool __init BusLogic_CheckHostAdapter(struct BusLogic_HostAdapter *HostAdapter) +static bool __init blogic_checkadapter(struct blogic_adapter *adapter) { - struct BusLogic_ExtendedSetupInformation ExtendedSetupInformation; - unsigned char RequestedReplyLength; - bool Result = true; + struct blogic_ext_setup ext_setupinfo; + unsigned char req_replylen; + bool result = true; /* FlashPoint Host Adapters do not require this protection. */ - if (BusLogic_FlashPointHostAdapterP(HostAdapter)) + if (blogic_flashpoint_type(adapter)) return true; /* - Issue the Inquire Extended Setup Information command. Only genuine - BusLogic Host Adapters and true clones support this command. Adaptec 1542C - series Host Adapters that respond to the Geometry Register I/O port will - fail this command. + Issue the Inquire Extended Setup Information command. Only genuine + BusLogic Host Adapters and true clones support this command. + Adaptec 1542C series Host Adapters that respond to the Geometry + Register I/O port will fail this command. */ - RequestedReplyLength = sizeof(ExtendedSetupInformation); - if (BusLogic_Command(HostAdapter, BusLogic_InquireExtendedSetupInformation, &RequestedReplyLength, sizeof(RequestedReplyLength), &ExtendedSetupInformation, sizeof(ExtendedSetupInformation)) - != sizeof(ExtendedSetupInformation)) - Result = false; + req_replylen = sizeof(ext_setupinfo); + if (blogic_cmd(adapter, BLOGIC_INQ_EXTSETUP, &req_replylen, + sizeof(req_replylen), &ext_setupinfo, + sizeof(ext_setupinfo)) != sizeof(ext_setupinfo)) + result = false; /* Provide tracing information if requested and return. */ - if (BusLogic_GlobalOptions.TraceProbe) - BusLogic_Notice("BusLogic_Check(0x%X): MultiMaster %s\n", HostAdapter, HostAdapter->IO_Address, (Result ? "Found" : "Not Found")); - return Result; + if (blogic_global_options.trace_probe) + blogic_notice("BusLogic_Check(0x%X): MultiMaster %s\n", adapter, + adapter->io_addr, + (result ? "Found" : "Not Found")); + return result; } /* - BusLogic_ReadHostAdapterConfiguration reads the Configuration Information + blogic_rdconfig reads the Configuration Information from Host Adapter and initializes the Host Adapter structure. */ -static bool __init BusLogic_ReadHostAdapterConfiguration(struct BusLogic_HostAdapter - *HostAdapter) +static bool __init blogic_rdconfig(struct blogic_adapter *adapter) { - struct BusLogic_BoardID BoardID; - struct BusLogic_Configuration Configuration; - struct BusLogic_SetupInformation SetupInformation; - struct BusLogic_ExtendedSetupInformation ExtendedSetupInformation; - unsigned char HostAdapterModelNumber[5]; - unsigned char FirmwareVersion3rdDigit; - unsigned char FirmwareVersionLetter; - struct BusLogic_PCIHostAdapterInformation PCIHostAdapterInformation; - struct BusLogic_FetchHostAdapterLocalRAMRequest FetchHostAdapterLocalRAMRequest; - struct BusLogic_AutoSCSIData AutoSCSIData; - union BusLogic_GeometryRegister GeometryRegister; - unsigned char RequestedReplyLength; - unsigned char *TargetPointer, Character; - int TargetID, i; - /* - Configuration Information for FlashPoint Host Adapters is provided in the - FlashPoint_Info structure by the FlashPoint SCCB Manager's Probe Function. - Initialize fields in the Host Adapter structure from the FlashPoint_Info - structure. 
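Every mailbox inquiry in this driver is validated the same way: the caller passes the expected reply size and treats any short or failed transfer as "not a BusLogic". The convention isolated into a sketch; do_cmd() is a hypothetical stand-in for blogic_cmd() that returns the number of reply bytes actually transferred, and the struct is reduced for illustration:

#include <stdbool.h>
#include <stddef.h>

struct ext_setup { unsigned char bus_type; unsigned char sg_limit; };

typedef int (*cmd_fn)(int op, void *param, size_t plen,
                      void *reply, size_t rlen);

static bool inquiry_ok(cmd_fn do_cmd, int op)
{
        struct ext_setup info;
        unsigned char want = sizeof(info);

        /* a genuine board answers with exactly sizeof(info) bytes */
        return do_cmd(op, &want, sizeof(want), &info, sizeof(info))
                == sizeof(info);
}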
- */ - if (BusLogic_FlashPointHostAdapterP(HostAdapter)) { - struct FlashPoint_Info *FlashPointInfo = &HostAdapter->FlashPointInfo; - TargetPointer = HostAdapter->ModelName; - *TargetPointer++ = 'B'; - *TargetPointer++ = 'T'; - *TargetPointer++ = '-'; - for (i = 0; i < sizeof(FlashPointInfo->ModelNumber); i++) - *TargetPointer++ = FlashPointInfo->ModelNumber[i]; - *TargetPointer++ = '\0'; - strcpy(HostAdapter->FirmwareVersion, FlashPoint_FirmwareVersion); - HostAdapter->SCSI_ID = FlashPointInfo->SCSI_ID; - HostAdapter->ExtendedTranslationEnabled = FlashPointInfo->ExtendedTranslationEnabled; - HostAdapter->ParityCheckingEnabled = FlashPointInfo->ParityCheckingEnabled; - HostAdapter->BusResetEnabled = !FlashPointInfo->HostSoftReset; - HostAdapter->LevelSensitiveInterrupt = true; - HostAdapter->HostWideSCSI = FlashPointInfo->HostWideSCSI; - HostAdapter->HostDifferentialSCSI = false; - HostAdapter->HostSupportsSCAM = true; - HostAdapter->HostUltraSCSI = true; - HostAdapter->ExtendedLUNSupport = true; - HostAdapter->TerminationInfoValid = true; - HostAdapter->LowByteTerminated = FlashPointInfo->LowByteTerminated; - HostAdapter->HighByteTerminated = FlashPointInfo->HighByteTerminated; - HostAdapter->SCAM_Enabled = FlashPointInfo->SCAM_Enabled; - HostAdapter->SCAM_Level2 = FlashPointInfo->SCAM_Level2; - HostAdapter->DriverScatterGatherLimit = BusLogic_ScatterGatherLimit; - HostAdapter->MaxTargetDevices = (HostAdapter->HostWideSCSI ? 16 : 8); - HostAdapter->MaxLogicalUnits = 32; - HostAdapter->InitialCCBs = 4 * BusLogic_CCB_AllocationGroupSize; - HostAdapter->IncrementalCCBs = BusLogic_CCB_AllocationGroupSize; - HostAdapter->DriverQueueDepth = 255; - HostAdapter->HostAdapterQueueDepth = HostAdapter->DriverQueueDepth; - HostAdapter->SynchronousPermitted = FlashPointInfo->SynchronousPermitted; - HostAdapter->FastPermitted = FlashPointInfo->FastPermitted; - HostAdapter->UltraPermitted = FlashPointInfo->UltraPermitted; - HostAdapter->WidePermitted = FlashPointInfo->WidePermitted; - HostAdapter->DisconnectPermitted = FlashPointInfo->DisconnectPermitted; - HostAdapter->TaggedQueuingPermitted = 0xFFFF; - goto Common; + struct blogic_board_id id; + struct blogic_config config; + struct blogic_setup_info setupinfo; + struct blogic_ext_setup ext_setupinfo; + unsigned char model[5]; + unsigned char fw_ver_digit3; + unsigned char fw_ver_letter; + struct blogic_adapter_info adapter_info; + struct blogic_fetch_localram fetch_localram; + struct blogic_autoscsi autoscsi; + union blogic_geo_reg georeg; + unsigned char req_replylen; + unsigned char *tgt, ch; + int tgt_id, i; + /* + Configuration Information for FlashPoint Host Adapters is + provided in the fpoint_info structure by the FlashPoint + SCCB Manager's Probe Function. Initialize fields in the + Host Adapter structure from the fpoint_info structure. 
+ */ + if (blogic_flashpoint_type(adapter)) { + struct fpoint_info *fpinfo = &adapter->fpinfo; + tgt = adapter->model; + *tgt++ = 'B'; + *tgt++ = 'T'; + *tgt++ = '-'; + for (i = 0; i < sizeof(fpinfo->model); i++) + *tgt++ = fpinfo->model[i]; + *tgt++ = '\0'; + strcpy(adapter->fw_ver, FLASHPOINT_FW_VER); + adapter->scsi_id = fpinfo->scsi_id; + adapter->ext_trans_enable = fpinfo->ext_trans_enable; + adapter->parity = fpinfo->parity; + adapter->reset_enabled = !fpinfo->softreset; + adapter->level_int = true; + adapter->wide = fpinfo->wide; + adapter->differential = false; + adapter->scam = true; + adapter->ultra = true; + adapter->ext_lun = true; + adapter->terminfo_valid = true; + adapter->low_term = fpinfo->low_term; + adapter->high_term = fpinfo->high_term; + adapter->scam_enabled = fpinfo->scam_enabled; + adapter->scam_lev2 = fpinfo->scam_lev2; + adapter->drvr_sglimit = BLOGIC_SG_LIMIT; + adapter->maxdev = (adapter->wide ? 16 : 8); + adapter->maxlun = 32; + adapter->initccbs = 4 * BLOGIC_CCB_GRP_ALLOCSIZE; + adapter->inc_ccbs = BLOGIC_CCB_GRP_ALLOCSIZE; + adapter->drvr_qdepth = 255; + adapter->adapter_qdepth = adapter->drvr_qdepth; + adapter->sync_ok = fpinfo->sync_ok; + adapter->fast_ok = fpinfo->fast_ok; + adapter->ultra_ok = fpinfo->ultra_ok; + adapter->wide_ok = fpinfo->wide_ok; + adapter->discon_ok = fpinfo->discon_ok; + adapter->tagq_ok = 0xFFFF; + goto common; } /* Issue the Inquire Board ID command. */ - if (BusLogic_Command(HostAdapter, BusLogic_InquireBoardID, NULL, 0, &BoardID, sizeof(BoardID)) != sizeof(BoardID)) - return BusLogic_Failure(HostAdapter, "INQUIRE BOARD ID"); + if (blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id, + sizeof(id)) != sizeof(id)) + return blogic_failure(adapter, "INQUIRE BOARD ID"); /* Issue the Inquire Configuration command. */ - if (BusLogic_Command(HostAdapter, BusLogic_InquireConfiguration, NULL, 0, &Configuration, sizeof(Configuration)) - != sizeof(Configuration)) - return BusLogic_Failure(HostAdapter, "INQUIRE CONFIGURATION"); + if (blogic_cmd(adapter, BLOGIC_INQ_CONFIG, NULL, 0, &config, + sizeof(config)) + != sizeof(config)) + return blogic_failure(adapter, "INQUIRE CONFIGURATION"); /* Issue the Inquire Setup Information command. */ - RequestedReplyLength = sizeof(SetupInformation); - if (BusLogic_Command(HostAdapter, BusLogic_InquireSetupInformation, &RequestedReplyLength, sizeof(RequestedReplyLength), &SetupInformation, sizeof(SetupInformation)) - != sizeof(SetupInformation)) - return BusLogic_Failure(HostAdapter, "INQUIRE SETUP INFORMATION"); + req_replylen = sizeof(setupinfo); + if (blogic_cmd(adapter, BLOGIC_INQ_SETUPINFO, &req_replylen, + sizeof(req_replylen), &setupinfo, + sizeof(setupinfo)) != sizeof(setupinfo)) + return blogic_failure(adapter, "INQUIRE SETUP INFORMATION"); /* Issue the Inquire Extended Setup Information command. */ - RequestedReplyLength = sizeof(ExtendedSetupInformation); - if (BusLogic_Command(HostAdapter, BusLogic_InquireExtendedSetupInformation, &RequestedReplyLength, sizeof(RequestedReplyLength), &ExtendedSetupInformation, sizeof(ExtendedSetupInformation)) - != sizeof(ExtendedSetupInformation)) - return BusLogic_Failure(HostAdapter, "INQUIRE EXTENDED SETUP INFORMATION"); + req_replylen = sizeof(ext_setupinfo); + if (blogic_cmd(adapter, BLOGIC_INQ_EXTSETUP, &req_replylen, + sizeof(req_replylen), &ext_setupinfo, + sizeof(ext_setupinfo)) != sizeof(ext_setupinfo)) + return blogic_failure(adapter, + "INQUIRE EXTENDED SETUP INFORMATION"); /* Issue the Inquire Firmware Version 3rd Digit command. 
*/ - FirmwareVersion3rdDigit = '\0'; - if (BoardID.FirmwareVersion1stDigit > '0') - if (BusLogic_Command(HostAdapter, BusLogic_InquireFirmwareVersion3rdDigit, NULL, 0, &FirmwareVersion3rdDigit, sizeof(FirmwareVersion3rdDigit)) - != sizeof(FirmwareVersion3rdDigit)) - return BusLogic_Failure(HostAdapter, "INQUIRE FIRMWARE 3RD DIGIT"); + fw_ver_digit3 = '\0'; + if (id.fw_ver_digit1 > '0') + if (blogic_cmd(adapter, BLOGIC_INQ_FWVER_D3, NULL, 0, + &fw_ver_digit3, + sizeof(fw_ver_digit3)) != sizeof(fw_ver_digit3)) + return blogic_failure(adapter, + "INQUIRE FIRMWARE 3RD DIGIT"); /* Issue the Inquire Host Adapter Model Number command. */ - if (ExtendedSetupInformation.BusType == 'A' && BoardID.FirmwareVersion1stDigit == '2') + if (ext_setupinfo.bus_type == 'A' && id.fw_ver_digit1 == '2') /* BusLogic BT-542B ISA 2.xx */ - strcpy(HostAdapterModelNumber, "542B"); - else if (ExtendedSetupInformation.BusType == 'E' && BoardID.FirmwareVersion1stDigit == '2' && (BoardID.FirmwareVersion2ndDigit <= '1' || (BoardID.FirmwareVersion2ndDigit == '2' && FirmwareVersion3rdDigit == '0'))) + strcpy(model, "542B"); + else if (ext_setupinfo.bus_type == 'E' && id.fw_ver_digit1 == '2' && + (id.fw_ver_digit2 <= '1' || (id.fw_ver_digit2 == '2' && + fw_ver_digit3 == '0'))) /* BusLogic BT-742A EISA 2.1x or 2.20 */ - strcpy(HostAdapterModelNumber, "742A"); - else if (ExtendedSetupInformation.BusType == 'E' && BoardID.FirmwareVersion1stDigit == '0') + strcpy(model, "742A"); + else if (ext_setupinfo.bus_type == 'E' && id.fw_ver_digit1 == '0') /* AMI FastDisk EISA Series 441 0.x */ - strcpy(HostAdapterModelNumber, "747A"); + strcpy(model, "747A"); else { - RequestedReplyLength = sizeof(HostAdapterModelNumber); - if (BusLogic_Command(HostAdapter, BusLogic_InquireHostAdapterModelNumber, &RequestedReplyLength, sizeof(RequestedReplyLength), &HostAdapterModelNumber, sizeof(HostAdapterModelNumber)) - != sizeof(HostAdapterModelNumber)) - return BusLogic_Failure(HostAdapter, "INQUIRE HOST ADAPTER MODEL NUMBER"); + req_replylen = sizeof(model); + if (blogic_cmd(adapter, BLOGIC_INQ_MODELNO, &req_replylen, + sizeof(req_replylen), &model, + sizeof(model)) != sizeof(model)) + return blogic_failure(adapter, + "INQUIRE HOST ADAPTER MODEL NUMBER"); } /* - BusLogic MultiMaster Host Adapters can be identified by their model number - and the major version number of their firmware as follows: + BusLogic MultiMaster Host Adapters can be identified by their + model number and the major version number of their firmware + as follows: 5.xx BusLogic "W" Series Host Adapters: BT-948/958/958D @@ -1374,497 +1482,535 @@ static bool __init BusLogic_ReadHostAdapterConfiguration(struct BusLogic_HostAda 0.xx AMI FastDisk VLB/EISA BusLogic Clone Host Adapter */ /* - Save the Model Name and Host Adapter Name in the Host Adapter structure. + Save the Model Name and Host Adapter Name in the Host Adapter + structure. */ - TargetPointer = HostAdapter->ModelName; - *TargetPointer++ = 'B'; - *TargetPointer++ = 'T'; - *TargetPointer++ = '-'; - for (i = 0; i < sizeof(HostAdapterModelNumber); i++) { - Character = HostAdapterModelNumber[i]; - if (Character == ' ' || Character == '\0') + tgt = adapter->model; + *tgt++ = 'B'; + *tgt++ = 'T'; + *tgt++ = '-'; + for (i = 0; i < sizeof(model); i++) { + ch = model[i]; + if (ch == ' ' || ch == '\0') break; - *TargetPointer++ = Character; + *tgt++ = ch; } - *TargetPointer++ = '\0'; + *tgt++ = '\0'; /* Save the Firmware Version in the Host Adapter structure. 
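Model names are assembled by hand in both paths above: a literal "BT-" prefix followed by the raw model bytes, with the MultiMaster path stopping at the first space or NUL padding. The loop as a standalone helper, buffer sizing left to the caller as in the driver:

static void build_model(char *out, const unsigned char *raw, int rawlen)
{
        char *p = out;
        int i;

        *p++ = 'B';
        *p++ = 'T';
        *p++ = '-';
        for (i = 0; i < rawlen; i++) {
                if (raw[i] == ' ' || raw[i] == '\0')
                        break;          /* strip trailing padding */
                *p++ = raw[i];
        }
        *p = '\0';
}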
*/ - TargetPointer = HostAdapter->FirmwareVersion; - *TargetPointer++ = BoardID.FirmwareVersion1stDigit; - *TargetPointer++ = '.'; - *TargetPointer++ = BoardID.FirmwareVersion2ndDigit; - if (FirmwareVersion3rdDigit != ' ' && FirmwareVersion3rdDigit != '\0') - *TargetPointer++ = FirmwareVersion3rdDigit; - *TargetPointer = '\0'; + tgt = adapter->fw_ver; + *tgt++ = id.fw_ver_digit1; + *tgt++ = '.'; + *tgt++ = id.fw_ver_digit2; + if (fw_ver_digit3 != ' ' && fw_ver_digit3 != '\0') + *tgt++ = fw_ver_digit3; + *tgt = '\0'; /* Issue the Inquire Firmware Version Letter command. */ - if (strcmp(HostAdapter->FirmwareVersion, "3.3") >= 0) { - if (BusLogic_Command(HostAdapter, BusLogic_InquireFirmwareVersionLetter, NULL, 0, &FirmwareVersionLetter, sizeof(FirmwareVersionLetter)) - != sizeof(FirmwareVersionLetter)) - return BusLogic_Failure(HostAdapter, "INQUIRE FIRMWARE VERSION LETTER"); - if (FirmwareVersionLetter != ' ' && FirmwareVersionLetter != '\0') - *TargetPointer++ = FirmwareVersionLetter; - *TargetPointer = '\0'; + if (strcmp(adapter->fw_ver, "3.3") >= 0) { + if (blogic_cmd(adapter, BLOGIC_INQ_FWVER_LETTER, NULL, 0, + &fw_ver_letter, + sizeof(fw_ver_letter)) != sizeof(fw_ver_letter)) + return blogic_failure(adapter, + "INQUIRE FIRMWARE VERSION LETTER"); + if (fw_ver_letter != ' ' && fw_ver_letter != '\0') + *tgt++ = fw_ver_letter; + *tgt = '\0'; } /* Save the Host Adapter SCSI ID in the Host Adapter structure. */ - HostAdapter->SCSI_ID = Configuration.HostAdapterID; - /* - Determine the Bus Type and save it in the Host Adapter structure, determine - and save the IRQ Channel if necessary, and determine and save the DMA - Channel for ISA Host Adapters. - */ - HostAdapter->HostAdapterBusType = BusLogic_HostAdapterBusTypes[HostAdapter->ModelName[3] - '4']; - if (HostAdapter->IRQ_Channel == 0) { - if (Configuration.IRQ_Channel9) - HostAdapter->IRQ_Channel = 9; - else if (Configuration.IRQ_Channel10) - HostAdapter->IRQ_Channel = 10; - else if (Configuration.IRQ_Channel11) - HostAdapter->IRQ_Channel = 11; - else if (Configuration.IRQ_Channel12) - HostAdapter->IRQ_Channel = 12; - else if (Configuration.IRQ_Channel14) - HostAdapter->IRQ_Channel = 14; - else if (Configuration.IRQ_Channel15) - HostAdapter->IRQ_Channel = 15; - } - if (HostAdapter->HostAdapterBusType == BusLogic_ISA_Bus) { - if (Configuration.DMA_Channel5) - HostAdapter->DMA_Channel = 5; - else if (Configuration.DMA_Channel6) - HostAdapter->DMA_Channel = 6; - else if (Configuration.DMA_Channel7) - HostAdapter->DMA_Channel = 7; + adapter->scsi_id = config.id; + /* + Determine the Bus Type and save it in the Host Adapter structure, + determine and save the IRQ Channel if necessary, and determine + and save the DMA Channel for ISA Host Adapters. + */ + adapter->adapter_bus_type = + blogic_adater_bus_types[adapter->model[3] - '4']; + if (adapter->irq_ch == 0) { + if (config.irq_ch9) + adapter->irq_ch = 9; + else if (config.irq_ch10) + adapter->irq_ch = 10; + else if (config.irq_ch11) + adapter->irq_ch = 11; + else if (config.irq_ch12) + adapter->irq_ch = 12; + else if (config.irq_ch14) + adapter->irq_ch = 14; + else if (config.irq_ch15) + adapter->irq_ch = 15; + } + if (adapter->adapter_bus_type == BLOGIC_ISA_BUS) { + if (config.dma_ch5) + adapter->dma_ch = 5; + else if (config.dma_ch6) + adapter->dma_ch = 6; + else if (config.dma_ch7) + adapter->dma_ch = 7; } /* Determine whether Extended Translation is enabled and save it in the Host Adapter structure. 
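The Inquire Configuration reply reports the IRQ and DMA assignments as one-hot flag bits, which the if/else ladders above map back to channel numbers. The same decode, table-driven; the flag-to-bit layout here is assumed for illustration only:

static int decode_irq(unsigned char irq_flags)
{
        static const int channel[] = { 9, 10, 11, 12, 14, 15 };
        int i;

        for (i = 0; i < 6; i++)
                if (irq_flags & (1 << i))
                        return channel[i];
        return 0;       /* no flag set: leave the channel unassigned */
}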
*/ - GeometryRegister.All = BusLogic_ReadGeometryRegister(HostAdapter); - HostAdapter->ExtendedTranslationEnabled = GeometryRegister.gr.ExtendedTranslationEnabled; + georeg.all = blogic_rdgeom(adapter); + adapter->ext_trans_enable = georeg.gr.ext_trans_enable; /* Save the Scatter Gather Limits, Level Sensitive Interrupt flag, Wide SCSI flag, Differential SCSI flag, SCAM Supported flag, and Ultra SCSI flag in the Host Adapter structure. */ - HostAdapter->HostAdapterScatterGatherLimit = ExtendedSetupInformation.ScatterGatherLimit; - HostAdapter->DriverScatterGatherLimit = HostAdapter->HostAdapterScatterGatherLimit; - if (HostAdapter->HostAdapterScatterGatherLimit > BusLogic_ScatterGatherLimit) - HostAdapter->DriverScatterGatherLimit = BusLogic_ScatterGatherLimit; - if (ExtendedSetupInformation.Misc.LevelSensitiveInterrupt) - HostAdapter->LevelSensitiveInterrupt = true; - HostAdapter->HostWideSCSI = ExtendedSetupInformation.HostWideSCSI; - HostAdapter->HostDifferentialSCSI = ExtendedSetupInformation.HostDifferentialSCSI; - HostAdapter->HostSupportsSCAM = ExtendedSetupInformation.HostSupportsSCAM; - HostAdapter->HostUltraSCSI = ExtendedSetupInformation.HostUltraSCSI; + adapter->adapter_sglimit = ext_setupinfo.sg_limit; + adapter->drvr_sglimit = adapter->adapter_sglimit; + if (adapter->adapter_sglimit > BLOGIC_SG_LIMIT) + adapter->drvr_sglimit = BLOGIC_SG_LIMIT; + if (ext_setupinfo.misc.level_int) + adapter->level_int = true; + adapter->wide = ext_setupinfo.wide; + adapter->differential = ext_setupinfo.differential; + adapter->scam = ext_setupinfo.scam; + adapter->ultra = ext_setupinfo.ultra; /* Determine whether Extended LUN Format CCBs are supported and save the information in the Host Adapter structure. */ - if (HostAdapter->FirmwareVersion[0] == '5' || (HostAdapter->FirmwareVersion[0] == '4' && HostAdapter->HostWideSCSI)) - HostAdapter->ExtendedLUNSupport = true; + if (adapter->fw_ver[0] == '5' || (adapter->fw_ver[0] == '4' && + adapter->wide)) + adapter->ext_lun = true; /* Issue the Inquire PCI Host Adapter Information command to read the Termination Information from "W" series MultiMaster Host Adapters. */ - if (HostAdapter->FirmwareVersion[0] == '5') { - if (BusLogic_Command(HostAdapter, BusLogic_InquirePCIHostAdapterInformation, NULL, 0, &PCIHostAdapterInformation, sizeof(PCIHostAdapterInformation)) - != sizeof(PCIHostAdapterInformation)) - return BusLogic_Failure(HostAdapter, "INQUIRE PCI HOST ADAPTER INFORMATION"); + if (adapter->fw_ver[0] == '5') { + if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0, + &adapter_info, + sizeof(adapter_info)) != sizeof(adapter_info)) + return blogic_failure(adapter, + "INQUIRE PCI HOST ADAPTER INFORMATION"); /* - Save the Termination Information in the Host Adapter structure. + Save the Termination Information in the Host Adapter + structure. */ - if (PCIHostAdapterInformation.GenericInfoValid) { - HostAdapter->TerminationInfoValid = true; - HostAdapter->LowByteTerminated = PCIHostAdapterInformation.LowByteTerminated; - HostAdapter->HighByteTerminated = PCIHostAdapterInformation.HighByteTerminated; + if (adapter_info.genericinfo_valid) { + adapter->terminfo_valid = true; + adapter->low_term = adapter_info.low_term; + adapter->high_term = adapter_info.high_term; } } /* - Issue the Fetch Host Adapter Local RAM command to read the AutoSCSI data - from "W" and "C" series MultiMaster Host Adapters. + Issue the Fetch Host Adapter Local RAM command to read the + AutoSCSI data from "W" and "C" series MultiMaster Host Adapters. 
*/ - if (HostAdapter->FirmwareVersion[0] >= '4') { - FetchHostAdapterLocalRAMRequest.ByteOffset = BusLogic_AutoSCSI_BaseOffset; - FetchHostAdapterLocalRAMRequest.ByteCount = sizeof(AutoSCSIData); - if (BusLogic_Command(HostAdapter, BusLogic_FetchHostAdapterLocalRAM, &FetchHostAdapterLocalRAMRequest, sizeof(FetchHostAdapterLocalRAMRequest), &AutoSCSIData, sizeof(AutoSCSIData)) - != sizeof(AutoSCSIData)) - return BusLogic_Failure(HostAdapter, "FETCH HOST ADAPTER LOCAL RAM"); + if (adapter->fw_ver[0] >= '4') { + fetch_localram.offset = BLOGIC_AUTOSCSI_BASE; + fetch_localram.count = sizeof(autoscsi); + if (blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM, &fetch_localram, + sizeof(fetch_localram), &autoscsi, + sizeof(autoscsi)) != sizeof(autoscsi)) + return blogic_failure(adapter, + "FETCH HOST ADAPTER LOCAL RAM"); /* - Save the Parity Checking Enabled, Bus Reset Enabled, and Termination - Information in the Host Adapter structure. + Save the Parity Checking Enabled, Bus Reset Enabled, + and Termination Information in the Host Adapter structure. */ - HostAdapter->ParityCheckingEnabled = AutoSCSIData.ParityCheckingEnabled; - HostAdapter->BusResetEnabled = AutoSCSIData.BusResetEnabled; - if (HostAdapter->FirmwareVersion[0] == '4') { - HostAdapter->TerminationInfoValid = true; - HostAdapter->LowByteTerminated = AutoSCSIData.LowByteTerminated; - HostAdapter->HighByteTerminated = AutoSCSIData.HighByteTerminated; + adapter->parity = autoscsi.parity; + adapter->reset_enabled = autoscsi.reset_enabled; + if (adapter->fw_ver[0] == '4') { + adapter->terminfo_valid = true; + adapter->low_term = autoscsi.low_term; + adapter->high_term = autoscsi.high_term; } /* - Save the Wide Permitted, Fast Permitted, Synchronous Permitted, - Disconnect Permitted, Ultra Permitted, and SCAM Information in the - Host Adapter structure. + Save the Wide Permitted, Fast Permitted, Synchronous + Permitted, Disconnect Permitted, Ultra Permitted, and + SCAM Information in the Host Adapter structure. */ - HostAdapter->WidePermitted = AutoSCSIData.WidePermitted; - HostAdapter->FastPermitted = AutoSCSIData.FastPermitted; - HostAdapter->SynchronousPermitted = AutoSCSIData.SynchronousPermitted; - HostAdapter->DisconnectPermitted = AutoSCSIData.DisconnectPermitted; - if (HostAdapter->HostUltraSCSI) - HostAdapter->UltraPermitted = AutoSCSIData.UltraPermitted; - if (HostAdapter->HostSupportsSCAM) { - HostAdapter->SCAM_Enabled = AutoSCSIData.SCAM_Enabled; - HostAdapter->SCAM_Level2 = AutoSCSIData.SCAM_Level2; + adapter->wide_ok = autoscsi.wide_ok; + adapter->fast_ok = autoscsi.fast_ok; + adapter->sync_ok = autoscsi.sync_ok; + adapter->discon_ok = autoscsi.discon_ok; + if (adapter->ultra) + adapter->ultra_ok = autoscsi.ultra_ok; + if (adapter->scam) { + adapter->scam_enabled = autoscsi.scam_enabled; + adapter->scam_lev2 = autoscsi.scam_lev2; } } /* - Initialize fields in the Host Adapter structure for "S" and "A" series - MultiMaster Host Adapters. + Initialize fields in the Host Adapter structure for "S" and "A" + series MultiMaster Host Adapters. 
*/ - if (HostAdapter->FirmwareVersion[0] < '4') { - if (SetupInformation.SynchronousInitiationEnabled) { - HostAdapter->SynchronousPermitted = 0xFF; - if (HostAdapter->HostAdapterBusType == BusLogic_EISA_Bus) { - if (ExtendedSetupInformation.Misc.FastOnEISA) - HostAdapter->FastPermitted = 0xFF; - if (strcmp(HostAdapter->ModelName, "BT-757") == 0) - HostAdapter->WidePermitted = 0xFF; + if (adapter->fw_ver[0] < '4') { + if (setupinfo.sync) { + adapter->sync_ok = 0xFF; + if (adapter->adapter_bus_type == BLOGIC_EISA_BUS) { + if (ext_setupinfo.misc.fast_on_eisa) + adapter->fast_ok = 0xFF; + if (strcmp(adapter->model, "BT-757") == 0) + adapter->wide_ok = 0xFF; } } - HostAdapter->DisconnectPermitted = 0xFF; - HostAdapter->ParityCheckingEnabled = SetupInformation.ParityCheckingEnabled; - HostAdapter->BusResetEnabled = true; + adapter->discon_ok = 0xFF; + adapter->parity = setupinfo.parity; + adapter->reset_enabled = true; } /* - Determine the maximum number of Target IDs and Logical Units supported by - this driver for Wide and Narrow Host Adapters. + Determine the maximum number of Target IDs and Logical Units + supported by this driver for Wide and Narrow Host Adapters. */ - HostAdapter->MaxTargetDevices = (HostAdapter->HostWideSCSI ? 16 : 8); - HostAdapter->MaxLogicalUnits = (HostAdapter->ExtendedLUNSupport ? 32 : 8); + adapter->maxdev = (adapter->wide ? 16 : 8); + adapter->maxlun = (adapter->ext_lun ? 32 : 8); /* Select appropriate values for the Mailbox Count, Driver Queue Depth, - Initial CCBs, and Incremental CCBs variables based on whether or not Strict - Round Robin Mode is supported. If Strict Round Robin Mode is supported, - then there is no performance degradation in using the maximum possible - number of Outgoing and Incoming Mailboxes and allowing the Tagged and - Untagged Queue Depths to determine the actual utilization. If Strict Round - Robin Mode is not supported, then the Host Adapter must scan all the - Outgoing Mailboxes whenever an Outgoing Mailbox entry is made, which can - cause a substantial performance penalty. The host adapters actually have - room to store the following number of CCBs internally; that is, they can - internally queue and manage this many active commands on the SCSI bus - simultaneously. Performance measurements demonstrate that the Driver Queue - Depth should be set to the Mailbox Count, rather than the Host Adapter - Queue Depth (internal CCB capacity), as it is more efficient to have the - queued commands waiting in Outgoing Mailboxes if necessary than to block - the process in the higher levels of the SCSI Subsystem. + Initial CCBs, and Incremental CCBs variables based on whether + or not Strict Round Robin Mode is supported. If Strict Round + Robin Mode is supported, then there is no performance degradation + in using the maximum possible number of Outgoing and Incoming + Mailboxes and allowing the Tagged and Untagged Queue Depths to + determine the actual utilization. If Strict Round Robin Mode is + not supported, then the Host Adapter must scan all the Outgoing + Mailboxes whenever an Outgoing Mailbox entry is made, which can + cause a substantial performance penalty. The host adapters + actually have room to store the following number of CCBs + internally; that is, they can internally queue and manage this + many active commands on the SCSI bus simultaneously. 
Performance + measurements demonstrate that the Driver Queue Depth should be + set to the Mailbox Count, rather than the Host Adapter Queue + Depth (internal CCB capacity), as it is more efficient to have the + queued commands waiting in Outgoing Mailboxes if necessary than + to block the process in the higher levels of the SCSI Subsystem. 192 BT-948/958/958D 100 BT-946C/956C/956CD/747C/757C/757CD/445C 50 BT-545C/540CF 30 BT-747S/747D/757S/757D/445S/545S/542D/542B/742A */ - if (HostAdapter->FirmwareVersion[0] == '5') - HostAdapter->HostAdapterQueueDepth = 192; - else if (HostAdapter->FirmwareVersion[0] == '4') - HostAdapter->HostAdapterQueueDepth = (HostAdapter->HostAdapterBusType != BusLogic_ISA_Bus ? 100 : 50); + if (adapter->fw_ver[0] == '5') + adapter->adapter_qdepth = 192; + else if (adapter->fw_ver[0] == '4') + adapter->adapter_qdepth = (adapter->adapter_bus_type != + BLOGIC_ISA_BUS ? 100 : 50); else - HostAdapter->HostAdapterQueueDepth = 30; - if (strcmp(HostAdapter->FirmwareVersion, "3.31") >= 0) { - HostAdapter->StrictRoundRobinModeSupport = true; - HostAdapter->MailboxCount = BusLogic_MaxMailboxes; + adapter->adapter_qdepth = 30; + if (strcmp(adapter->fw_ver, "3.31") >= 0) { + adapter->strict_rr = true; + adapter->mbox_count = BLOGIC_MAX_MAILBOX; } else { - HostAdapter->StrictRoundRobinModeSupport = false; - HostAdapter->MailboxCount = 32; + adapter->strict_rr = false; + adapter->mbox_count = 32; } - HostAdapter->DriverQueueDepth = HostAdapter->MailboxCount; - HostAdapter->InitialCCBs = 4 * BusLogic_CCB_AllocationGroupSize; - HostAdapter->IncrementalCCBs = BusLogic_CCB_AllocationGroupSize; + adapter->drvr_qdepth = adapter->mbox_count; + adapter->initccbs = 4 * BLOGIC_CCB_GRP_ALLOCSIZE; + adapter->inc_ccbs = BLOGIC_CCB_GRP_ALLOCSIZE; /* - Tagged Queuing support is available and operates properly on all "W" series - MultiMaster Host Adapters, on "C" series MultiMaster Host Adapters with - firmware version 4.22 and above, and on "S" series MultiMaster Host - Adapters with firmware version 3.35 and above. + Tagged Queuing support is available and operates properly on + all "W" series MultiMaster Host Adapters, on "C" series + MultiMaster Host Adapters with firmware version 4.22 and above, + and on "S" series MultiMaster Host Adapters with firmware version + 3.35 and above. */ - HostAdapter->TaggedQueuingPermitted = 0; - switch (HostAdapter->FirmwareVersion[0]) { + adapter->tagq_ok = 0; + switch (adapter->fw_ver[0]) { case '5': - HostAdapter->TaggedQueuingPermitted = 0xFFFF; + adapter->tagq_ok = 0xFFFF; break; case '4': - if (strcmp(HostAdapter->FirmwareVersion, "4.22") >= 0) - HostAdapter->TaggedQueuingPermitted = 0xFFFF; + if (strcmp(adapter->fw_ver, "4.22") >= 0) + adapter->tagq_ok = 0xFFFF; break; case '3': - if (strcmp(HostAdapter->FirmwareVersion, "3.35") >= 0) - HostAdapter->TaggedQueuingPermitted = 0xFFFF; + if (strcmp(adapter->fw_ver, "3.35") >= 0) + adapter->tagq_ok = 0xFFFF; break; } /* Determine the Host Adapter BIOS Address if the BIOS is enabled and save it in the Host Adapter structure. The BIOS is disabled if the - BIOS_Address is 0. + bios_addr is 0. */ - HostAdapter->BIOS_Address = ExtendedSetupInformation.BIOS_Address << 12; + adapter->bios_addr = ext_setupinfo.bios_addr << 12; /* - ISA Host Adapters require Bounce Buffers if there is more than 16MB memory. + ISA Host Adapters require Bounce Buffers if there is more than + 16MB memory. 
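The feature gates above ("3.31" for strict round robin, "4.22" and "3.35" for tagged queuing) compare firmware versions with plain strcmp(). That is sound only because the versions are fixed-width ASCII digit strings, so lexicographic order coincides with numeric order; a runnable check:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *fw_ver = "4.25";    /* sample version string */

        if (strcmp(fw_ver, "3.31") >= 0)
                printf("strict round robin supported\n");
        if (fw_ver[0] == '4' && strcmp(fw_ver, "4.22") >= 0)
                printf("tagged queuing permitted\n");
        return 0;
}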
*/ - if (HostAdapter->HostAdapterBusType == BusLogic_ISA_Bus && (void *) high_memory > (void *) MAX_DMA_ADDRESS) - HostAdapter->BounceBuffersRequired = true; + if (adapter->adapter_bus_type == BLOGIC_ISA_BUS && + (void *) high_memory > (void *) MAX_DMA_ADDRESS) + adapter->need_bouncebuf = true; /* - BusLogic BT-445S Host Adapters prior to board revision E have a hardware - bug whereby when the BIOS is enabled, transfers to/from the same address - range the BIOS occupies modulo 16MB are handled incorrectly. Only properly - functioning BT-445S Host Adapters have firmware version 3.37, so require - that ISA Bounce Buffers be used for the buggy BT-445S models if there is - more than 16MB memory. + BusLogic BT-445S Host Adapters prior to board revision E have a + hardware bug whereby when the BIOS is enabled, transfers to/from + the same address range the BIOS occupies modulo 16MB are handled + incorrectly. Only properly functioning BT-445S Host Adapters + have firmware version 3.37, so require that ISA Bounce Buffers + be used for the buggy BT-445S models if there is more than 16MB + memory. */ - if (HostAdapter->BIOS_Address > 0 && strcmp(HostAdapter->ModelName, "BT-445S") == 0 && strcmp(HostAdapter->FirmwareVersion, "3.37") < 0 && (void *) high_memory > (void *) MAX_DMA_ADDRESS) - HostAdapter->BounceBuffersRequired = true; + if (adapter->bios_addr > 0 && strcmp(adapter->model, "BT-445S") == 0 && + strcmp(adapter->fw_ver, "3.37") < 0 && + (void *) high_memory > (void *) MAX_DMA_ADDRESS) + adapter->need_bouncebuf = true; /* - Initialize parameters common to MultiMaster and FlashPoint Host Adapters. + Initialize parameters common to MultiMaster and FlashPoint + Host Adapters. */ - Common: +common: /* Initialize the Host Adapter Full Model Name from the Model Name. */ - strcpy(HostAdapter->FullModelName, "BusLogic "); - strcat(HostAdapter->FullModelName, HostAdapter->ModelName); + strcpy(adapter->full_model, "BusLogic "); + strcat(adapter->full_model, adapter->model); /* Select an appropriate value for the Tagged Queue Depth either from a BusLogic Driver Options specification, or based on whether this Host - Adapter requires that ISA Bounce Buffers be used. The Tagged Queue Depth - is left at 0 for automatic determination in BusLogic_SelectQueueDepths. - Initialize the Untagged Queue Depth. - */ - for (TargetID = 0; TargetID < BusLogic_MaxTargetDevices; TargetID++) { - unsigned char QueueDepth = 0; - if (HostAdapter->DriverOptions != NULL && HostAdapter->DriverOptions->QueueDepth[TargetID] > 0) - QueueDepth = HostAdapter->DriverOptions->QueueDepth[TargetID]; - else if (HostAdapter->BounceBuffersRequired) - QueueDepth = BusLogic_TaggedQueueDepthBB; - HostAdapter->QueueDepth[TargetID] = QueueDepth; - } - if (HostAdapter->BounceBuffersRequired) - HostAdapter->UntaggedQueueDepth = BusLogic_UntaggedQueueDepthBB; + Adapter requires that ISA Bounce Buffers be used. The Tagged Queue + Depth is left at 0 for automatic determination in + BusLogic_SelectQueueDepths. Initialize the Untagged Queue Depth. 
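Both bounce-buffer rules above come down to the 24-bit ISA DMA limit: if RAM extends past 16MB, an ISA adapter (or a pre-3.37 BT-445S with its BIOS-window bug) cannot reach every buffer directly, which is what the high_memory versus MAX_DMA_ADDRESS comparison expresses. The core test as plain arithmetic:

#include <stdbool.h>

#define ISA_DMA_LIMIT (1UL << 24)       /* 16MB: top of 24-bit ISA DMA */

static bool needs_bounce(unsigned long ram_top_phys, bool is_isa)
{
        return is_isa && ram_top_phys > ISA_DMA_LIMIT;
}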
+ */ + for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) { + unsigned char qdepth = 0; + if (adapter->drvr_opts != NULL && + adapter->drvr_opts->qdepth[tgt_id] > 0) + qdepth = adapter->drvr_opts->qdepth[tgt_id]; + else if (adapter->need_bouncebuf) + qdepth = BLOGIC_TAG_DEPTH_BB; + adapter->qdepth[tgt_id] = qdepth; + } + if (adapter->need_bouncebuf) + adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH_BB; else - HostAdapter->UntaggedQueueDepth = BusLogic_UntaggedQueueDepth; - if (HostAdapter->DriverOptions != NULL) - HostAdapter->CommonQueueDepth = HostAdapter->DriverOptions->CommonQueueDepth; - if (HostAdapter->CommonQueueDepth > 0 && HostAdapter->CommonQueueDepth < HostAdapter->UntaggedQueueDepth) - HostAdapter->UntaggedQueueDepth = HostAdapter->CommonQueueDepth; + adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH; + if (adapter->drvr_opts != NULL) + adapter->common_qdepth = adapter->drvr_opts->common_qdepth; + if (adapter->common_qdepth > 0 && + adapter->common_qdepth < adapter->untag_qdepth) + adapter->untag_qdepth = adapter->common_qdepth; /* Tagged Queuing is only allowed if Disconnect/Reconnect is permitted. Therefore, mask the Tagged Queuing Permitted Default bits with the Disconnect/Reconnect Permitted bits. */ - HostAdapter->TaggedQueuingPermitted &= HostAdapter->DisconnectPermitted; + adapter->tagq_ok &= adapter->discon_ok; /* - Combine the default Tagged Queuing Permitted bits with any BusLogic Driver - Options Tagged Queuing specification. + Combine the default Tagged Queuing Permitted bits with any + BusLogic Driver Options Tagged Queuing specification. */ - if (HostAdapter->DriverOptions != NULL) - HostAdapter->TaggedQueuingPermitted = - (HostAdapter->DriverOptions->TaggedQueuingPermitted & HostAdapter->DriverOptions->TaggedQueuingPermittedMask) | (HostAdapter->TaggedQueuingPermitted & ~HostAdapter->DriverOptions->TaggedQueuingPermittedMask); + if (adapter->drvr_opts != NULL) + adapter->tagq_ok = (adapter->drvr_opts->tagq_ok & + adapter->drvr_opts->tagq_ok_mask) | + (adapter->tagq_ok & ~adapter->drvr_opts->tagq_ok_mask); /* - Select an appropriate value for Bus Settle Time either from a BusLogic - Driver Options specification, or from BusLogic_DefaultBusSettleTime. + Select an appropriate value for Bus Settle Time either from a + BusLogic Driver Options specification, or from + BLOGIC_BUS_SETTLE_TIME. */ - if (HostAdapter->DriverOptions != NULL && HostAdapter->DriverOptions->BusSettleTime > 0) - HostAdapter->BusSettleTime = HostAdapter->DriverOptions->BusSettleTime; + if (adapter->drvr_opts != NULL && + adapter->drvr_opts->bus_settle_time > 0) + adapter->bus_settle_time = adapter->drvr_opts->bus_settle_time; else - HostAdapter->BusSettleTime = BusLogic_DefaultBusSettleTime; + adapter->bus_settle_time = BLOGIC_BUS_SETTLE_TIME; /* - Indicate reading the Host Adapter Configuration completed successfully. + Indicate reading the Host Adapter Configuration completed + successfully. */ return true; } /* - BusLogic_ReportHostAdapterConfiguration reports the configuration of - Host Adapter. + blogic_reportconfig reports the configuration of Host Adapter. 
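Two bit tricks close out the configuration above: tagq_ok &= discon_ok enforces that tagged queuing is only offered where disconnect/reconnect is permitted, and the driver-options merge is the classic masked blend, taking option bits where the mask selects them and keeping the probed defaults everywhere else:

static unsigned short merge_masked(unsigned short probed,
                                   unsigned short opts,
                                   unsigned short mask)
{
        return (opts & mask) | (probed & ~mask);
}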
*/ -static bool __init BusLogic_ReportHostAdapterConfiguration(struct BusLogic_HostAdapter - *HostAdapter) +static bool __init blogic_reportconfig(struct blogic_adapter *adapter) { - unsigned short AllTargetsMask = (1 << HostAdapter->MaxTargetDevices) - 1; - unsigned short SynchronousPermitted, FastPermitted; - unsigned short UltraPermitted, WidePermitted; - unsigned short DisconnectPermitted, TaggedQueuingPermitted; - bool CommonSynchronousNegotiation, CommonTaggedQueueDepth; - char SynchronousString[BusLogic_MaxTargetDevices + 1]; - char WideString[BusLogic_MaxTargetDevices + 1]; - char DisconnectString[BusLogic_MaxTargetDevices + 1]; - char TaggedQueuingString[BusLogic_MaxTargetDevices + 1]; - char *SynchronousMessage = SynchronousString; - char *WideMessage = WideString; - char *DisconnectMessage = DisconnectString; - char *TaggedQueuingMessage = TaggedQueuingString; - int TargetID; - BusLogic_Info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", - HostAdapter, HostAdapter->ModelName, - BusLogic_HostAdapterBusNames[HostAdapter->HostAdapterBusType], (HostAdapter->HostWideSCSI ? " Wide" : ""), (HostAdapter->HostDifferentialSCSI ? " Differential" : ""), (HostAdapter->HostUltraSCSI ? " Ultra" : "")); - BusLogic_Info(" Firmware Version: %s, I/O Address: 0x%X, " "IRQ Channel: %d/%s\n", HostAdapter, HostAdapter->FirmwareVersion, HostAdapter->IO_Address, HostAdapter->IRQ_Channel, (HostAdapter->LevelSensitiveInterrupt ? "Level" : "Edge")); - if (HostAdapter->HostAdapterBusType != BusLogic_PCI_Bus) { - BusLogic_Info(" DMA Channel: ", HostAdapter); - if (HostAdapter->DMA_Channel > 0) - BusLogic_Info("%d, ", HostAdapter, HostAdapter->DMA_Channel); + unsigned short alltgt_mask = (1 << adapter->maxdev) - 1; + unsigned short sync_ok, fast_ok; + unsigned short ultra_ok, wide_ok; + unsigned short discon_ok, tagq_ok; + bool common_syncneg, common_tagq_depth; + char syncstr[BLOGIC_MAXDEV + 1]; + char widestr[BLOGIC_MAXDEV + 1]; + char discon_str[BLOGIC_MAXDEV + 1]; + char tagq_str[BLOGIC_MAXDEV + 1]; + char *syncmsg = syncstr; + char *widemsg = widestr; + char *discon_msg = discon_str; + char *tagq_msg = tagq_str; + int tgt_id; + + blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : "")); + blogic_info(" Firmware Version: %s, I/O Address: 0x%X, " "IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? 
"Level" : "Edge")); + if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) { + blogic_info(" DMA Channel: ", adapter); + if (adapter->dma_ch > 0) + blogic_info("%d, ", adapter, adapter->dma_ch); else - BusLogic_Info("None, ", HostAdapter); - if (HostAdapter->BIOS_Address > 0) - BusLogic_Info("BIOS Address: 0x%X, ", HostAdapter, HostAdapter->BIOS_Address); + blogic_info("None, ", adapter); + if (adapter->bios_addr > 0) + blogic_info("BIOS Address: 0x%X, ", adapter, + adapter->bios_addr); else - BusLogic_Info("BIOS Address: None, ", HostAdapter); + blogic_info("BIOS Address: None, ", adapter); } else { - BusLogic_Info(" PCI Bus: %d, Device: %d, Address: ", HostAdapter, HostAdapter->Bus, HostAdapter->Device); - if (HostAdapter->PCI_Address > 0) - BusLogic_Info("0x%X, ", HostAdapter, HostAdapter->PCI_Address); + blogic_info(" PCI Bus: %d, Device: %d, Address: ", adapter, + adapter->bus, adapter->dev); + if (adapter->pci_addr > 0) + blogic_info("0x%X, ", adapter, adapter->pci_addr); else - BusLogic_Info("Unassigned, ", HostAdapter); - } - BusLogic_Info("Host Adapter SCSI ID: %d\n", HostAdapter, HostAdapter->SCSI_ID); - BusLogic_Info(" Parity Checking: %s, Extended Translation: %s\n", HostAdapter, (HostAdapter->ParityCheckingEnabled ? "Enabled" : "Disabled"), (HostAdapter->ExtendedTranslationEnabled ? "Enabled" : "Disabled")); - AllTargetsMask &= ~(1 << HostAdapter->SCSI_ID); - SynchronousPermitted = HostAdapter->SynchronousPermitted & AllTargetsMask; - FastPermitted = HostAdapter->FastPermitted & AllTargetsMask; - UltraPermitted = HostAdapter->UltraPermitted & AllTargetsMask; - if ((BusLogic_MultiMasterHostAdapterP(HostAdapter) && (HostAdapter->FirmwareVersion[0] >= '4' || HostAdapter->HostAdapterBusType == BusLogic_EISA_Bus)) || BusLogic_FlashPointHostAdapterP(HostAdapter)) { - CommonSynchronousNegotiation = false; - if (SynchronousPermitted == 0) { - SynchronousMessage = "Disabled"; - CommonSynchronousNegotiation = true; - } else if (SynchronousPermitted == AllTargetsMask) { - if (FastPermitted == 0) { - SynchronousMessage = "Slow"; - CommonSynchronousNegotiation = true; - } else if (FastPermitted == AllTargetsMask) { - if (UltraPermitted == 0) { - SynchronousMessage = "Fast"; - CommonSynchronousNegotiation = true; - } else if (UltraPermitted == AllTargetsMask) { - SynchronousMessage = "Ultra"; - CommonSynchronousNegotiation = true; + blogic_info("Unassigned, ", adapter); + } + blogic_info("Host Adapter SCSI ID: %d\n", adapter, adapter->scsi_id); + blogic_info(" Parity Checking: %s, Extended Translation: %s\n", + adapter, (adapter->parity ? "Enabled" : "Disabled"), + (adapter->ext_trans_enable ? 
"Enabled" : "Disabled")); + alltgt_mask &= ~(1 << adapter->scsi_id); + sync_ok = adapter->sync_ok & alltgt_mask; + fast_ok = adapter->fast_ok & alltgt_mask; + ultra_ok = adapter->ultra_ok & alltgt_mask; + if ((blogic_multimaster_type(adapter) && + (adapter->fw_ver[0] >= '4' || + adapter->adapter_bus_type == BLOGIC_EISA_BUS)) || + blogic_flashpoint_type(adapter)) { + common_syncneg = false; + if (sync_ok == 0) { + syncmsg = "Disabled"; + common_syncneg = true; + } else if (sync_ok == alltgt_mask) { + if (fast_ok == 0) { + syncmsg = "Slow"; + common_syncneg = true; + } else if (fast_ok == alltgt_mask) { + if (ultra_ok == 0) { + syncmsg = "Fast"; + common_syncneg = true; + } else if (ultra_ok == alltgt_mask) { + syncmsg = "Ultra"; + common_syncneg = true; } } } - if (!CommonSynchronousNegotiation) { - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) - SynchronousString[TargetID] = ((!(SynchronousPermitted & (1 << TargetID))) ? 'N' : (!(FastPermitted & (1 << TargetID)) ? 'S' : (!(UltraPermitted & (1 << TargetID)) ? 'F' : 'U'))); - SynchronousString[HostAdapter->SCSI_ID] = '#'; - SynchronousString[HostAdapter->MaxTargetDevices] = '\0'; + if (!common_syncneg) { + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + syncstr[tgt_id] = ((!(sync_ok & (1 << tgt_id))) ? 'N' : (!(fast_ok & (1 << tgt_id)) ? 'S' : (!(ultra_ok & (1 << tgt_id)) ? 'F' : 'U'))); + syncstr[adapter->scsi_id] = '#'; + syncstr[adapter->maxdev] = '\0'; } } else - SynchronousMessage = (SynchronousPermitted == 0 ? "Disabled" : "Enabled"); - WidePermitted = HostAdapter->WidePermitted & AllTargetsMask; - if (WidePermitted == 0) - WideMessage = "Disabled"; - else if (WidePermitted == AllTargetsMask) - WideMessage = "Enabled"; + syncmsg = (sync_ok == 0 ? "Disabled" : "Enabled"); + wide_ok = adapter->wide_ok & alltgt_mask; + if (wide_ok == 0) + widemsg = "Disabled"; + else if (wide_ok == alltgt_mask) + widemsg = "Enabled"; else { - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) - WideString[TargetID] = ((WidePermitted & (1 << TargetID)) ? 'Y' : 'N'); - WideString[HostAdapter->SCSI_ID] = '#'; - WideString[HostAdapter->MaxTargetDevices] = '\0'; - } - DisconnectPermitted = HostAdapter->DisconnectPermitted & AllTargetsMask; - if (DisconnectPermitted == 0) - DisconnectMessage = "Disabled"; - else if (DisconnectPermitted == AllTargetsMask) - DisconnectMessage = "Enabled"; + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + widestr[tgt_id] = ((wide_ok & (1 << tgt_id)) ? 'Y' : 'N'); + widestr[adapter->scsi_id] = '#'; + widestr[adapter->maxdev] = '\0'; + } + discon_ok = adapter->discon_ok & alltgt_mask; + if (discon_ok == 0) + discon_msg = "Disabled"; + else if (discon_ok == alltgt_mask) + discon_msg = "Enabled"; else { - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) - DisconnectString[TargetID] = ((DisconnectPermitted & (1 << TargetID)) ? 'Y' : 'N'); - DisconnectString[HostAdapter->SCSI_ID] = '#'; - DisconnectString[HostAdapter->MaxTargetDevices] = '\0'; - } - TaggedQueuingPermitted = HostAdapter->TaggedQueuingPermitted & AllTargetsMask; - if (TaggedQueuingPermitted == 0) - TaggedQueuingMessage = "Disabled"; - else if (TaggedQueuingPermitted == AllTargetsMask) - TaggedQueuingMessage = "Enabled"; + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + discon_str[tgt_id] = ((discon_ok & (1 << tgt_id)) ? 
'Y' : 'N'); + discon_str[adapter->scsi_id] = '#'; + discon_str[adapter->maxdev] = '\0'; + } + tagq_ok = adapter->tagq_ok & alltgt_mask; + if (tagq_ok == 0) + tagq_msg = "Disabled"; + else if (tagq_ok == alltgt_mask) + tagq_msg = "Enabled"; else { - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) - TaggedQueuingString[TargetID] = ((TaggedQueuingPermitted & (1 << TargetID)) ? 'Y' : 'N'); - TaggedQueuingString[HostAdapter->SCSI_ID] = '#'; - TaggedQueuingString[HostAdapter->MaxTargetDevices] = '\0'; - } - BusLogic_Info(" Synchronous Negotiation: %s, Wide Negotiation: %s\n", HostAdapter, SynchronousMessage, WideMessage); - BusLogic_Info(" Disconnect/Reconnect: %s, Tagged Queuing: %s\n", HostAdapter, DisconnectMessage, TaggedQueuingMessage); - if (BusLogic_MultiMasterHostAdapterP(HostAdapter)) { - BusLogic_Info(" Scatter/Gather Limit: %d of %d segments, " "Mailboxes: %d\n", HostAdapter, HostAdapter->DriverScatterGatherLimit, HostAdapter->HostAdapterScatterGatherLimit, HostAdapter->MailboxCount); - BusLogic_Info(" Driver Queue Depth: %d, " "Host Adapter Queue Depth: %d\n", HostAdapter, HostAdapter->DriverQueueDepth, HostAdapter->HostAdapterQueueDepth); + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + tagq_str[tgt_id] = ((tagq_ok & (1 << tgt_id)) ? 'Y' : 'N'); + tagq_str[adapter->scsi_id] = '#'; + tagq_str[adapter->maxdev] = '\0'; + } + blogic_info(" Synchronous Negotiation: %s, Wide Negotiation: %s\n", + adapter, syncmsg, widemsg); + blogic_info(" Disconnect/Reconnect: %s, Tagged Queuing: %s\n", adapter, + discon_msg, tagq_msg); + if (blogic_multimaster_type(adapter)) { + blogic_info(" Scatter/Gather Limit: %d of %d segments, " "Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count); + blogic_info(" Driver Queue Depth: %d, " "Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth); } else - BusLogic_Info(" Driver Queue Depth: %d, " "Scatter/Gather Limit: %d segments\n", HostAdapter, HostAdapter->DriverQueueDepth, HostAdapter->DriverScatterGatherLimit); - BusLogic_Info(" Tagged Queue Depth: ", HostAdapter); - CommonTaggedQueueDepth = true; - for (TargetID = 1; TargetID < HostAdapter->MaxTargetDevices; TargetID++) - if (HostAdapter->QueueDepth[TargetID] != HostAdapter->QueueDepth[0]) { - CommonTaggedQueueDepth = false; + blogic_info(" Driver Queue Depth: %d, " "Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit); + blogic_info(" Tagged Queue Depth: ", adapter); + common_tagq_depth = true; + for (tgt_id = 1; tgt_id < adapter->maxdev; tgt_id++) + if (adapter->qdepth[tgt_id] != adapter->qdepth[0]) { + common_tagq_depth = false; break; } - if (CommonTaggedQueueDepth) { - if (HostAdapter->QueueDepth[0] > 0) - BusLogic_Info("%d", HostAdapter, HostAdapter->QueueDepth[0]); + if (common_tagq_depth) { + if (adapter->qdepth[0] > 0) + blogic_info("%d", adapter, adapter->qdepth[0]); else - BusLogic_Info("Automatic", HostAdapter); + blogic_info("Automatic", adapter); } else - BusLogic_Info("Individual", HostAdapter); - BusLogic_Info(", Untagged Queue Depth: %d\n", HostAdapter, HostAdapter->UntaggedQueueDepth); - if (HostAdapter->TerminationInfoValid) { - if (HostAdapter->HostWideSCSI) - BusLogic_Info(" SCSI Bus Termination: %s", HostAdapter, (HostAdapter->LowByteTerminated ? (HostAdapter->HighByteTerminated ? "Both Enabled" : "Low Enabled") - : (HostAdapter->HighByteTerminated ? 
"High Enabled" : "Both Disabled"))); + blogic_info("Individual", adapter); + blogic_info(", Untagged Queue Depth: %d\n", adapter, + adapter->untag_qdepth); + if (adapter->terminfo_valid) { + if (adapter->wide) + blogic_info(" SCSI Bus Termination: %s", adapter, + (adapter->low_term ? (adapter->high_term ? "Both Enabled" : "Low Enabled") : (adapter->high_term ? "High Enabled" : "Both Disabled"))); else - BusLogic_Info(" SCSI Bus Termination: %s", HostAdapter, (HostAdapter->LowByteTerminated ? "Enabled" : "Disabled")); - if (HostAdapter->HostSupportsSCAM) - BusLogic_Info(", SCAM: %s", HostAdapter, (HostAdapter->SCAM_Enabled ? (HostAdapter->SCAM_Level2 ? "Enabled, Level 2" : "Enabled, Level 1") - : "Disabled")); - BusLogic_Info("\n", HostAdapter); + blogic_info(" SCSI Bus Termination: %s", adapter, + (adapter->low_term ? "Enabled" : "Disabled")); + if (adapter->scam) + blogic_info(", SCAM: %s", adapter, + (adapter->scam_enabled ? (adapter->scam_lev2 ? "Enabled, Level 2" : "Enabled, Level 1") : "Disabled")); + blogic_info("\n", adapter); } /* - Indicate reporting the Host Adapter configuration completed successfully. + Indicate reporting the Host Adapter configuration completed + successfully. */ return true; } /* - BusLogic_AcquireResources acquires the system resources necessary to use + blogic_getres acquires the system resources necessary to use Host Adapter. */ -static bool __init BusLogic_AcquireResources(struct BusLogic_HostAdapter *HostAdapter) +static bool __init blogic_getres(struct blogic_adapter *adapter) { - if (HostAdapter->IRQ_Channel == 0) { - BusLogic_Error("NO LEGAL INTERRUPT CHANNEL ASSIGNED - DETACHING\n", HostAdapter); + if (adapter->irq_ch == 0) { + blogic_err("NO LEGAL INTERRUPT CHANNEL ASSIGNED - DETACHING\n", + adapter); return false; } /* Acquire shared access to the IRQ Channel. */ - if (request_irq(HostAdapter->IRQ_Channel, BusLogic_InterruptHandler, IRQF_SHARED, HostAdapter->FullModelName, HostAdapter) < 0) { - BusLogic_Error("UNABLE TO ACQUIRE IRQ CHANNEL %d - DETACHING\n", HostAdapter, HostAdapter->IRQ_Channel); + if (request_irq(adapter->irq_ch, blogic_inthandler, IRQF_SHARED, + adapter->full_model, adapter) < 0) { + blogic_err("UNABLE TO ACQUIRE IRQ CHANNEL %d - DETACHING\n", + adapter, adapter->irq_ch); return false; } - HostAdapter->IRQ_ChannelAcquired = true; + adapter->irq_acquired = true; /* Acquire exclusive access to the DMA Channel. */ - if (HostAdapter->DMA_Channel > 0) { - if (request_dma(HostAdapter->DMA_Channel, HostAdapter->FullModelName) < 0) { - BusLogic_Error("UNABLE TO ACQUIRE DMA CHANNEL %d - DETACHING\n", HostAdapter, HostAdapter->DMA_Channel); + if (adapter->dma_ch > 0) { + if (request_dma(adapter->dma_ch, adapter->full_model) < 0) { + blogic_err("UNABLE TO ACQUIRE DMA CHANNEL %d - DETACHING\n", adapter, adapter->dma_ch); return false; } - set_dma_mode(HostAdapter->DMA_Channel, DMA_MODE_CASCADE); - enable_dma(HostAdapter->DMA_Channel); - HostAdapter->DMA_ChannelAcquired = true; + set_dma_mode(adapter->dma_ch, DMA_MODE_CASCADE); + enable_dma(adapter->dma_ch); + adapter->dma_chan_acquired = true; } /* Indicate the System Resource Acquisition completed successfully, @@ -1874,127 +2020,146 @@ static bool __init BusLogic_AcquireResources(struct BusLogic_HostAdapter *HostAd /* - BusLogic_ReleaseResources releases any system resources previously acquired - by BusLogic_AcquireResources. + blogic_relres releases any system resources previously acquired + by blogic_getres. 
*/ -static void BusLogic_ReleaseResources(struct BusLogic_HostAdapter *HostAdapter) +static void blogic_relres(struct blogic_adapter *adapter) { /* Release shared access to the IRQ Channel. */ - if (HostAdapter->IRQ_ChannelAcquired) - free_irq(HostAdapter->IRQ_Channel, HostAdapter); + if (adapter->irq_acquired) + free_irq(adapter->irq_ch, adapter); /* Release exclusive access to the DMA Channel. */ - if (HostAdapter->DMA_ChannelAcquired) - free_dma(HostAdapter->DMA_Channel); + if (adapter->dma_chan_acquired) + free_dma(adapter->dma_ch); /* Release any allocated memory structs not released elsewhere */ - if (HostAdapter->MailboxSpace) - pci_free_consistent(HostAdapter->PCI_Device, HostAdapter->MailboxSize, HostAdapter->MailboxSpace, HostAdapter->MailboxSpaceHandle); - pci_dev_put(HostAdapter->PCI_Device); - HostAdapter->MailboxSpace = NULL; - HostAdapter->MailboxSpaceHandle = 0; - HostAdapter->MailboxSize = 0; + if (adapter->mbox_space) + pci_free_consistent(adapter->pci_device, adapter->mbox_sz, + adapter->mbox_space, adapter->mbox_space_handle); + pci_dev_put(adapter->pci_device); + adapter->mbox_space = NULL; + adapter->mbox_space_handle = 0; + adapter->mbox_sz = 0; } /* - BusLogic_InitializeHostAdapter initializes Host Adapter. This is the only + blogic_initadapter initializes Host Adapter. This is the only function called during SCSI Host Adapter detection which modifies the state of the Host Adapter from its initial power on or hard reset state. */ -static bool BusLogic_InitializeHostAdapter(struct BusLogic_HostAdapter - *HostAdapter) +static bool blogic_initadapter(struct blogic_adapter *adapter) { - struct BusLogic_ExtendedMailboxRequest ExtendedMailboxRequest; - enum BusLogic_RoundRobinModeRequest RoundRobinModeRequest; - enum BusLogic_SetCCBFormatRequest SetCCBFormatRequest; - int TargetID; + struct blogic_extmbox_req extmbox_req; + enum blogic_rr_req rr_req; + enum blogic_setccb_fmt setccb_fmt; + int tgt_id; + /* - Initialize the pointers to the first and last CCBs that are queued for - completion processing. + Initialize the pointers to the first and last CCBs that are + queued for completion processing. */ - HostAdapter->FirstCompletedCCB = NULL; - HostAdapter->LastCompletedCCB = NULL; + adapter->firstccb = NULL; + adapter->lastccb = NULL; + /* Initialize the Bus Device Reset Pending CCB, Tagged Queuing Active, Command Successful Flag, Active Commands, and Commands Since Reset for each Target Device. */ - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) { - HostAdapter->BusDeviceResetPendingCCB[TargetID] = NULL; - HostAdapter->TargetFlags[TargetID].TaggedQueuingActive = false; - HostAdapter->TargetFlags[TargetID].CommandSuccessfulFlag = false; - HostAdapter->ActiveCommands[TargetID] = 0; - HostAdapter->CommandsSinceReset[TargetID] = 0; + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) { + adapter->bdr_pend[tgt_id] = NULL; + adapter->tgt_flags[tgt_id].tagq_active = false; + adapter->tgt_flags[tgt_id].cmd_good = false; + adapter->active_cmds[tgt_id] = 0; + adapter->cmds_since_rst[tgt_id] = 0; } + /* FlashPoint Host Adapters do not use Outgoing and Incoming Mailboxes. */ - if (BusLogic_FlashPointHostAdapterP(HostAdapter)) - goto Done; + if (blogic_flashpoint_type(adapter)) + goto done; + /* Initialize the Outgoing and Incoming Mailbox pointers. 
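
The resource handling in blogic_getres and blogic_relres is deliberately flag-driven: each resource actually obtained is recorded (irq_acquired, dma_chan_acquired) so that release can be invoked from any failure path and gives back exactly what was taken. The IRQ half of that pattern reduced to a sketch, with the struct and function names invented for illustration:

	#include <linux/interrupt.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	struct res_state {
		unsigned int irq;
		bool irq_acquired;
	};

	/* Acquire the shared IRQ and record the fact, so teardown can
	   be unconditional yet releases only what was obtained. */
	static int acquire_irq(struct res_state *rs, irq_handler_t handler,
			       void *dev_id, const char *name)
	{
		if (request_irq(rs->irq, handler, IRQF_SHARED, name, dev_id) < 0)
			return -EBUSY;
		rs->irq_acquired = true;
		return 0;
	}

	static void release_irq(struct res_state *rs, void *dev_id)
	{
		if (rs->irq_acquired)
			free_irq(rs->irq, dev_id);
		rs->irq_acquired = false;
	}
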
*/ - HostAdapter->MailboxSize = HostAdapter->MailboxCount * (sizeof(struct BusLogic_OutgoingMailbox) + sizeof(struct BusLogic_IncomingMailbox)); - HostAdapter->MailboxSpace = pci_alloc_consistent(HostAdapter->PCI_Device, HostAdapter->MailboxSize, &HostAdapter->MailboxSpaceHandle); - if (HostAdapter->MailboxSpace == NULL) - return BusLogic_Failure(HostAdapter, "MAILBOX ALLOCATION"); - HostAdapter->FirstOutgoingMailbox = (struct BusLogic_OutgoingMailbox *) HostAdapter->MailboxSpace; - HostAdapter->LastOutgoingMailbox = HostAdapter->FirstOutgoingMailbox + HostAdapter->MailboxCount - 1; - HostAdapter->NextOutgoingMailbox = HostAdapter->FirstOutgoingMailbox; - HostAdapter->FirstIncomingMailbox = (struct BusLogic_IncomingMailbox *) (HostAdapter->LastOutgoingMailbox + 1); - HostAdapter->LastIncomingMailbox = HostAdapter->FirstIncomingMailbox + HostAdapter->MailboxCount - 1; - HostAdapter->NextIncomingMailbox = HostAdapter->FirstIncomingMailbox; + adapter->mbox_sz = adapter->mbox_count * (sizeof(struct blogic_outbox) + sizeof(struct blogic_inbox)); + adapter->mbox_space = pci_alloc_consistent(adapter->pci_device, + adapter->mbox_sz, &adapter->mbox_space_handle); + if (adapter->mbox_space == NULL) + return blogic_failure(adapter, "MAILBOX ALLOCATION"); + adapter->first_outbox = (struct blogic_outbox *) adapter->mbox_space; + adapter->last_outbox = adapter->first_outbox + adapter->mbox_count - 1; + adapter->next_outbox = adapter->first_outbox; + adapter->first_inbox = (struct blogic_inbox *) (adapter->last_outbox + 1); + adapter->last_inbox = adapter->first_inbox + adapter->mbox_count - 1; + adapter->next_inbox = adapter->first_inbox; /* Initialize the Outgoing and Incoming Mailbox structures. */ - memset(HostAdapter->FirstOutgoingMailbox, 0, HostAdapter->MailboxCount * sizeof(struct BusLogic_OutgoingMailbox)); - memset(HostAdapter->FirstIncomingMailbox, 0, HostAdapter->MailboxCount * sizeof(struct BusLogic_IncomingMailbox)); + memset(adapter->first_outbox, 0, + adapter->mbox_count * sizeof(struct blogic_outbox)); + memset(adapter->first_inbox, 0, + adapter->mbox_count * sizeof(struct blogic_inbox)); + /* - Initialize the Host Adapter's Pointer to the Outgoing/Incoming Mailboxes. + Initialize the Host Adapter's Pointer to the Outgoing/Incoming + Mailboxes. */ - ExtendedMailboxRequest.MailboxCount = HostAdapter->MailboxCount; - ExtendedMailboxRequest.BaseMailboxAddress = (u32) HostAdapter->MailboxSpaceHandle; - if (BusLogic_Command(HostAdapter, BusLogic_InitializeExtendedMailbox, &ExtendedMailboxRequest, sizeof(ExtendedMailboxRequest), NULL, 0) < 0) - return BusLogic_Failure(HostAdapter, "MAILBOX INITIALIZATION"); + extmbox_req.mbox_count = adapter->mbox_count; + extmbox_req.base_mbox_addr = (u32) adapter->mbox_space_handle; + if (blogic_cmd(adapter, BLOGIC_INIT_EXT_MBOX, &extmbox_req, + sizeof(extmbox_req), NULL, 0) < 0) + return blogic_failure(adapter, "MAILBOX INITIALIZATION"); /* - Enable Strict Round Robin Mode if supported by the Host Adapter. In - Strict Round Robin Mode, the Host Adapter only looks at the next Outgoing - Mailbox for each new command, rather than scanning through all the - Outgoing Mailboxes to find any that have new commands in them. Strict - Round Robin Mode is significantly more efficient. + Enable Strict Round Robin Mode if supported by the Host Adapter. In + Strict Round Robin Mode, the Host Adapter only looks at the next + Outgoing Mailbox for each new command, rather than scanning + through all the Outgoing Mailboxes to find any that have new + commands in them. 
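
One detail of the mailbox initialization worth spelling out: both rings come from a single pci_alloc_consistent() allocation, outgoing mailboxes first and incoming mailboxes immediately after, so one bus address (mbox_space_handle) describes the whole area handed to the adapter. The pointer arithmetic in isolation, with the real mailbox layouts elided:

	#include <string.h>

	struct outbox { unsigned char raw[8]; };	/* layout elided */
	struct inbox  { unsigned char raw[8]; };

	struct rings {
		struct outbox *first_out, *last_out, *next_out;
		struct inbox  *first_in,  *last_in,  *next_in;
	};

	/* Carve both rings out of one contiguous buffer: outboxes
	   first, inboxes starting right after the last outbox. */
	static void carve_rings(struct rings *r, void *space, int count)
	{
		r->first_out = space;
		r->last_out  = r->first_out + count - 1;
		r->next_out  = r->first_out;
		r->first_in  = (struct inbox *) (r->last_out + 1);
		r->last_in   = r->first_in + count - 1;
		r->next_in   = r->first_in;
		memset(space, 0, count * (sizeof(struct outbox) +
					  sizeof(struct inbox)));
	}
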
Strict Round Robin Mode is significantly more + efficient. */ - if (HostAdapter->StrictRoundRobinModeSupport) { - RoundRobinModeRequest = BusLogic_StrictRoundRobinMode; - if (BusLogic_Command(HostAdapter, BusLogic_EnableStrictRoundRobinMode, &RoundRobinModeRequest, sizeof(RoundRobinModeRequest), NULL, 0) < 0) - return BusLogic_Failure(HostAdapter, "ENABLE STRICT ROUND ROBIN MODE"); + if (adapter->strict_rr) { + rr_req = BLOGIC_STRICT_RR_MODE; + if (blogic_cmd(adapter, BLOGIC_STRICT_RR, &rr_req, + sizeof(rr_req), NULL, 0) < 0) + return blogic_failure(adapter, + "ENABLE STRICT ROUND ROBIN MODE"); } + /* - For Host Adapters that support Extended LUN Format CCBs, issue the Set CCB - Format command to allow 32 Logical Units per Target Device. + For Host Adapters that support Extended LUN Format CCBs, issue the + Set CCB Format command to allow 32 Logical Units per Target Device. */ - if (HostAdapter->ExtendedLUNSupport) { - SetCCBFormatRequest = BusLogic_ExtendedLUNFormatCCB; - if (BusLogic_Command(HostAdapter, BusLogic_SetCCBFormat, &SetCCBFormatRequest, sizeof(SetCCBFormatRequest), NULL, 0) < 0) - return BusLogic_Failure(HostAdapter, "SET CCB FORMAT"); + if (adapter->ext_lun) { + setccb_fmt = BLOGIC_EXT_LUN_CCB; + if (blogic_cmd(adapter, BLOGIC_SETCCB_FMT, &setccb_fmt, + sizeof(setccb_fmt), NULL, 0) < 0) + return blogic_failure(adapter, "SET CCB FORMAT"); } + /* Announce Successful Initialization. */ - Done: - if (!HostAdapter->HostAdapterInitialized) { - BusLogic_Info("*** %s Initialized Successfully ***\n", HostAdapter, HostAdapter->FullModelName); - BusLogic_Info("\n", HostAdapter); +done: + if (!adapter->adapter_initd) { + blogic_info("*** %s Initialized Successfully ***\n", adapter, + adapter->full_model); + blogic_info("\n", adapter); } else - BusLogic_Warning("*** %s Initialized Successfully ***\n", HostAdapter, HostAdapter->FullModelName); - HostAdapter->HostAdapterInitialized = true; + blogic_warn("*** %s Initialized Successfully ***\n", adapter, + adapter->full_model); + adapter->adapter_initd = true; + /* Indicate the Host Adapter Initialization completed successfully. */ @@ -2003,109 +2168,116 @@ static bool BusLogic_InitializeHostAdapter(struct BusLogic_HostAdapter /* - BusLogic_TargetDeviceInquiry inquires about the Target Devices accessible + blogic_inquiry inquires about the Target Devices accessible through Host Adapter. */ -static bool __init BusLogic_TargetDeviceInquiry(struct BusLogic_HostAdapter - *HostAdapter) +static bool __init blogic_inquiry(struct blogic_adapter *adapter) { - u16 InstalledDevices; - u8 InstalledDevicesID0to7[8]; - struct BusLogic_SetupInformation SetupInformation; - u8 SynchronousPeriod[BusLogic_MaxTargetDevices]; - unsigned char RequestedReplyLength; - int TargetID; + u16 installed_devs; + u8 installed_devs0to7[8]; + struct blogic_setup_info setupinfo; + u8 sync_period[BLOGIC_MAXDEV]; + unsigned char req_replylen; + int tgt_id; + /* - Wait a few seconds between the Host Adapter Hard Reset which initiates - a SCSI Bus Reset and issuing any SCSI Commands. Some SCSI devices get - confused if they receive SCSI Commands too soon after a SCSI Bus Reset. + Wait a few seconds between the Host Adapter Hard Reset which + initiates a SCSI Bus Reset and issuing any SCSI Commands. Some + SCSI devices get confused if they receive SCSI Commands too soon + after a SCSI Bus Reset. */ - BusLogic_Delay(HostAdapter->BusSettleTime); + blogic_delay(adapter->bus_settle_time); /* FlashPoint Host Adapters do not provide for Target Device Inquiry. 
 */
-	if (BusLogic_FlashPointHostAdapterP(HostAdapter))
+	if (blogic_flashpoint_type(adapter))
 		return true;
 	/*
 	   Inhibit the Target Device Inquiry if requested.
 	 */
-	if (HostAdapter->DriverOptions != NULL && HostAdapter->DriverOptions->LocalOptions.InhibitTargetInquiry)
+	if (adapter->drvr_opts != NULL && adapter->drvr_opts->stop_tgt_inquiry)
 		return true;
 	/*
-	   Issue the Inquire Target Devices command for host adapters with firmware
-	   version 4.25 or later, or the Inquire Installed Devices ID 0 to 7 command
-	   for older host adapters. This is necessary to force Synchronous Transfer
-	   Negotiation so that the Inquire Setup Information and Inquire Synchronous
-	   Period commands will return valid data. The Inquire Target Devices command
-	   is preferable to Inquire Installed Devices ID 0 to 7 since it only probes
-	   Logical Unit 0 of each Target Device.
+	   Issue the Inquire Target Devices command for host adapters with
+	   firmware version 4.25 or later, or the Inquire Installed Devices
+	   ID 0 to 7 command for older host adapters. This is necessary to
+	   force Synchronous Transfer Negotiation so that the Inquire Setup
+	   Information and Inquire Synchronous Period commands will return
+	   valid data. The Inquire Target Devices command is preferable to
+	   Inquire Installed Devices ID 0 to 7 since it only probes Logical
+	   Unit 0 of each Target Device.
 	 */
-	if (strcmp(HostAdapter->FirmwareVersion, "4.25") >= 0) {
+	if (strcmp(adapter->fw_ver, "4.25") >= 0) {
 		/*
-		 * Issue a Inquire Target Devices command. Inquire Target Devices only
-		 * tests Logical Unit 0 of each Target Device unlike the Inquire Installed
-		 * Devices commands which test Logical Units 0 - 7. Two bytes are
-		 * returned, where byte 0 bit 0 set indicates that Target Device 0 exists,
-		 * and so on.
+			Issue an Inquire Target Devices command. Inquire Target
+			Devices only tests Logical Unit 0 of each Target Device
+			unlike the Inquire Installed Devices commands which test
+			Logical Units 0 - 7. Two bytes are returned, where byte
+			0 bit 0 set indicates that Target Device 0 exists, and so on.
 		 */
-		if (BusLogic_Command(HostAdapter, BusLogic_InquireTargetDevices, NULL, 0, &InstalledDevices, sizeof(InstalledDevices))
-		    != sizeof(InstalledDevices))
-			return BusLogic_Failure(HostAdapter, "INQUIRE TARGET DEVICES");
-		for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
-			HostAdapter->TargetFlags[TargetID].TargetExists = (InstalledDevices & (1 << TargetID) ? true : false);
+		if (blogic_cmd(adapter, BLOGIC_INQ_DEV, NULL, 0,
+				&installed_devs, sizeof(installed_devs))
+				!= sizeof(installed_devs))
+			return blogic_failure(adapter, "INQUIRE TARGET DEVICES");
+		for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
+			adapter->tgt_flags[tgt_id].tgt_exists =
+				(installed_devs & (1 << tgt_id) ? true : false);
 	} else {
 		/*
-		 * Issue an Inquire Installed Devices command. For each Target Device,
-		 * a byte is returned where bit 0 set indicates that Logical Unit 0
-		 * exists, bit 1 set indicates that Logical Unit 1 exists, and so on.
+			Issue an Inquire Installed Devices command. For each
+			Target Device, a byte is returned where bit 0 set
+			indicates that Logical Unit 0 exists, bit 1 set
+			indicates that Logical Unit 1 exists, and so on.
 */
-		if (BusLogic_Command(HostAdapter, BusLogic_InquireInstalledDevicesID0to7, NULL, 0, &InstalledDevicesID0to7, sizeof(InstalledDevicesID0to7))
-		    != sizeof(InstalledDevicesID0to7))
-			return BusLogic_Failure(HostAdapter, "INQUIRE INSTALLED DEVICES ID 0 TO 7");
-		for (TargetID = 0; TargetID < 8; TargetID++)
-			HostAdapter->TargetFlags[TargetID].TargetExists = (InstalledDevicesID0to7[TargetID] != 0 ? true : false);
+		if (blogic_cmd(adapter, BLOGIC_INQ_DEV0TO7, NULL, 0,
+				&installed_devs0to7, sizeof(installed_devs0to7))
+				!= sizeof(installed_devs0to7))
+			return blogic_failure(adapter,
+					"INQUIRE INSTALLED DEVICES ID 0 TO 7");
+		for (tgt_id = 0; tgt_id < 8; tgt_id++)
+			adapter->tgt_flags[tgt_id].tgt_exists =
+				(installed_devs0to7[tgt_id] != 0 ? true : false);
 	}
 	/*
 	   Issue the Inquire Setup Information command.
 	 */
-	RequestedReplyLength = sizeof(SetupInformation);
-	if (BusLogic_Command(HostAdapter, BusLogic_InquireSetupInformation, &RequestedReplyLength, sizeof(RequestedReplyLength), &SetupInformation, sizeof(SetupInformation))
-	    != sizeof(SetupInformation))
-		return BusLogic_Failure(HostAdapter, "INQUIRE SETUP INFORMATION");
-	for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
-		HostAdapter->SynchronousOffset[TargetID] = (TargetID < 8 ? SetupInformation.SynchronousValuesID0to7[TargetID].Offset : SetupInformation.SynchronousValuesID8to15[TargetID - 8].Offset);
-	if (strcmp(HostAdapter->FirmwareVersion, "5.06L") >= 0)
-		for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++)
-			HostAdapter->TargetFlags[TargetID].WideTransfersActive = (TargetID < 8 ? (SetupInformation.WideTransfersActiveID0to7 & (1 << TargetID) ? true : false)
-				: (SetupInformation.WideTransfersActiveID8to15 & (1 << (TargetID - 8)) ? true : false));
+	req_replylen = sizeof(setupinfo);
+	if (blogic_cmd(adapter, BLOGIC_INQ_SETUPINFO, &req_replylen,
+			sizeof(req_replylen), &setupinfo, sizeof(setupinfo))
+			!= sizeof(setupinfo))
+		return blogic_failure(adapter, "INQUIRE SETUP INFORMATION");
+	for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
+		adapter->sync_offset[tgt_id] = (tgt_id < 8 ? setupinfo.sync0to7[tgt_id].offset : setupinfo.sync8to15[tgt_id - 8].offset);
+	if (strcmp(adapter->fw_ver, "5.06L") >= 0)
+		for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++)
+			adapter->tgt_flags[tgt_id].wide_active = (tgt_id < 8 ? (setupinfo.wide_tx_active0to7 & (1 << tgt_id) ? true : false) : (setupinfo.wide_tx_active8to15 & (1 << (tgt_id - 8)) ? true : false));
 	/*
 	   Issue the Inquire Synchronous Period command.
 	 */
-	if (HostAdapter->FirmwareVersion[0] >= '3') {
+	if (adapter->fw_ver[0] >= '3') {
 
-		/* Issue a Inquire Synchronous Period command. For each Target Device,
-		 * a byte is returned which represents the Synchronous Transfer Period
-		 * in units of 10 nanoseconds.
+		/* Issue an Inquire Synchronous Period command. For each
+		   Target Device, a byte is returned which represents the
+		   Synchronous Transfer Period in units of 10 nanoseconds.
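
Both inquiry paths above finish the same way: a device bitmap is unpacked into per-target booleans. The bit-per-target case (the two-byte Inquire Target Devices reply) as a standalone loop, with MAXDEV standing in for BLOGIC_MAXDEV:

	#include <stdbool.h>
	#include <stdint.h>

	#define MAXDEV 16	/* stand-in for BLOGIC_MAXDEV */

	/* Byte 0 bit 0 set means Target Device 0 exists, and so on. */
	static void decode_installed(uint16_t installed, bool exists[MAXDEV])
	{
		int tgt_id;

		for (tgt_id = 0; tgt_id < MAXDEV; tgt_id++)
			exists[tgt_id] = (installed & (1u << tgt_id)) != 0;
	}
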
*/ - RequestedReplyLength = sizeof(SynchronousPeriod); - if (BusLogic_Command(HostAdapter, BusLogic_InquireSynchronousPeriod, &RequestedReplyLength, sizeof(RequestedReplyLength), &SynchronousPeriod, sizeof(SynchronousPeriod)) - != sizeof(SynchronousPeriod)) - return BusLogic_Failure(HostAdapter, "INQUIRE SYNCHRONOUS PERIOD"); - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) - HostAdapter->SynchronousPeriod[TargetID] = SynchronousPeriod[TargetID]; + req_replylen = sizeof(sync_period); + if (blogic_cmd(adapter, BLOGIC_INQ_SYNC_PERIOD, &req_replylen, + sizeof(req_replylen), &sync_period, + sizeof(sync_period)) != sizeof(sync_period)) + return blogic_failure(adapter, + "INQUIRE SYNCHRONOUS PERIOD"); + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + adapter->sync_period[tgt_id] = sync_period[tgt_id]; } else - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) - if (SetupInformation.SynchronousValuesID0to7[TargetID].Offset > 0) - HostAdapter->SynchronousPeriod[TargetID] = 20 + 5 * SetupInformation.SynchronousValuesID0to7[TargetID] - .TransferPeriod; + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + if (setupinfo.sync0to7[tgt_id].offset > 0) + adapter->sync_period[tgt_id] = 20 + 5 * setupinfo.sync0to7[tgt_id].tx_period; /* Indicate the Target Device Inquiry completed successfully. */ @@ -2113,7 +2285,7 @@ static bool __init BusLogic_TargetDeviceInquiry(struct BusLogic_HostAdapter } /* - BusLogic_InitializeHostStructure initializes the fields in the SCSI Host + blogic_inithoststruct initializes the fields in the SCSI Host structure. The base, io_port, n_io_ports, irq, and dma_channel fields in the SCSI Host structure are intentionally left uninitialized, as this driver handles acquisition and release of these resources explicitly, as well as @@ -2121,517 +2293,555 @@ static bool __init BusLogic_TargetDeviceInquiry(struct BusLogic_HostAdapter through explicit acquisition and release of the Host Adapter's Lock. */ -static void __init BusLogic_InitializeHostStructure(struct BusLogic_HostAdapter - *HostAdapter, struct Scsi_Host *Host) +static void __init blogic_inithoststruct(struct blogic_adapter *adapter, + struct Scsi_Host *host) { - Host->max_id = HostAdapter->MaxTargetDevices; - Host->max_lun = HostAdapter->MaxLogicalUnits; - Host->max_channel = 0; - Host->unique_id = HostAdapter->IO_Address; - Host->this_id = HostAdapter->SCSI_ID; - Host->can_queue = HostAdapter->DriverQueueDepth; - Host->sg_tablesize = HostAdapter->DriverScatterGatherLimit; - Host->unchecked_isa_dma = HostAdapter->BounceBuffersRequired; - Host->cmd_per_lun = HostAdapter->UntaggedQueueDepth; + host->max_id = adapter->maxdev; + host->max_lun = adapter->maxlun; + host->max_channel = 0; + host->unique_id = adapter->io_addr; + host->this_id = adapter->scsi_id; + host->can_queue = adapter->drvr_qdepth; + host->sg_tablesize = adapter->drvr_sglimit; + host->unchecked_isa_dma = adapter->need_bouncebuf; + host->cmd_per_lun = adapter->untag_qdepth; } /* - BusLogic_SlaveConfigure will actually set the queue depth on individual + blogic_slaveconfig will actually set the queue depth on individual scsi devices as they are permanently added to the device chain. We shamelessly rip off the SelectQueueDepths code to make this work mostly like it used to. Since we don't get called once at the end of the scan but instead get called for each device, we have to do things a bit differently. 
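
A note on the synchronous period fallback in blogic_inquiry above: firmware older than 3.x never reports the period directly, so it is reconstructed from the setup-information transfer period code. Taking the neighbouring comment's units (10 nanoseconds) at face value, code 0 maps to 20 (200 ns) and code 2 to 30 (300 ns):

	/* Derive a synchronous period, in 10 ns units, from a setup-info
	   transfer period code; the driver applies this only to targets
	   reporting a non-zero synchronous offset. */
	static unsigned int sync_period_from_code(unsigned char tx_period)
	{
		return 20 + 5 * tx_period;	/* e.g. code 2 -> 30 -> 300 ns */
	}
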
*/ -static int BusLogic_SlaveConfigure(struct scsi_device *Device) +static int blogic_slaveconfig(struct scsi_device *dev) { - struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) Device->host->hostdata; - int TargetID = Device->id; - int QueueDepth = HostAdapter->QueueDepth[TargetID]; - - if (HostAdapter->TargetFlags[TargetID].TaggedQueuingSupported && (HostAdapter->TaggedQueuingPermitted & (1 << TargetID))) { - if (QueueDepth == 0) - QueueDepth = BusLogic_MaxAutomaticTaggedQueueDepth; - HostAdapter->QueueDepth[TargetID] = QueueDepth; - scsi_adjust_queue_depth(Device, MSG_SIMPLE_TAG, QueueDepth); + struct blogic_adapter *adapter = + (struct blogic_adapter *) dev->host->hostdata; + int tgt_id = dev->id; + int qdepth = adapter->qdepth[tgt_id]; + + if (adapter->tgt_flags[tgt_id].tagq_ok && + (adapter->tagq_ok & (1 << tgt_id))) { + if (qdepth == 0) + qdepth = BLOGIC_MAX_AUTO_TAG_DEPTH; + adapter->qdepth[tgt_id] = qdepth; + scsi_adjust_queue_depth(dev, MSG_SIMPLE_TAG, qdepth); } else { - HostAdapter->TaggedQueuingPermitted &= ~(1 << TargetID); - QueueDepth = HostAdapter->UntaggedQueueDepth; - HostAdapter->QueueDepth[TargetID] = QueueDepth; - scsi_adjust_queue_depth(Device, 0, QueueDepth); - } - QueueDepth = 0; - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) - if (HostAdapter->TargetFlags[TargetID].TargetExists) { - QueueDepth += HostAdapter->QueueDepth[TargetID]; - } - if (QueueDepth > HostAdapter->AllocatedCCBs) - BusLogic_CreateAdditionalCCBs(HostAdapter, QueueDepth - HostAdapter->AllocatedCCBs, false); + adapter->tagq_ok &= ~(1 << tgt_id); + qdepth = adapter->untag_qdepth; + adapter->qdepth[tgt_id] = qdepth; + scsi_adjust_queue_depth(dev, 0, qdepth); + } + qdepth = 0; + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) + if (adapter->tgt_flags[tgt_id].tgt_exists) + qdepth += adapter->qdepth[tgt_id]; + if (qdepth > adapter->alloc_ccbs) + blogic_create_addlccbs(adapter, qdepth - adapter->alloc_ccbs, + false); return 0; } /* - BusLogic_DetectHostAdapter probes for BusLogic Host Adapters at the standard + blogic_init probes for BusLogic Host Adapters at the standard I/O Addresses where they may be located, initializing, registering, and reporting the configuration of each BusLogic Host Adapter it finds. It returns the number of BusLogic Host Adapters successfully initialized and registered. 
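
blogic_slaveconfig's closing accounting is easy to miss: after adjusting one device it re-sums the depth of every target known to exist and grows the CCB pool only when the aggregate exceeds what is already allocated. That computation in isolation:

	#include <stdbool.h>

	#define MAXDEV 16	/* stand-in for BLOGIC_MAXDEV */

	/* How many more CCBs are needed so every existing target could
	   have all of its queue slots in flight at once; 0 means the
	   current pool already suffices. */
	static int ccb_shortfall(const bool exists[MAXDEV],
				 const int qdepth[MAXDEV], int alloc_ccbs)
	{
		int tgt_id, total = 0;

		for (tgt_id = 0; tgt_id < MAXDEV; tgt_id++)
			if (exists[tgt_id])
				total += qdepth[tgt_id];
		return total > alloc_ccbs ? total - alloc_ccbs : 0;
	}
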
*/ -static int __init BusLogic_init(void) +static int __init blogic_init(void) { - int BusLogicHostAdapterCount = 0, DriverOptionsIndex = 0, ProbeIndex; - struct BusLogic_HostAdapter *PrototypeHostAdapter; + int adapter_count = 0, drvr_optindex = 0, probeindex; + struct blogic_adapter *adapter; int ret = 0; #ifdef MODULE if (BusLogic) - BusLogic_Setup(BusLogic); + blogic_setup(BusLogic); #endif - if (BusLogic_ProbeOptions.NoProbe) + if (blogic_probe_options.noprobe) return -ENODEV; - BusLogic_ProbeInfoList = - kzalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_KERNEL); - if (BusLogic_ProbeInfoList == NULL) { - BusLogic_Error("BusLogic: Unable to allocate Probe Info List\n", NULL); + blogic_probeinfo_list = + kzalloc(BLOGIC_MAX_ADAPTERS * sizeof(struct blogic_probeinfo), + GFP_KERNEL); + if (blogic_probeinfo_list == NULL) { + blogic_err("BusLogic: Unable to allocate Probe Info List\n", + NULL); return -ENOMEM; } - PrototypeHostAdapter = - kzalloc(sizeof(struct BusLogic_HostAdapter), GFP_KERNEL); - if (PrototypeHostAdapter == NULL) { - kfree(BusLogic_ProbeInfoList); - BusLogic_Error("BusLogic: Unable to allocate Prototype " "Host Adapter\n", NULL); + adapter = kzalloc(sizeof(struct blogic_adapter), GFP_KERNEL); + if (adapter == NULL) { + kfree(blogic_probeinfo_list); + blogic_err("BusLogic: Unable to allocate Prototype Host Adapter\n", NULL); return -ENOMEM; } #ifdef MODULE if (BusLogic != NULL) - BusLogic_Setup(BusLogic); + blogic_setup(BusLogic); #endif - BusLogic_InitializeProbeInfoList(PrototypeHostAdapter); - for (ProbeIndex = 0; ProbeIndex < BusLogic_ProbeInfoCount; ProbeIndex++) { - struct BusLogic_ProbeInfo *ProbeInfo = &BusLogic_ProbeInfoList[ProbeIndex]; - struct BusLogic_HostAdapter *HostAdapter = PrototypeHostAdapter; - struct Scsi_Host *Host; - if (ProbeInfo->IO_Address == 0) + blogic_init_probeinfo_list(adapter); + for (probeindex = 0; probeindex < blogic_probeinfo_count; probeindex++) { + struct blogic_probeinfo *probeinfo = + &blogic_probeinfo_list[probeindex]; + struct blogic_adapter *myadapter = adapter; + struct Scsi_Host *host; + + if (probeinfo->io_addr == 0) continue; - memset(HostAdapter, 0, sizeof(struct BusLogic_HostAdapter)); - HostAdapter->HostAdapterType = ProbeInfo->HostAdapterType; - HostAdapter->HostAdapterBusType = ProbeInfo->HostAdapterBusType; - HostAdapter->IO_Address = ProbeInfo->IO_Address; - HostAdapter->PCI_Address = ProbeInfo->PCI_Address; - HostAdapter->Bus = ProbeInfo->Bus; - HostAdapter->Device = ProbeInfo->Device; - HostAdapter->PCI_Device = ProbeInfo->PCI_Device; - HostAdapter->IRQ_Channel = ProbeInfo->IRQ_Channel; - HostAdapter->AddressCount = BusLogic_HostAdapterAddressCount[HostAdapter->HostAdapterType]; + memset(myadapter, 0, sizeof(struct blogic_adapter)); + myadapter->adapter_type = probeinfo->adapter_type; + myadapter->adapter_bus_type = probeinfo->adapter_bus_type; + myadapter->io_addr = probeinfo->io_addr; + myadapter->pci_addr = probeinfo->pci_addr; + myadapter->bus = probeinfo->bus; + myadapter->dev = probeinfo->dev; + myadapter->pci_device = probeinfo->pci_device; + myadapter->irq_ch = probeinfo->irq_ch; + myadapter->addr_count = + blogic_adapter_addr_count[myadapter->adapter_type]; /* Make sure region is free prior to probing. */ - if (!request_region(HostAdapter->IO_Address, HostAdapter->AddressCount, + if (!request_region(myadapter->io_addr, myadapter->addr_count, "BusLogic")) continue; /* - Probe the Host Adapter. If unsuccessful, abort further initialization. + Probe the Host Adapter. 
If unsuccessful, abort further + initialization. */ - if (!BusLogic_ProbeHostAdapter(HostAdapter)) { - release_region(HostAdapter->IO_Address, HostAdapter->AddressCount); + if (!blogic_probe(myadapter)) { + release_region(myadapter->io_addr, + myadapter->addr_count); continue; } /* Hard Reset the Host Adapter. If unsuccessful, abort further initialization. */ - if (!BusLogic_HardwareResetHostAdapter(HostAdapter, true)) { - release_region(HostAdapter->IO_Address, HostAdapter->AddressCount); + if (!blogic_hwreset(myadapter, true)) { + release_region(myadapter->io_addr, + myadapter->addr_count); continue; } /* - Check the Host Adapter. If unsuccessful, abort further initialization. + Check the Host Adapter. If unsuccessful, abort further + initialization. */ - if (!BusLogic_CheckHostAdapter(HostAdapter)) { - release_region(HostAdapter->IO_Address, HostAdapter->AddressCount); + if (!blogic_checkadapter(myadapter)) { + release_region(myadapter->io_addr, + myadapter->addr_count); continue; } /* Initialize the Driver Options field if provided. */ - if (DriverOptionsIndex < BusLogic_DriverOptionsCount) - HostAdapter->DriverOptions = &BusLogic_DriverOptions[DriverOptionsIndex++]; + if (drvr_optindex < blogic_drvr_options_count) + myadapter->drvr_opts = + &blogic_drvr_options[drvr_optindex++]; /* - Announce the Driver Version and Date, Author's Name, Copyright Notice, - and Electronic Mail Address. + Announce the Driver Version and Date, Author's Name, + Copyright Notice, and Electronic Mail Address. */ - BusLogic_AnnounceDriver(HostAdapter); + blogic_announce_drvr(myadapter); /* Register the SCSI Host structure. */ - Host = scsi_host_alloc(&Bus_Logic_template, sizeof(struct BusLogic_HostAdapter)); - if (Host == NULL) { - release_region(HostAdapter->IO_Address, HostAdapter->AddressCount); + host = scsi_host_alloc(&blogic_template, + sizeof(struct blogic_adapter)); + if (host == NULL) { + release_region(myadapter->io_addr, + myadapter->addr_count); continue; } - HostAdapter = (struct BusLogic_HostAdapter *) Host->hostdata; - memcpy(HostAdapter, PrototypeHostAdapter, sizeof(struct BusLogic_HostAdapter)); - HostAdapter->SCSI_Host = Host; - HostAdapter->HostNumber = Host->host_no; + myadapter = (struct blogic_adapter *) host->hostdata; + memcpy(myadapter, adapter, sizeof(struct blogic_adapter)); + myadapter->scsi_host = host; + myadapter->host_no = host->host_no; /* - Add Host Adapter to the end of the list of registered BusLogic - Host Adapters. + Add Host Adapter to the end of the list of registered + BusLogic Host Adapters. */ - list_add_tail(&HostAdapter->host_list, &BusLogic_host_list); + list_add_tail(&myadapter->host_list, &blogic_host_list); /* - Read the Host Adapter Configuration, Configure the Host Adapter, - Acquire the System Resources necessary to use the Host Adapter, then - Create the Initial CCBs, Initialize the Host Adapter, and finally - perform Target Device Inquiry. - - From this point onward, any failure will be assumed to be due to a - problem with the Host Adapter, rather than due to having mistakenly - identified this port as belonging to a BusLogic Host Adapter. The - I/O Address range will not be released, thereby preventing it from - being incorrectly identified as any other type of Host Adapter. + Read the Host Adapter Configuration, Configure the Host + Adapter, Acquire the System Resources necessary to use + the Host Adapter, then Create the Initial CCBs, Initialize + the Host Adapter, and finally perform Target Device + Inquiry. 
From this point onward, any failure will be + assumed to be due to a problem with the Host Adapter, + rather than due to having mistakenly identified this port + as belonging to a BusLogic Host Adapter. The I/O Address + range will not be released, thereby preventing it from + being incorrectly identified as any other type of Host + Adapter. */ - if (BusLogic_ReadHostAdapterConfiguration(HostAdapter) && - BusLogic_ReportHostAdapterConfiguration(HostAdapter) && - BusLogic_AcquireResources(HostAdapter) && - BusLogic_CreateInitialCCBs(HostAdapter) && - BusLogic_InitializeHostAdapter(HostAdapter) && - BusLogic_TargetDeviceInquiry(HostAdapter)) { + if (blogic_rdconfig(myadapter) && + blogic_reportconfig(myadapter) && + blogic_getres(myadapter) && + blogic_create_initccbs(myadapter) && + blogic_initadapter(myadapter) && + blogic_inquiry(myadapter)) { /* - Initialization has been completed successfully. Release and - re-register usage of the I/O Address range so that the Model - Name of the Host Adapter will appear, and initialize the SCSI - Host structure. + Initialization has been completed successfully. + Release and re-register usage of the I/O Address + range so that the Model Name of the Host Adapter + will appear, and initialize the SCSI Host structure. */ - release_region(HostAdapter->IO_Address, - HostAdapter->AddressCount); - if (!request_region(HostAdapter->IO_Address, - HostAdapter->AddressCount, - HostAdapter->FullModelName)) { + release_region(myadapter->io_addr, + myadapter->addr_count); + if (!request_region(myadapter->io_addr, + myadapter->addr_count, + myadapter->full_model)) { printk(KERN_WARNING "BusLogic: Release and re-register of " "port 0x%04lx failed \n", - (unsigned long)HostAdapter->IO_Address); - BusLogic_DestroyCCBs(HostAdapter); - BusLogic_ReleaseResources(HostAdapter); - list_del(&HostAdapter->host_list); - scsi_host_put(Host); + (unsigned long)myadapter->io_addr); + blogic_destroy_ccbs(myadapter); + blogic_relres(myadapter); + list_del(&myadapter->host_list); + scsi_host_put(host); ret = -ENOMEM; } else { - BusLogic_InitializeHostStructure(HostAdapter, - Host); - if (scsi_add_host(Host, HostAdapter->PCI_Device - ? &HostAdapter->PCI_Device->dev + blogic_inithoststruct(myadapter, + host); + if (scsi_add_host(host, myadapter->pci_device + ? &myadapter->pci_device->dev : NULL)) { printk(KERN_WARNING "BusLogic: scsi_add_host()" "failed!\n"); - BusLogic_DestroyCCBs(HostAdapter); - BusLogic_ReleaseResources(HostAdapter); - list_del(&HostAdapter->host_list); - scsi_host_put(Host); + blogic_destroy_ccbs(myadapter); + blogic_relres(myadapter); + list_del(&myadapter->host_list); + scsi_host_put(host); ret = -ENODEV; } else { - scsi_scan_host(Host); - BusLogicHostAdapterCount++; + scsi_scan_host(host); + adapter_count++; } } } else { /* - An error occurred during Host Adapter Configuration Querying, Host - Adapter Configuration, Resource Acquisition, CCB Creation, Host - Adapter Initialization, or Target Device Inquiry, so remove Host - Adapter from the list of registered BusLogic Host Adapters, destroy - the CCBs, Release the System Resources, and Unregister the SCSI + An error occurred during Host Adapter Configuration + Querying, Host Adapter Configuration, Resource + Acquisition, CCB Creation, Host Adapter + Initialization, or Target Device Inquiry, so + remove Host Adapter from the list of registered + BusLogic Host Adapters, destroy the CCBs, Release + the System Resources, and Unregister the SCSI Host. 
*/ - BusLogic_DestroyCCBs(HostAdapter); - BusLogic_ReleaseResources(HostAdapter); - list_del(&HostAdapter->host_list); - scsi_host_put(Host); + blogic_destroy_ccbs(myadapter); + blogic_relres(myadapter); + list_del(&myadapter->host_list); + scsi_host_put(host); ret = -ENODEV; } } - kfree(PrototypeHostAdapter); - kfree(BusLogic_ProbeInfoList); - BusLogic_ProbeInfoList = NULL; + kfree(adapter); + kfree(blogic_probeinfo_list); + blogic_probeinfo_list = NULL; return ret; } /* - BusLogic_ReleaseHostAdapter releases all resources previously acquired to + blogic_deladapter releases all resources previously acquired to support a specific Host Adapter, including the I/O Address range, and unregisters the BusLogic Host Adapter. */ -static int __exit BusLogic_ReleaseHostAdapter(struct BusLogic_HostAdapter *HostAdapter) +static int __exit blogic_deladapter(struct blogic_adapter *adapter) { - struct Scsi_Host *Host = HostAdapter->SCSI_Host; + struct Scsi_Host *host = adapter->scsi_host; - scsi_remove_host(Host); + scsi_remove_host(host); /* FlashPoint Host Adapters must first be released by the FlashPoint SCCB Manager. */ - if (BusLogic_FlashPointHostAdapterP(HostAdapter)) - FlashPoint_ReleaseHostAdapter(HostAdapter->CardHandle); + if (blogic_flashpoint_type(adapter)) + FlashPoint_ReleaseHostAdapter(adapter->cardhandle); /* Destroy the CCBs and release any system resources acquired to support Host Adapter. */ - BusLogic_DestroyCCBs(HostAdapter); - BusLogic_ReleaseResources(HostAdapter); + blogic_destroy_ccbs(adapter); + blogic_relres(adapter); /* Release usage of the I/O Address range. */ - release_region(HostAdapter->IO_Address, HostAdapter->AddressCount); + release_region(adapter->io_addr, adapter->addr_count); /* - Remove Host Adapter from the list of registered BusLogic Host Adapters. + Remove Host Adapter from the list of registered BusLogic + Host Adapters. */ - list_del(&HostAdapter->host_list); + list_del(&adapter->host_list); - scsi_host_put(Host); + scsi_host_put(host); return 0; } /* - BusLogic_QueueCompletedCCB queues CCB for completion processing. + blogic_qcompleted_ccb queues CCB for completion processing. */ -static void BusLogic_QueueCompletedCCB(struct BusLogic_CCB *CCB) +static void blogic_qcompleted_ccb(struct blogic_ccb *ccb) { - struct BusLogic_HostAdapter *HostAdapter = CCB->HostAdapter; - CCB->Status = BusLogic_CCB_Completed; - CCB->Next = NULL; - if (HostAdapter->FirstCompletedCCB == NULL) { - HostAdapter->FirstCompletedCCB = CCB; - HostAdapter->LastCompletedCCB = CCB; + struct blogic_adapter *adapter = ccb->adapter; + + ccb->status = BLOGIC_CCB_COMPLETE; + ccb->next = NULL; + if (adapter->firstccb == NULL) { + adapter->firstccb = ccb; + adapter->lastccb = ccb; } else { - HostAdapter->LastCompletedCCB->Next = CCB; - HostAdapter->LastCompletedCCB = CCB; + adapter->lastccb->next = ccb; + adapter->lastccb = ccb; } - HostAdapter->ActiveCommands[CCB->TargetID]--; + adapter->active_cmds[ccb->tgt_id]--; } /* - BusLogic_ComputeResultCode computes a SCSI Subsystem Result Code from + blogic_resultcode computes a SCSI Subsystem Result Code from the Host Adapter Status and Target Device Status. 
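
blogic_qcompleted_ccb above keeps a first/last pointer pair for O(1) tail appends, with the empty queue as the only special case. The same structure stripped to its essentials:

	#include <stddef.h>

	struct node { struct node *next; };
	struct fifo { struct node *first, *last; };

	/* Append in O(1): an empty queue sets both ends; otherwise only
	   the tail pointer moves, mirroring the first/last CCB fields. */
	static void fifo_append(struct fifo *q, struct node *n)
	{
		n->next = NULL;
		if (q->first == NULL)
			q->first = q->last = n;
		else {
			q->last->next = n;
			q->last = n;
		}
	}
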
*/ -static int BusLogic_ComputeResultCode(struct BusLogic_HostAdapter *HostAdapter, enum BusLogic_HostAdapterStatus HostAdapterStatus, enum BusLogic_TargetDeviceStatus TargetDeviceStatus) +static int blogic_resultcode(struct blogic_adapter *adapter, + enum blogic_adapter_status adapter_status, + enum blogic_tgt_status tgt_status) { - int HostStatus; - switch (HostAdapterStatus) { - case BusLogic_CommandCompletedNormally: - case BusLogic_LinkedCommandCompleted: - case BusLogic_LinkedCommandCompletedWithFlag: - HostStatus = DID_OK; + int hoststatus; + + switch (adapter_status) { + case BLOGIC_CMD_CMPLT_NORMAL: + case BLOGIC_LINK_CMD_CMPLT: + case BLOGIC_LINK_CMD_CMPLT_FLAG: + hoststatus = DID_OK; break; - case BusLogic_SCSISelectionTimeout: - HostStatus = DID_TIME_OUT; + case BLOGIC_SELECT_TIMEOUT: + hoststatus = DID_TIME_OUT; break; - case BusLogic_InvalidOutgoingMailboxActionCode: - case BusLogic_InvalidCommandOperationCode: - case BusLogic_InvalidCommandParameter: - BusLogic_Warning("BusLogic Driver Protocol Error 0x%02X\n", HostAdapter, HostAdapterStatus); - case BusLogic_DataUnderRun: - case BusLogic_DataOverRun: - case BusLogic_UnexpectedBusFree: - case BusLogic_LinkedCCBhasInvalidLUN: - case BusLogic_AutoRequestSenseFailed: - case BusLogic_TaggedQueuingMessageRejected: - case BusLogic_UnsupportedMessageReceived: - case BusLogic_HostAdapterHardwareFailed: - case BusLogic_TargetDeviceReconnectedImproperly: - case BusLogic_AbortQueueGenerated: - case BusLogic_HostAdapterSoftwareError: - case BusLogic_HostAdapterHardwareTimeoutError: - case BusLogic_SCSIParityErrorDetected: - HostStatus = DID_ERROR; + case BLOGIC_INVALID_OUTBOX_CODE: + case BLOGIC_INVALID_CMD_CODE: + case BLOGIC_BAD_CMD_PARAM: + blogic_warn("BusLogic Driver Protocol Error 0x%02X\n", + adapter, adapter_status); + case BLOGIC_DATA_UNDERRUN: + case BLOGIC_DATA_OVERRUN: + case BLOGIC_NOEXPECT_BUSFREE: + case BLOGIC_LINKCCB_BADLUN: + case BLOGIC_AUTOREQSENSE_FAIL: + case BLOGIC_TAGQUEUE_REJECT: + case BLOGIC_BAD_MSG_RCVD: + case BLOGIC_HW_FAIL: + case BLOGIC_BAD_RECONNECT: + case BLOGIC_ABRT_QUEUE: + case BLOGIC_ADAPTER_SW_ERROR: + case BLOGIC_HW_TIMEOUT: + case BLOGIC_PARITY_ERR: + hoststatus = DID_ERROR; break; - case BusLogic_InvalidBusPhaseRequested: - case BusLogic_TargetFailedResponseToATN: - case BusLogic_HostAdapterAssertedRST: - case BusLogic_OtherDeviceAssertedRST: - case BusLogic_HostAdapterAssertedBusDeviceReset: - HostStatus = DID_RESET; + case BLOGIC_INVALID_BUSPHASE: + case BLOGIC_NORESPONSE_TO_ATN: + case BLOGIC_HW_RESET: + case BLOGIC_RST_FROM_OTHERDEV: + case BLOGIC_HW_BDR: + hoststatus = DID_RESET; break; default: - BusLogic_Warning("Unknown Host Adapter Status 0x%02X\n", HostAdapter, HostAdapterStatus); - HostStatus = DID_ERROR; + blogic_warn("Unknown Host Adapter Status 0x%02X\n", adapter, + adapter_status); + hoststatus = DID_ERROR; break; } - return (HostStatus << 16) | TargetDeviceStatus; + return (hoststatus << 16) | tgt_status; } /* - BusLogic_ScanIncomingMailboxes scans the Incoming Mailboxes saving any + blogic_scan_inbox scans the Incoming Mailboxes saving any Incoming Mailbox entries for completion processing. */ -static void BusLogic_ScanIncomingMailboxes(struct BusLogic_HostAdapter *HostAdapter) +static void blogic_scan_inbox(struct blogic_adapter *adapter) { /* - Scan through the Incoming Mailboxes in Strict Round Robin fashion, saving - any completed CCBs for further processing. 
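
The shift-and-or at the end of blogic_resultcode follows the midlayer's result layout of this era: target status in bits 0-7 and the DID_* host code in bits 16-23. Two worked values, using the scsi.h constants DID_TIME_OUT (0x03) and SAM_STAT_CHECK_CONDITION (0x02):

	#include <stdio.h>

	static int pack_result(int hoststatus, int tgt_status)
	{
		return (hoststatus << 16) | tgt_status;
	}

	int main(void)
	{
		/* Selection timeout, no target status: 0x00030000. */
		printf("0x%08x\n", (unsigned) pack_result(0x03, 0x00));
		/* DID_OK with CHECK CONDITION: 0x00000002. */
		printf("0x%08x\n", (unsigned) pack_result(0x00, 0x02));
		return 0;
	}
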
It is essential that for each
-	   CCB and SCSI Command issued, command completion processing is performed
-	   exactly once. Therefore, only Incoming Mailboxes with completion code
-	   Command Completed Without Error, Command Completed With Error, or Command
-	   Aborted At Host Request are saved for completion processing. When an
-	   Incoming Mailbox has a completion code of Aborted Command Not Found, the
-	   CCB had already completed or been aborted before the current Abort request
-	   was processed, and so completion processing has already occurred and no
-	   further action should be taken.
-	 */
-	struct BusLogic_IncomingMailbox *NextIncomingMailbox = HostAdapter->NextIncomingMailbox;
-	enum BusLogic_CompletionCode CompletionCode;
-	while ((CompletionCode = NextIncomingMailbox->CompletionCode) != BusLogic_IncomingMailboxFree) {
+	   Scan through the Incoming Mailboxes in Strict Round Robin
+	   fashion, saving any completed CCBs for further processing. It
+	   is essential that for each CCB and SCSI Command issued, command
+	   completion processing is performed exactly once. Therefore,
+	   only Incoming Mailboxes with completion code Command Completed
+	   Without Error, Command Completed With Error, or Command Aborted
+	   At Host Request are saved for completion processing. When an
+	   Incoming Mailbox has a completion code of Aborted Command Not
+	   Found, the CCB had already completed or been aborted before the
+	   current Abort request was processed, and so completion processing
+	   has already occurred and no further action should be taken.
+	 */
+	struct blogic_inbox *next_inbox = adapter->next_inbox;
+	enum blogic_cmplt_code comp_code;
+
+	while ((comp_code = next_inbox->comp_code) != BLOGIC_INBOX_FREE) {
 		/*
-		   We are only allowed to do this because we limit our architectures we
-		   run on to machines where bus_to_virt() actually works. There *needs*
-		   to be a dma_addr_to_virt() in the new PCI DMA mapping interface to
-		   replace bus_to_virt() or else this code is going to become very
+		   We are only allowed to do this because we limit the
+		   architectures we run on to machines where bus_to_virt()
+		   actually works. There *needs* to be a dma_addr_to_virt()
+		   in the new PCI DMA mapping interface to replace
+		   bus_to_virt() or else this code is going to become very
 		   inefficient.
 		 */
-		struct BusLogic_CCB *CCB = (struct BusLogic_CCB *) Bus_to_Virtual(NextIncomingMailbox->CCB);
-		if (CompletionCode != BusLogic_AbortedCommandNotFound) {
-			if (CCB->Status == BusLogic_CCB_Active || CCB->Status == BusLogic_CCB_Reset) {
+		struct blogic_ccb *ccb =
+			(struct blogic_ccb *) bus_to_virt(next_inbox->ccb);
+		if (comp_code != BLOGIC_CMD_NOTFOUND) {
+			if (ccb->status == BLOGIC_CCB_ACTIVE ||
+					ccb->status == BLOGIC_CCB_RESET) {
 				/*
-				   Save the Completion Code for this CCB and queue the CCB
-				   for completion processing.
+				   Save the Completion Code for this CCB and
+				   queue the CCB for completion processing.
 				 */
-				CCB->CompletionCode = CompletionCode;
-				BusLogic_QueueCompletedCCB(CCB);
+				ccb->comp_code = comp_code;
+				blogic_qcompleted_ccb(ccb);
 			} else {
 				/*
-				   If a CCB ever appears in an Incoming Mailbox and is not marked
-				   as status Active or Reset, then there is most likely a bug in
+				   If a CCB ever appears in an Incoming Mailbox
+				   and is not marked as status Active or Reset,
+				   then there is most likely a bug in
 				   the Host Adapter firmware.
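
The loop in blogic_scan_inbox is a classic single-consumer ring: process entries until one is still marked free, clear each slot behind the cursor so the adapter can reuse it, and wrap from the last mailbox back to the first. Generically, with handle() as a hypothetical per-entry callback:

	#define RING_FREE 0	/* stand-in for BLOGIC_INBOX_FREE */

	struct slot {
		int code;	/* completion code; RING_FREE when unused */
	};

	/* Drain produced slots in order and return the new cursor. */
	static struct slot *drain_ring(struct slot *next, struct slot *first,
				       struct slot *last,
				       void (*handle)(struct slot *))
	{
		while (next->code != RING_FREE) {
			handle(next);
			next->code = RING_FREE;
			if (++next > last)
				next = first;
		}
		return next;
	}
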
 */
-		BusLogic_Warning("Illegal CCB #%ld status %d in " "Incoming Mailbox\n", HostAdapter, CCB->SerialNumber, CCB->Status);
+				blogic_warn("Illegal CCB #%ld status %d in " "Incoming Mailbox\n", adapter, ccb->serial, ccb->status);
 			}
 		}
-		NextIncomingMailbox->CompletionCode = BusLogic_IncomingMailboxFree;
-		if (++NextIncomingMailbox > HostAdapter->LastIncomingMailbox)
-			NextIncomingMailbox = HostAdapter->FirstIncomingMailbox;
+		next_inbox->comp_code = BLOGIC_INBOX_FREE;
+		if (++next_inbox > adapter->last_inbox)
+			next_inbox = adapter->first_inbox;
 	}
-	HostAdapter->NextIncomingMailbox = NextIncomingMailbox;
+	adapter->next_inbox = next_inbox;
 }


 /*
-  BusLogic_ProcessCompletedCCBs iterates over the completed CCBs for Host
+  blogic_process_ccbs iterates over the completed CCBs for Host
   Adapter setting the SCSI Command Result Codes, deallocating the CCBs, and
   calling the SCSI Subsystem Completion Routines. The Host Adapter's Lock
   should already have been acquired by the caller.
 */

-static void BusLogic_ProcessCompletedCCBs(struct BusLogic_HostAdapter *HostAdapter)
+static void blogic_process_ccbs(struct blogic_adapter *adapter)
 {
-	if (HostAdapter->ProcessCompletedCCBsActive)
+	if (adapter->processing_ccbs)
 		return;
-	HostAdapter->ProcessCompletedCCBsActive = true;
-	while (HostAdapter->FirstCompletedCCB != NULL) {
-		struct BusLogic_CCB *CCB = HostAdapter->FirstCompletedCCB;
-		struct scsi_cmnd *Command = CCB->Command;
-		HostAdapter->FirstCompletedCCB = CCB->Next;
-		if (HostAdapter->FirstCompletedCCB == NULL)
-			HostAdapter->LastCompletedCCB = NULL;
+	adapter->processing_ccbs = true;
+	while (adapter->firstccb != NULL) {
+		struct blogic_ccb *ccb = adapter->firstccb;
+		struct scsi_cmnd *command = ccb->command;
+		adapter->firstccb = ccb->next;
+		if (adapter->firstccb == NULL)
+			adapter->lastccb = NULL;
 		/*
 		   Process the Completed CCB.
 		 */
-		if (CCB->Opcode == BusLogic_BusDeviceReset) {
-			int TargetID = CCB->TargetID;
-			BusLogic_Warning("Bus Device Reset CCB #%ld to Target " "%d Completed\n", HostAdapter, CCB->SerialNumber, TargetID);
-			BusLogic_IncrementErrorCounter(&HostAdapter->TargetStatistics[TargetID].BusDeviceResetsCompleted);
-			HostAdapter->TargetFlags[TargetID].TaggedQueuingActive = false;
-			HostAdapter->CommandsSinceReset[TargetID] = 0;
-			HostAdapter->LastResetCompleted[TargetID] = jiffies;
+		if (ccb->opcode == BLOGIC_BDR) {
+			int tgt_id = ccb->tgt_id;
+
+			blogic_warn("Bus Device Reset CCB #%ld to Target " "%d Completed\n", adapter, ccb->serial, tgt_id);
+			blogic_inc_count(&adapter->tgt_stats[tgt_id].bdr_done);
+			adapter->tgt_flags[tgt_id].tagq_active = false;
+			adapter->cmds_since_rst[tgt_id] = 0;
+			adapter->last_resetdone[tgt_id] = jiffies;
 			/*
 			   Place CCB back on the Host Adapter's free list.
 			 */
-			BusLogic_DeallocateCCB(CCB);
-#if 0			/* this needs to be redone different for new EH */
+			blogic_dealloc_ccb(ccb, 1);
+#if 0			/* this needs to be redone differently for new EH */
 			/*
-			   Bus Device Reset CCBs have the Command field non-NULL only when a
-			   Bus Device Reset was requested for a Command that did not have a
-			   currently active CCB in the Host Adapter (i.e., a Synchronous
-			   Bus Device Reset), and hence would not have its Completion Routine
-			   called otherwise.
+			   Bus Device Reset CCBs have the command field
+			   non-NULL only when a Bus Device Reset was requested
+			   for a command that did not have a currently active
+			   CCB in the Host Adapter (i.e., a Synchronous Bus
+			   Device Reset), and hence would not have its
+			   Completion Routine called otherwise.
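The chain walk that this disabled block performs can be modelled compactly: complete every command parked on the reset chain exactly once, unlinking each node before calling its completion routine. A hedged, self-contained sketch with a hypothetical cmd type (DID_RESET really is host-byte code 0x08):

	#include <stddef.h>

	struct cmd {
		struct cmd *reset_chain;	/* singly linked, NULL terminated */
		int result;
		void (*done)(struct cmd *);
	};

	static void flush_reset_chain(struct cmd *command)
	{
		while (command != NULL) {
			struct cmd *next = command->reset_chain;

			command->reset_chain = NULL;	/* never complete twice */
			command->result = 0x08 << 16;	/* DID_RESET << 16 */
			command->done(command);
			command = next;
		}
	}
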
*/ - while (Command != NULL) { - struct scsi_cmnd *NextCommand = Command->reset_chain; - Command->reset_chain = NULL; - Command->result = DID_RESET << 16; - Command->scsi_done(Command); - Command = NextCommand; + while (command != NULL) { + struct scsi_cmnd *nxt_cmd = + command->reset_chain; + command->reset_chain = NULL; + command->result = DID_RESET << 16; + command->scsi_done(command); + command = nxt_cmd; } #endif /* - Iterate over the CCBs for this Host Adapter performing completion - processing for any CCBs marked as Reset for this Target. + Iterate over the CCBs for this Host Adapter + performing completion processing for any CCBs + marked as Reset for this Target. */ - for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll) - if (CCB->Status == BusLogic_CCB_Reset && CCB->TargetID == TargetID) { - Command = CCB->Command; - BusLogic_DeallocateCCB(CCB); - HostAdapter->ActiveCommands[TargetID]--; - Command->result = DID_RESET << 16; - Command->scsi_done(Command); + for (ccb = adapter->all_ccbs; ccb != NULL; + ccb = ccb->next_all) + if (ccb->status == BLOGIC_CCB_RESET && + ccb->tgt_id == tgt_id) { + command = ccb->command; + blogic_dealloc_ccb(ccb, 1); + adapter->active_cmds[tgt_id]--; + command->result = DID_RESET << 16; + command->scsi_done(command); } - HostAdapter->BusDeviceResetPendingCCB[TargetID] = NULL; + adapter->bdr_pend[tgt_id] = NULL; } else { /* - Translate the Completion Code, Host Adapter Status, and Target - Device Status into a SCSI Subsystem Result Code. + Translate the Completion Code, Host Adapter Status, + and Target Device Status into a SCSI Subsystem + Result Code. */ - switch (CCB->CompletionCode) { - case BusLogic_IncomingMailboxFree: - case BusLogic_AbortedCommandNotFound: - case BusLogic_InvalidCCB: - BusLogic_Warning("CCB #%ld to Target %d Impossible State\n", HostAdapter, CCB->SerialNumber, CCB->TargetID); + switch (ccb->comp_code) { + case BLOGIC_INBOX_FREE: + case BLOGIC_CMD_NOTFOUND: + case BLOGIC_INVALID_CCB: + blogic_warn("CCB #%ld to Target %d Impossible State\n", adapter, ccb->serial, ccb->tgt_id); break; - case BusLogic_CommandCompletedWithoutError: - HostAdapter->TargetStatistics[CCB->TargetID] - .CommandsCompleted++; - HostAdapter->TargetFlags[CCB->TargetID] - .CommandSuccessfulFlag = true; - Command->result = DID_OK << 16; + case BLOGIC_CMD_COMPLETE_GOOD: + adapter->tgt_stats[ccb->tgt_id] + .cmds_complete++; + adapter->tgt_flags[ccb->tgt_id] + .cmd_good = true; + command->result = DID_OK << 16; break; - case BusLogic_CommandAbortedAtHostRequest: - BusLogic_Warning("CCB #%ld to Target %d Aborted\n", HostAdapter, CCB->SerialNumber, CCB->TargetID); - BusLogic_IncrementErrorCounter(&HostAdapter->TargetStatistics[CCB->TargetID] - .CommandAbortsCompleted); - Command->result = DID_ABORT << 16; + case BLOGIC_CMD_ABORT_BY_HOST: + blogic_warn("CCB #%ld to Target %d Aborted\n", + adapter, ccb->serial, ccb->tgt_id); + blogic_inc_count(&adapter->tgt_stats[ccb->tgt_id].aborts_done); + command->result = DID_ABORT << 16; break; - case BusLogic_CommandCompletedWithError: - Command->result = BusLogic_ComputeResultCode(HostAdapter, CCB->HostAdapterStatus, CCB->TargetDeviceStatus); - if (CCB->HostAdapterStatus != BusLogic_SCSISelectionTimeout) { - HostAdapter->TargetStatistics[CCB->TargetID] - .CommandsCompleted++; - if (BusLogic_GlobalOptions.TraceErrors) { + case BLOGIC_CMD_COMPLETE_ERROR: + command->result = blogic_resultcode(adapter, + ccb->adapter_status, ccb->tgt_status); + if (ccb->adapter_status != BLOGIC_SELECT_TIMEOUT) { + 
adapter->tgt_stats[ccb->tgt_id] + .cmds_complete++; + if (blogic_global_options.trace_err) { int i; - BusLogic_Notice("CCB #%ld Target %d: Result %X Host " - "Adapter Status %02X " "Target Status %02X\n", HostAdapter, CCB->SerialNumber, CCB->TargetID, Command->result, CCB->HostAdapterStatus, CCB->TargetDeviceStatus); - BusLogic_Notice("CDB ", HostAdapter); - for (i = 0; i < CCB->CDB_Length; i++) - BusLogic_Notice(" %02X", HostAdapter, CCB->CDB[i]); - BusLogic_Notice("\n", HostAdapter); - BusLogic_Notice("Sense ", HostAdapter); - for (i = 0; i < CCB->SenseDataLength; i++) - BusLogic_Notice(" %02X", HostAdapter, Command->sense_buffer[i]); - BusLogic_Notice("\n", HostAdapter); + blogic_notice("CCB #%ld Target %d: Result %X Host " + "Adapter Status %02X " "Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status); + blogic_notice("CDB ", adapter); + for (i = 0; i < ccb->cdblen; i++) + blogic_notice(" %02X", adapter, ccb->cdb[i]); + blogic_notice("\n", adapter); + blogic_notice("Sense ", adapter); + for (i = 0; i < ccb->sense_datalen; i++) + blogic_notice(" %02X", adapter, command->sense_buffer[i]); + blogic_notice("\n", adapter); } } break; @@ -2641,141 +2851,145 @@ static void BusLogic_ProcessCompletedCCBs(struct BusLogic_HostAdapter *HostAdapt CmdQue (Tagged Queuing Supported) and WBus16 (16 Bit Wide Data Transfers Supported) bits. */ - if (CCB->CDB[0] == INQUIRY && CCB->CDB[1] == 0 && CCB->HostAdapterStatus == BusLogic_CommandCompletedNormally) { - struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[CCB->TargetID]; - struct SCSI_Inquiry *InquiryResult = - (struct SCSI_Inquiry *) scsi_sglist(Command); - TargetFlags->TargetExists = true; - TargetFlags->TaggedQueuingSupported = InquiryResult->CmdQue; - TargetFlags->WideTransfersSupported = InquiryResult->WBus16; + if (ccb->cdb[0] == INQUIRY && ccb->cdb[1] == 0 && + ccb->adapter_status == BLOGIC_CMD_CMPLT_NORMAL) { + struct blogic_tgt_flags *tgt_flags = + &adapter->tgt_flags[ccb->tgt_id]; + struct scsi_inquiry *inquiry = + (struct scsi_inquiry *) scsi_sglist(command); + tgt_flags->tgt_exists = true; + tgt_flags->tagq_ok = inquiry->CmdQue; + tgt_flags->wide_ok = inquiry->WBus16; } /* Place CCB back on the Host Adapter's free list. */ - BusLogic_DeallocateCCB(CCB); + blogic_dealloc_ccb(ccb, 1); /* Call the SCSI Command Completion Routine. */ - Command->scsi_done(Command); + command->scsi_done(command); } } - HostAdapter->ProcessCompletedCCBsActive = false; + adapter->processing_ccbs = false; } /* - BusLogic_InterruptHandler handles hardware interrupts from BusLogic Host + blogic_inthandler handles hardware interrupts from BusLogic Host Adapters. */ -static irqreturn_t BusLogic_InterruptHandler(int IRQ_Channel, void *DeviceIdentifier) +static irqreturn_t blogic_inthandler(int irq_ch, void *devid) { - struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) DeviceIdentifier; - unsigned long ProcessorFlags; + struct blogic_adapter *adapter = (struct blogic_adapter *) devid; + unsigned long processor_flag; /* Acquire exclusive access to Host Adapter. */ - spin_lock_irqsave(HostAdapter->SCSI_Host->host_lock, ProcessorFlags); + spin_lock_irqsave(adapter->scsi_host->host_lock, processor_flag); /* Handle Interrupts appropriately for each Host Adapter type. 
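Before the dispatch below, note the overall shape of the handler: the whole body runs under the host lock, reads and acknowledges the hardware, records what completed, and performs the deferred completion and reset work before unlocking. A skeleton of that pattern, with a hypothetical my_adapter type standing in for the driver's own:

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	struct my_adapter {
		spinlock_t lock;
	};

	static irqreturn_t my_isr(int irq, void *devid)
	{
		struct my_adapter *adap = devid;
		unsigned long flags;

		spin_lock_irqsave(&adap->lock, flags);
		/* 1. read the interrupt register and acknowledge it */
		/* 2. save completions the hardware has posted */
		/* 3. process completions and any requested reset, still under the lock */
		spin_unlock_irqrestore(&adap->lock, flags);
		return IRQ_HANDLED;
	}
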
*/ - if (BusLogic_MultiMasterHostAdapterP(HostAdapter)) { - union BusLogic_InterruptRegister InterruptRegister; + if (blogic_multimaster_type(adapter)) { + union blogic_int_reg intreg; /* Read the Host Adapter Interrupt Register. */ - InterruptRegister.All = BusLogic_ReadInterruptRegister(HostAdapter); - if (InterruptRegister.ir.InterruptValid) { + intreg.all = blogic_rdint(adapter); + if (intreg.ir.int_valid) { /* Acknowledge the interrupt and reset the Host Adapter Interrupt Register. */ - BusLogic_InterruptReset(HostAdapter); + blogic_intreset(adapter); /* - Process valid External SCSI Bus Reset and Incoming Mailbox - Loaded Interrupts. Command Complete Interrupts are noted, - and Outgoing Mailbox Available Interrupts are ignored, as - they are never enabled. + Process valid External SCSI Bus Reset and Incoming + Mailbox Loaded Interrupts. Command Complete + Interrupts are noted, and Outgoing Mailbox Available + Interrupts are ignored, as they are never enabled. */ - if (InterruptRegister.ir.ExternalBusReset) - HostAdapter->HostAdapterExternalReset = true; - else if (InterruptRegister.ir.IncomingMailboxLoaded) - BusLogic_ScanIncomingMailboxes(HostAdapter); - else if (InterruptRegister.ir.CommandComplete) - HostAdapter->HostAdapterCommandCompleted = true; + if (intreg.ir.ext_busreset) + adapter->adapter_extreset = true; + else if (intreg.ir.mailin_loaded) + blogic_scan_inbox(adapter); + else if (intreg.ir.cmd_complete) + adapter->adapter_cmd_complete = true; } } else { /* Check if there is a pending interrupt for this Host Adapter. */ - if (FlashPoint_InterruptPending(HostAdapter->CardHandle)) - switch (FlashPoint_HandleInterrupt(HostAdapter->CardHandle)) { - case FlashPoint_NormalInterrupt: + if (FlashPoint_InterruptPending(adapter->cardhandle)) + switch (FlashPoint_HandleInterrupt(adapter->cardhandle)) { + case FPOINT_NORMAL_INT: break; - case FlashPoint_ExternalBusReset: - HostAdapter->HostAdapterExternalReset = true; + case FPOINT_EXT_RESET: + adapter->adapter_extreset = true; break; - case FlashPoint_InternalError: - BusLogic_Warning("Internal FlashPoint Error detected" " - Resetting Host Adapter\n", HostAdapter); - HostAdapter->HostAdapterInternalError = true; + case FPOINT_INTERN_ERR: + blogic_warn("Internal FlashPoint Error detected - Resetting Host Adapter\n", adapter); + adapter->adapter_intern_err = true; break; } } /* Process any completed CCBs. */ - if (HostAdapter->FirstCompletedCCB != NULL) - BusLogic_ProcessCompletedCCBs(HostAdapter); + if (adapter->firstccb != NULL) + blogic_process_ccbs(adapter); /* Reset the Host Adapter if requested. 
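The MultiMaster branch above decodes a single 8-bit interrupt register read through union blogic_int_reg. The idiom in isolation looks like the sketch below; the field names mirror the driver's, but the bit positions here are illustrative, and since bitfield layout is compiler-defined such unions always keep a raw byte view (.all) alongside the decoded one.

	#include <stdint.h>

	union int_reg {
		uint8_t all;			/* raw register byte */
		struct {
			uint8_t mailin_loaded:1;	/* incoming mailbox loaded */
			uint8_t mailout_avail:1;	/* outgoing mailbox free */
			uint8_t cmd_complete:1;		/* command complete */
			uint8_t ext_busreset:1;		/* external SCSI bus reset */
			uint8_t rsvd:3;
			uint8_t int_valid:1;		/* an interrupt is pending */
		} ir;
	};
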
*/ - if (HostAdapter->HostAdapterExternalReset) { - BusLogic_Warning("Resetting %s due to External SCSI Bus Reset\n", HostAdapter, HostAdapter->FullModelName); - BusLogic_IncrementErrorCounter(&HostAdapter->ExternalHostAdapterResets); - BusLogic_ResetHostAdapter(HostAdapter, false); - HostAdapter->HostAdapterExternalReset = false; - } else if (HostAdapter->HostAdapterInternalError) { - BusLogic_Warning("Resetting %s due to Host Adapter Internal Error\n", HostAdapter, HostAdapter->FullModelName); - BusLogic_IncrementErrorCounter(&HostAdapter->HostAdapterInternalErrors); - BusLogic_ResetHostAdapter(HostAdapter, true); - HostAdapter->HostAdapterInternalError = false; + if (adapter->adapter_extreset) { + blogic_warn("Resetting %s due to External SCSI Bus Reset\n", adapter, adapter->full_model); + blogic_inc_count(&adapter->ext_resets); + blogic_resetadapter(adapter, false); + adapter->adapter_extreset = false; + } else if (adapter->adapter_intern_err) { + blogic_warn("Resetting %s due to Host Adapter Internal Error\n", adapter, adapter->full_model); + blogic_inc_count(&adapter->adapter_intern_errors); + blogic_resetadapter(adapter, true); + adapter->adapter_intern_err = false; } /* Release exclusive access to Host Adapter. */ - spin_unlock_irqrestore(HostAdapter->SCSI_Host->host_lock, ProcessorFlags); + spin_unlock_irqrestore(adapter->scsi_host->host_lock, processor_flag); return IRQ_HANDLED; } /* - BusLogic_WriteOutgoingMailbox places CCB and Action Code into an Outgoing + blogic_write_outbox places CCB and Action Code into an Outgoing Mailbox for execution by Host Adapter. The Host Adapter's Lock should already have been acquired by the caller. */ -static bool BusLogic_WriteOutgoingMailbox(struct BusLogic_HostAdapter - *HostAdapter, enum BusLogic_ActionCode ActionCode, struct BusLogic_CCB *CCB) +static bool blogic_write_outbox(struct blogic_adapter *adapter, + enum blogic_action action, struct blogic_ccb *ccb) { - struct BusLogic_OutgoingMailbox *NextOutgoingMailbox; - NextOutgoingMailbox = HostAdapter->NextOutgoingMailbox; - if (NextOutgoingMailbox->ActionCode == BusLogic_OutgoingMailboxFree) { - CCB->Status = BusLogic_CCB_Active; + struct blogic_outbox *next_outbox; + + next_outbox = adapter->next_outbox; + if (next_outbox->action == BLOGIC_OUTBOX_FREE) { + ccb->status = BLOGIC_CCB_ACTIVE; /* - The CCB field must be written before the Action Code field since - the Host Adapter is operating asynchronously and the locking code - does not protect against simultaneous access by the Host Adapter. + The CCB field must be written before the Action Code field + since the Host Adapter is operating asynchronously and the + locking code does not protect against simultaneous access + by the Host Adapter. 
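The ordering rule stated above is the classic publish pattern: the adapter fetches mailbox entries asynchronously, so it must never be able to observe the action code before the CCB address is in place. On the strongly ordered buses this driver targets, two plain stores in the right order suffice; written portably, the same pattern carries an explicit write barrier. A generic sketch under that assumption, not the driver's code:

	#include <linux/types.h>
	#include <asm/barrier.h>

	struct outbox_entry {		/* hypothetical mailbox layout */
		u32 ccb;		/* bus address of the request */
		u8 action;		/* nonzero means the entry is live */
	};

	static void post_entry(struct outbox_entry *e, u32 ccb_busaddr,
			u8 action)
	{
		e->ccb = ccb_busaddr;	/* 1. payload first */
		wmb();			/* 2. order payload before flag */
		e->action = action;	/* 3. flag publishes the entry */
	}
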
*/ - NextOutgoingMailbox->CCB = CCB->DMA_Handle; - NextOutgoingMailbox->ActionCode = ActionCode; - BusLogic_StartMailboxCommand(HostAdapter); - if (++NextOutgoingMailbox > HostAdapter->LastOutgoingMailbox) - NextOutgoingMailbox = HostAdapter->FirstOutgoingMailbox; - HostAdapter->NextOutgoingMailbox = NextOutgoingMailbox; - if (ActionCode == BusLogic_MailboxStartCommand) { - HostAdapter->ActiveCommands[CCB->TargetID]++; - if (CCB->Opcode != BusLogic_BusDeviceReset) - HostAdapter->TargetStatistics[CCB->TargetID].CommandsAttempted++; + next_outbox->ccb = ccb->dma_handle; + next_outbox->action = action; + blogic_execmbox(adapter); + if (++next_outbox > adapter->last_outbox) + next_outbox = adapter->first_outbox; + adapter->next_outbox = next_outbox; + if (action == BLOGIC_MBOX_START) { + adapter->active_cmds[ccb->tgt_id]++; + if (ccb->opcode != BLOGIC_BDR) + adapter->tgt_stats[ccb->tgt_id].cmds_tried++; } return true; } @@ -2784,65 +2998,73 @@ static bool BusLogic_WriteOutgoingMailbox(struct BusLogic_HostAdapter /* Error Handling (EH) support */ -static int BusLogic_host_reset(struct scsi_cmnd * SCpnt) +static int blogic_hostreset(struct scsi_cmnd *SCpnt) { - struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) SCpnt->device->host->hostdata; + struct blogic_adapter *adapter = + (struct blogic_adapter *) SCpnt->device->host->hostdata; unsigned int id = SCpnt->device->id; - struct BusLogic_TargetStatistics *stats = &HostAdapter->TargetStatistics[id]; + struct blogic_tgt_stats *stats = &adapter->tgt_stats[id]; int rc; spin_lock_irq(SCpnt->device->host->host_lock); - BusLogic_IncrementErrorCounter(&stats->HostAdapterResetsRequested); + blogic_inc_count(&stats->adatper_reset_req); - rc = BusLogic_ResetHostAdapter(HostAdapter, false); + rc = blogic_resetadapter(adapter, false); spin_unlock_irq(SCpnt->device->host->host_lock); return rc; } /* - BusLogic_QueueCommand creates a CCB for Command and places it into an + blogic_qcmd creates a CCB for Command and places it into an Outgoing Mailbox for execution by the associated Host Adapter. */ -static int BusLogic_QueueCommand_lck(struct scsi_cmnd *Command, void (*CompletionRoutine) (struct scsi_cmnd *)) +static int blogic_qcmd_lck(struct scsi_cmnd *command, + void (*comp_cb) (struct scsi_cmnd *)) { - struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) Command->device->host->hostdata; - struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[Command->device->id]; - struct BusLogic_TargetStatistics *TargetStatistics = HostAdapter->TargetStatistics; - unsigned char *CDB = Command->cmnd; - int CDB_Length = Command->cmd_len; - int TargetID = Command->device->id; - int LogicalUnit = Command->device->lun; - int BufferLength = scsi_bufflen(Command); - int Count; - struct BusLogic_CCB *CCB; - /* - SCSI REQUEST_SENSE commands will be executed automatically by the Host - Adapter for any errors, so they should not be executed explicitly unless - the Sense Data is zero indicating that no error occurred. 
- */ - if (CDB[0] == REQUEST_SENSE && Command->sense_buffer[0] != 0) { - Command->result = DID_OK << 16; - CompletionRoutine(Command); + struct blogic_adapter *adapter = + (struct blogic_adapter *) command->device->host->hostdata; + struct blogic_tgt_flags *tgt_flags = + &adapter->tgt_flags[command->device->id]; + struct blogic_tgt_stats *tgt_stats = adapter->tgt_stats; + unsigned char *cdb = command->cmnd; + int cdblen = command->cmd_len; + int tgt_id = command->device->id; + int lun = command->device->lun; + int buflen = scsi_bufflen(command); + int count; + struct blogic_ccb *ccb; + dma_addr_t sense_buf; + + /* + SCSI REQUEST_SENSE commands will be executed automatically by the + Host Adapter for any errors, so they should not be executed + explicitly unless the Sense Data is zero indicating that no error + occurred. + */ + if (cdb[0] == REQUEST_SENSE && command->sense_buffer[0] != 0) { + command->result = DID_OK << 16; + comp_cb(command); return 0; } /* - Allocate a CCB from the Host Adapter's free list. In the unlikely event - that there are none available and memory allocation fails, wait 1 second - and try again. If that fails, the Host Adapter is probably hung so signal - an error as a Host Adapter Hard Reset should be initiated soon. - */ - CCB = BusLogic_AllocateCCB(HostAdapter); - if (CCB == NULL) { - spin_unlock_irq(HostAdapter->SCSI_Host->host_lock); - BusLogic_Delay(1); - spin_lock_irq(HostAdapter->SCSI_Host->host_lock); - CCB = BusLogic_AllocateCCB(HostAdapter); - if (CCB == NULL) { - Command->result = DID_ERROR << 16; - CompletionRoutine(Command); + Allocate a CCB from the Host Adapter's free list. In the unlikely + event that there are none available and memory allocation fails, + wait 1 second and try again. If that fails, the Host Adapter is + probably hung so signal an error as a Host Adapter Hard Reset + should be initiated soon. + */ + ccb = blogic_alloc_ccb(adapter); + if (ccb == NULL) { + spin_unlock_irq(adapter->scsi_host->host_lock); + blogic_delay(1); + spin_lock_irq(adapter->scsi_host->host_lock); + ccb = blogic_alloc_ccb(adapter); + if (ccb == NULL) { + command->result = DID_ERROR << 16; + comp_cb(command); return 0; } } @@ -2850,217 +3072,248 @@ static int BusLogic_QueueCommand_lck(struct scsi_cmnd *Command, void (*Completio /* Initialize the fields in the BusLogic Command Control Block (CCB). 
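The allocation a few lines up follows a two-attempt pattern: drop the host lock so that interrupt-driven completions can replenish the free list, sleep for a second, then try exactly once more before giving up. Schematically, with my_* stand-ins for the driver's own helpers:

	#include <linux/spinlock.h>

	struct my_ccb;
	struct my_host {
		spinlock_t lock;
	};
	struct my_ccb *my_alloc_ccb(struct my_host *host);
	void my_delay_seconds(int secs);

	static struct my_ccb *alloc_ccb_retry(struct my_host *host)
	{
		struct my_ccb *ccb = my_alloc_ccb(host);

		if (ccb == NULL) {
			spin_unlock_irq(&host->lock);	/* let completions run */
			my_delay_seconds(1);
			spin_lock_irq(&host->lock);
			ccb = my_alloc_ccb(host);	/* second, final attempt */
		}
		return ccb;	/* NULL here suggests a hung adapter */
	}
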
*/ - Count = scsi_dma_map(Command); - BUG_ON(Count < 0); - if (Count) { + count = scsi_dma_map(command); + BUG_ON(count < 0); + if (count) { struct scatterlist *sg; int i; - CCB->Opcode = BusLogic_InitiatorCCB_ScatterGather; - CCB->DataLength = Count * sizeof(struct BusLogic_ScatterGatherSegment); - if (BusLogic_MultiMasterHostAdapterP(HostAdapter)) - CCB->DataPointer = (unsigned int) CCB->DMA_Handle + ((unsigned long) &CCB->ScatterGatherList - (unsigned long) CCB); + ccb->opcode = BLOGIC_INITIATOR_CCB_SG; + ccb->datalen = count * sizeof(struct blogic_sg_seg); + if (blogic_multimaster_type(adapter)) + ccb->data = (void *)((unsigned int) ccb->dma_handle + + ((unsigned long) &ccb->sglist - + (unsigned long) ccb)); else - CCB->DataPointer = Virtual_to_32Bit_Virtual(CCB->ScatterGatherList); + ccb->data = ccb->sglist; - scsi_for_each_sg(Command, sg, Count, i) { - CCB->ScatterGatherList[i].SegmentByteCount = - sg_dma_len(sg); - CCB->ScatterGatherList[i].SegmentDataPointer = - sg_dma_address(sg); + scsi_for_each_sg(command, sg, count, i) { + ccb->sglist[i].segbytes = sg_dma_len(sg); + ccb->sglist[i].segdata = sg_dma_address(sg); } - } else if (!Count) { - CCB->Opcode = BusLogic_InitiatorCCB; - CCB->DataLength = BufferLength; - CCB->DataPointer = 0; + } else if (!count) { + ccb->opcode = BLOGIC_INITIATOR_CCB; + ccb->datalen = buflen; + ccb->data = 0; } - switch (CDB[0]) { + switch (cdb[0]) { case READ_6: case READ_10: - CCB->DataDirection = BusLogic_DataInLengthChecked; - TargetStatistics[TargetID].ReadCommands++; - BusLogic_IncrementByteCounter(&TargetStatistics[TargetID].TotalBytesRead, BufferLength); - BusLogic_IncrementSizeBucket(TargetStatistics[TargetID].ReadCommandSizeBuckets, BufferLength); + ccb->datadir = BLOGIC_DATAIN_CHECKED; + tgt_stats[tgt_id].read_cmds++; + blogic_addcount(&tgt_stats[tgt_id].bytesread, buflen); + blogic_incszbucket(tgt_stats[tgt_id].read_sz_buckets, buflen); break; case WRITE_6: case WRITE_10: - CCB->DataDirection = BusLogic_DataOutLengthChecked; - TargetStatistics[TargetID].WriteCommands++; - BusLogic_IncrementByteCounter(&TargetStatistics[TargetID].TotalBytesWritten, BufferLength); - BusLogic_IncrementSizeBucket(TargetStatistics[TargetID].WriteCommandSizeBuckets, BufferLength); + ccb->datadir = BLOGIC_DATAOUT_CHECKED; + tgt_stats[tgt_id].write_cmds++; + blogic_addcount(&tgt_stats[tgt_id].byteswritten, buflen); + blogic_incszbucket(tgt_stats[tgt_id].write_sz_buckets, buflen); break; default: - CCB->DataDirection = BusLogic_UncheckedDataTransfer; + ccb->datadir = BLOGIC_UNCHECKED_TX; break; } - CCB->CDB_Length = CDB_Length; - CCB->HostAdapterStatus = 0; - CCB->TargetDeviceStatus = 0; - CCB->TargetID = TargetID; - CCB->LogicalUnit = LogicalUnit; - CCB->TagEnable = false; - CCB->LegacyTagEnable = false; - /* - BusLogic recommends that after a Reset the first couple of commands that - are sent to a Target Device be sent in a non Tagged Queue fashion so that - the Host Adapter and Target Device can establish Synchronous and Wide - Transfer before Queue Tag messages can interfere with the Synchronous and - Wide Negotiation messages. By waiting to enable Tagged Queuing until after - the first BusLogic_MaxTaggedQueueDepth commands have been queued, it is - assured that after a Reset any pending commands are requeued before Tagged - Queuing is enabled and that the Tagged Queuing message will not occur while - the partition table is being printed. 
In addition, some devices do not - properly handle the transition from non-tagged to tagged commands, so it is - necessary to wait until there are no pending commands for a target device + ccb->cdblen = cdblen; + ccb->adapter_status = 0; + ccb->tgt_status = 0; + ccb->tgt_id = tgt_id; + ccb->lun = lun; + ccb->tag_enable = false; + ccb->legacytag_enable = false; + /* + BusLogic recommends that after a Reset the first couple of + commands that are sent to a Target Device be sent in a non + Tagged Queue fashion so that the Host Adapter and Target Device + can establish Synchronous and Wide Transfer before Queue Tag + messages can interfere with the Synchronous and Wide Negotiation + messages. By waiting to enable Tagged Queuing until after the + first BLOGIC_MAX_TAG_DEPTH commands have been queued, it is + assured that after a Reset any pending commands are requeued + before Tagged Queuing is enabled and that the Tagged Queuing + message will not occur while the partition table is being printed. + In addition, some devices do not properly handle the transition + from non-tagged to tagged commands, so it is necessary to wait + until there are no pending commands for a target device before queuing tagged commands. */ - if (HostAdapter->CommandsSinceReset[TargetID]++ >= - BusLogic_MaxTaggedQueueDepth && !TargetFlags->TaggedQueuingActive && HostAdapter->ActiveCommands[TargetID] == 0 && TargetFlags->TaggedQueuingSupported && (HostAdapter->TaggedQueuingPermitted & (1 << TargetID))) { - TargetFlags->TaggedQueuingActive = true; - BusLogic_Notice("Tagged Queuing now active for Target %d\n", HostAdapter, TargetID); - } - if (TargetFlags->TaggedQueuingActive) { - enum BusLogic_QueueTag QueueTag = BusLogic_SimpleQueueTag; + if (adapter->cmds_since_rst[tgt_id]++ >= BLOGIC_MAX_TAG_DEPTH && + !tgt_flags->tagq_active && + adapter->active_cmds[tgt_id] == 0 + && tgt_flags->tagq_ok && + (adapter->tagq_ok & (1 << tgt_id))) { + tgt_flags->tagq_active = true; + blogic_notice("Tagged Queuing now active for Target %d\n", + adapter, tgt_id); + } + if (tgt_flags->tagq_active) { + enum blogic_queuetag queuetag = BLOGIC_SIMPLETAG; /* - When using Tagged Queuing with Simple Queue Tags, it appears that disk - drive controllers do not guarantee that a queued command will not - remain in a disconnected state indefinitely if commands that read or - write nearer the head position continue to arrive without interruption. - Therefore, for each Target Device this driver keeps track of the last - time either the queue was empty or an Ordered Queue Tag was issued. If - more than 4 seconds (one fifth of the 20 second disk timeout) have - elapsed since this last sequence point, this command will be issued - with an Ordered Queue Tag rather than a Simple Queue Tag, which forces - the Target Device to complete all previously queued commands before - this command may be executed. + When using Tagged Queuing with Simple Queue Tags, it + appears that disk drive controllers do not guarantee that + a queued command will not remain in a disconnected state + indefinitely if commands that read or write nearer the + head position continue to arrive without interruption. + Therefore, for each Target Device this driver keeps track + of the last time either the queue was empty or an Ordered + Queue Tag was issued. 
If more than 4 seconds (one fifth + of the 20 second disk timeout) have elapsed since this + last sequence point, this command will be issued with an + Ordered Queue Tag rather than a Simple Queue Tag, which + forces the Target Device to complete all previously + queued commands before this command may be executed. */ - if (HostAdapter->ActiveCommands[TargetID] == 0) - HostAdapter->LastSequencePoint[TargetID] = jiffies; - else if (time_after(jiffies, HostAdapter->LastSequencePoint[TargetID] + 4 * HZ)) { - HostAdapter->LastSequencePoint[TargetID] = jiffies; - QueueTag = BusLogic_OrderedQueueTag; + if (adapter->active_cmds[tgt_id] == 0) + adapter->last_seqpoint[tgt_id] = jiffies; + else if (time_after(jiffies, + adapter->last_seqpoint[tgt_id] + 4 * HZ)) { + adapter->last_seqpoint[tgt_id] = jiffies; + queuetag = BLOGIC_ORDEREDTAG; } - if (HostAdapter->ExtendedLUNSupport) { - CCB->TagEnable = true; - CCB->QueueTag = QueueTag; + if (adapter->ext_lun) { + ccb->tag_enable = true; + ccb->queuetag = queuetag; } else { - CCB->LegacyTagEnable = true; - CCB->LegacyQueueTag = QueueTag; + ccb->legacytag_enable = true; + ccb->legacy_tag = queuetag; } } - memcpy(CCB->CDB, CDB, CDB_Length); - CCB->SenseDataLength = SCSI_SENSE_BUFFERSIZE; - CCB->SenseDataPointer = pci_map_single(HostAdapter->PCI_Device, Command->sense_buffer, CCB->SenseDataLength, PCI_DMA_FROMDEVICE); - CCB->Command = Command; - Command->scsi_done = CompletionRoutine; - if (BusLogic_MultiMasterHostAdapterP(HostAdapter)) { + memcpy(ccb->cdb, cdb, cdblen); + ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE; + ccb->command = command; + sense_buf = pci_map_single(adapter->pci_device, + command->sense_buffer, ccb->sense_datalen, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) { + blogic_err("DMA mapping for sense data buffer failed\n", + adapter); + blogic_dealloc_ccb(ccb, 0); + return SCSI_MLQUEUE_HOST_BUSY; + } + ccb->sensedata = sense_buf; + command->scsi_done = comp_cb; + if (blogic_multimaster_type(adapter)) { /* - Place the CCB in an Outgoing Mailbox. The higher levels of the SCSI - Subsystem should not attempt to queue more commands than can be placed - in Outgoing Mailboxes, so there should always be one free. In the - unlikely event that there are none available, wait 1 second and try - again. If that fails, the Host Adapter is probably hung so signal an - error as a Host Adapter Hard Reset should be initiated soon. + Place the CCB in an Outgoing Mailbox. The higher levels + of the SCSI Subsystem should not attempt to queue more + commands than can be placed in Outgoing Mailboxes, so + there should always be one free. In the unlikely event + that there are none available, wait 1 second and try + again. If that fails, the Host Adapter is probably hung + so signal an error as a Host Adapter Hard Reset should + be initiated soon. 
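The Ordered Queue Tag heuristic described earlier in this function reduces to a little jiffies arithmetic; time_after() and HZ are the real kernel helpers, while the enum and function here are stand-ins:

	#include <linux/jiffies.h>

	enum tag_kind { TAG_SIMPLE, TAG_ORDERED };

	static enum tag_kind pick_queue_tag(unsigned long *last_seqpoint,
			int active_cmds)
	{
		if (active_cmds == 0) {
			*last_seqpoint = jiffies;  /* queue drained: new sequence point */
		} else if (time_after(jiffies, *last_seqpoint + 4 * HZ)) {
			*last_seqpoint = jiffies;  /* 4 seconds busy: force a drain */
			return TAG_ORDERED;
		}
		return TAG_SIMPLE;
	}
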
*/ - if (!BusLogic_WriteOutgoingMailbox(HostAdapter, BusLogic_MailboxStartCommand, CCB)) { - spin_unlock_irq(HostAdapter->SCSI_Host->host_lock); - BusLogic_Warning("Unable to write Outgoing Mailbox - " "Pausing for 1 second\n", HostAdapter); - BusLogic_Delay(1); - spin_lock_irq(HostAdapter->SCSI_Host->host_lock); - if (!BusLogic_WriteOutgoingMailbox(HostAdapter, BusLogic_MailboxStartCommand, CCB)) { - BusLogic_Warning("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", HostAdapter); - BusLogic_DeallocateCCB(CCB); - Command->result = DID_ERROR << 16; - Command->scsi_done(Command); + if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, ccb)) { + spin_unlock_irq(adapter->scsi_host->host_lock); + blogic_warn("Unable to write Outgoing Mailbox - " "Pausing for 1 second\n", adapter); + blogic_delay(1); + spin_lock_irq(adapter->scsi_host->host_lock); + if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, + ccb)) { + blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter); + blogic_dealloc_ccb(ccb, 1); + command->result = DID_ERROR << 16; + command->scsi_done(command); } } } else { /* - Call the FlashPoint SCCB Manager to start execution of the CCB. + Call the FlashPoint SCCB Manager to start execution of + the CCB. */ - CCB->Status = BusLogic_CCB_Active; - HostAdapter->ActiveCommands[TargetID]++; - TargetStatistics[TargetID].CommandsAttempted++; - FlashPoint_StartCCB(HostAdapter->CardHandle, CCB); + ccb->status = BLOGIC_CCB_ACTIVE; + adapter->active_cmds[tgt_id]++; + tgt_stats[tgt_id].cmds_tried++; + FlashPoint_StartCCB(adapter->cardhandle, ccb); /* - The Command may have already completed and BusLogic_QueueCompletedCCB - been called, or it may still be pending. + The Command may have already completed and + blogic_qcompleted_ccb been called, or it may still be + pending. */ - if (CCB->Status == BusLogic_CCB_Completed) - BusLogic_ProcessCompletedCCBs(HostAdapter); + if (ccb->status == BLOGIC_CCB_COMPLETE) + blogic_process_ccbs(adapter); } return 0; } -static DEF_SCSI_QCMD(BusLogic_QueueCommand) +static DEF_SCSI_QCMD(blogic_qcmd) #if 0 /* - BusLogic_AbortCommand aborts Command if possible. + blogic_abort aborts Command if possible. */ -static int BusLogic_AbortCommand(struct scsi_cmnd *Command) +static int blogic_abort(struct scsi_cmnd *command) { - struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) Command->device->host->hostdata; + struct blogic_adapter *adapter = + (struct blogic_adapter *) command->device->host->hostdata; + + int tgt_id = command->device->id; + struct blogic_ccb *ccb; + blogic_inc_count(&adapter->tgt_stats[tgt_id].aborts_request); - int TargetID = Command->device->id; - struct BusLogic_CCB *CCB; - BusLogic_IncrementErrorCounter(&HostAdapter->TargetStatistics[TargetID].CommandAbortsRequested); /* - Attempt to find an Active CCB for this Command. If no Active CCB for this - Command is found, then no Abort is necessary. + Attempt to find an Active CCB for this Command. If no Active + CCB for this Command is found, then no Abort is necessary. 
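Note the convention the lookup below relies on: if no active CCB matches the command, or the matching CCB has already completed or been reset, the abort reports SUCCESS, since there is nothing left to abort. In miniature, over hypothetical types with issue_abort left as a stub:

	#include <stddef.h>

	struct a_ccb {
		struct a_ccb *next_all;
		void *command;
		int finished;
	};

	static int issue_abort(struct a_ccb *ccb);	/* stands in for the real path */

	static int try_abort(struct a_ccb *all_ccbs, void *command)
	{
		struct a_ccb *ccb;

		for (ccb = all_ccbs; ccb != NULL; ccb = ccb->next_all)
			if (ccb->command == command)
				break;
		if (ccb == NULL || ccb->finished)
			return 0;		/* SUCCESS: nothing to abort */
		return issue_abort(ccb);
	}
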
*/ - for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll) - if (CCB->Command == Command) + for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all) + if (ccb->command == command) break; - if (CCB == NULL) { - BusLogic_Warning("Unable to Abort Command to Target %d - " "No CCB Found\n", HostAdapter, TargetID); + if (ccb == NULL) { + blogic_warn("Unable to Abort Command to Target %d - No CCB Found\n", adapter, tgt_id); return SUCCESS; - } else if (CCB->Status == BusLogic_CCB_Completed) { - BusLogic_Warning("Unable to Abort Command to Target %d - " "CCB Completed\n", HostAdapter, TargetID); + } else if (ccb->status == BLOGIC_CCB_COMPLETE) { + blogic_warn("Unable to Abort Command to Target %d - CCB Completed\n", adapter, tgt_id); return SUCCESS; - } else if (CCB->Status == BusLogic_CCB_Reset) { - BusLogic_Warning("Unable to Abort Command to Target %d - " "CCB Reset\n", HostAdapter, TargetID); + } else if (ccb->status == BLOGIC_CCB_RESET) { + blogic_warn("Unable to Abort Command to Target %d - CCB Reset\n", adapter, tgt_id); return SUCCESS; } - if (BusLogic_MultiMasterHostAdapterP(HostAdapter)) { + if (blogic_multimaster_type(adapter)) { /* - Attempt to Abort this CCB. MultiMaster Firmware versions prior to 5.xx - do not generate Abort Tag messages, but only generate the non-tagged - Abort message. Since non-tagged commands are not sent by the Host - Adapter until the queue of outstanding tagged commands has completed, - and the Abort message is treated as a non-tagged command, it is - effectively impossible to abort commands when Tagged Queuing is active. - Firmware version 5.xx does generate Abort Tag messages, so it is - possible to abort commands when Tagged Queuing is active. + Attempt to Abort this CCB. MultiMaster Firmware versions + prior to 5.xx do not generate Abort Tag messages, but only + generate the non-tagged Abort message. Since non-tagged + commands are not sent by the Host Adapter until the queue + of outstanding tagged commands has completed, and the + Abort message is treated as a non-tagged command, it is + effectively impossible to abort commands when Tagged + Queuing is active. Firmware version 5.xx does generate + Abort Tag messages, so it is possible to abort commands + when Tagged Queuing is active. 
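The firmware gate above fits in a single predicate: MultiMaster firmware before 5.xx offers only the untagged Abort message, so a command that went out with a queue tag simply cannot be aborted. As a sketch:

	#include <stdbool.h>

	static bool can_abort(const char *fw_ver, bool tagq_active)
	{
		/* Pre-5.xx MultiMaster firmware has no Abort Tag message. */
		if (tagq_active && fw_ver[0] < '5')
			return false;
		return true;
	}
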
*/ - if (HostAdapter->TargetFlags[TargetID].TaggedQueuingActive && HostAdapter->FirmwareVersion[0] < '5') { - BusLogic_Warning("Unable to Abort CCB #%ld to Target %d - " "Abort Tag Not Supported\n", HostAdapter, CCB->SerialNumber, TargetID); + if (adapter->tgt_flags[tgt_id].tagq_active && + adapter->fw_ver[0] < '5') { + blogic_warn("Unable to Abort CCB #%ld to Target %d - Abort Tag Not Supported\n", adapter, ccb->serial, tgt_id); return FAILURE; - } else if (BusLogic_WriteOutgoingMailbox(HostAdapter, BusLogic_MailboxAbortCommand, CCB)) { - BusLogic_Warning("Aborting CCB #%ld to Target %d\n", HostAdapter, CCB->SerialNumber, TargetID); - BusLogic_IncrementErrorCounter(&HostAdapter->TargetStatistics[TargetID].CommandAbortsAttempted); + } else if (blogic_write_outbox(adapter, BLOGIC_MBOX_ABORT, + ccb)) { + blogic_warn("Aborting CCB #%ld to Target %d\n", + adapter, ccb->serial, tgt_id); + blogic_inc_count(&adapter->tgt_stats[tgt_id].aborts_tried); return SUCCESS; } else { - BusLogic_Warning("Unable to Abort CCB #%ld to Target %d - " "No Outgoing Mailboxes\n", HostAdapter, CCB->SerialNumber, TargetID); + blogic_warn("Unable to Abort CCB #%ld to Target %d - No Outgoing Mailboxes\n", adapter, ccb->serial, tgt_id); return FAILURE; } } else { /* - Call the FlashPoint SCCB Manager to abort execution of the CCB. + Call the FlashPoint SCCB Manager to abort execution of + the CCB. */ - BusLogic_Warning("Aborting CCB #%ld to Target %d\n", HostAdapter, CCB->SerialNumber, TargetID); - BusLogic_IncrementErrorCounter(&HostAdapter->TargetStatistics[TargetID].CommandAbortsAttempted); - FlashPoint_AbortCCB(HostAdapter->CardHandle, CCB); + blogic_warn("Aborting CCB #%ld to Target %d\n", adapter, + ccb->serial, tgt_id); + blogic_inc_count(&adapter->tgt_stats[tgt_id].aborts_tried); + FlashPoint_AbortCCB(adapter->cardhandle, ccb); /* The Abort may have already been completed and - BusLogic_QueueCompletedCCB been called, or it + blogic_qcompleted_ccb been called, or it may still be pending. */ - if (CCB->Status == BusLogic_CCB_Completed) { - BusLogic_ProcessCompletedCCBs(HostAdapter); - } + if (ccb->status == BLOGIC_CCB_COMPLETE) + blogic_process_ccbs(adapter); return SUCCESS; } return SUCCESS; @@ -3068,21 +3321,23 @@ static int BusLogic_AbortCommand(struct scsi_cmnd *Command) #endif /* - BusLogic_ResetHostAdapter resets Host Adapter if possible, marking all + blogic_resetadapter resets Host Adapter if possible, marking all currently executing SCSI Commands as having been Reset. */ -static int BusLogic_ResetHostAdapter(struct BusLogic_HostAdapter *HostAdapter, bool HardReset) +static int blogic_resetadapter(struct blogic_adapter *adapter, bool hard_reset) { - struct BusLogic_CCB *CCB; - int TargetID; + struct blogic_ccb *ccb; + int tgt_id; /* * Attempt to Reset and Reinitialize the Host Adapter. */ - if (!(BusLogic_HardwareResetHostAdapter(HostAdapter, HardReset) && BusLogic_InitializeHostAdapter(HostAdapter))) { - BusLogic_Error("Resetting %s Failed\n", HostAdapter, HostAdapter->FullModelName); + if (!(blogic_hwreset(adapter, hard_reset) && + blogic_initadapter(adapter))) { + blogic_err("Resetting %s Failed\n", adapter, + adapter->full_model); return FAILURE; } @@ -3090,9 +3345,9 @@ static int BusLogic_ResetHostAdapter(struct BusLogic_HostAdapter *HostAdapter, b * Deallocate all currently executing CCBs. 
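The reset sequence around this point has three steps in a fixed order: reinitialize the hardware and fail hard if that does not succeed, reap every CCB that was in flight, and, after a hard reset only, let the bus settle before issuing new commands (the code below sleeps for bus_settle_time seconds with the lock dropped). A schematic with hypothetical names:

	struct r_host {
		int bus_settle_time;	/* seconds */
	};

	int hw_reset(struct r_host *host, int hard);
	int reinit(struct r_host *host);
	void reap_inflight_ccbs(struct r_host *host);
	void settle_delay(int seconds);	/* must drop the host lock to sleep */

	static int reset_host(struct r_host *host, int hard)
	{
		if (!hw_reset(host, hard) || !reinit(host))
			return -1;			/* FAILURE */
		reap_inflight_ccbs(host);		/* complete in-flight work */
		if (hard)
			settle_delay(host->bus_settle_time);
		return 0;				/* SUCCESS */
	}
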
*/ - for (CCB = HostAdapter->All_CCBs; CCB != NULL; CCB = CCB->NextAll) - if (CCB->Status == BusLogic_CCB_Active) - BusLogic_DeallocateCCB(CCB); + for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all) + if (ccb->status == BLOGIC_CCB_ACTIVE) + blogic_dealloc_ccb(ccb, 1); /* * Wait a few seconds between the Host Adapter Hard Reset which * initiates a SCSI Bus Reset and issuing any SCSI Commands. Some @@ -3100,21 +3355,21 @@ static int BusLogic_ResetHostAdapter(struct BusLogic_HostAdapter *HostAdapter, b * after a SCSI Bus Reset. */ - if (HardReset) { - spin_unlock_irq(HostAdapter->SCSI_Host->host_lock); - BusLogic_Delay(HostAdapter->BusSettleTime); - spin_lock_irq(HostAdapter->SCSI_Host->host_lock); + if (hard_reset) { + spin_unlock_irq(adapter->scsi_host->host_lock); + blogic_delay(adapter->bus_settle_time); + spin_lock_irq(adapter->scsi_host->host_lock); } - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) { - HostAdapter->LastResetAttempted[TargetID] = jiffies; - HostAdapter->LastResetCompleted[TargetID] = jiffies; + for (tgt_id = 0; tgt_id < adapter->maxdev; tgt_id++) { + adapter->last_resettried[tgt_id] = jiffies; + adapter->last_resetdone[tgt_id] = jiffies; } return SUCCESS; } /* - BusLogic_BIOSDiskParameters returns the Heads/Sectors/Cylinders BIOS Disk + blogic_diskparam returns the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk. The default disk geometry is 64 heads, 32 sectors, and the appropriate number of cylinders so as not to exceed drive capacity. In order for disks equal to or larger than 1 GB to be addressable by the BIOS @@ -3130,66 +3385,70 @@ static int BusLogic_ResetHostAdapter(struct BusLogic_HostAdapter *HostAdapter, b the BIOS, and a warning may be displayed. */ -static int BusLogic_BIOSDiskParameters(struct scsi_device *sdev, struct block_device *Device, sector_t capacity, int *Parameters) +static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev, + sector_t capacity, int *params) { - struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) sdev->host->hostdata; - struct BIOS_DiskParameters *DiskParameters = (struct BIOS_DiskParameters *) Parameters; + struct blogic_adapter *adapter = + (struct blogic_adapter *) sdev->host->hostdata; + struct bios_diskparam *diskparam = (struct bios_diskparam *) params; unsigned char *buf; - if (HostAdapter->ExtendedTranslationEnabled && capacity >= 2 * 1024 * 1024 /* 1 GB in 512 byte sectors */ ) { - if (capacity >= 4 * 1024 * 1024 /* 2 GB in 512 byte sectors */ ) { - DiskParameters->Heads = 255; - DiskParameters->Sectors = 63; + + if (adapter->ext_trans_enable && capacity >= 2 * 1024 * 1024 /* 1 GB in 512 byte sectors */) { + if (capacity >= 4 * 1024 * 1024 /* 2 GB in 512 byte sectors */) { + diskparam->heads = 255; + diskparam->sectors = 63; } else { - DiskParameters->Heads = 128; - DiskParameters->Sectors = 32; + diskparam->heads = 128; + diskparam->sectors = 32; } } else { - DiskParameters->Heads = 64; - DiskParameters->Sectors = 32; + diskparam->heads = 64; + diskparam->sectors = 32; } - DiskParameters->Cylinders = (unsigned long) capacity / (DiskParameters->Heads * DiskParameters->Sectors); - buf = scsi_bios_ptable(Device); + diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors); + buf = scsi_bios_ptable(dev); if (buf == NULL) return 0; /* - If the boot sector partition table flag is valid, search for a partition - table entry whose end_head matches one of the standard BusLogic geometry - translations (64/32, 
128/32, or 255/63).
+	   If the boot sector partition table flag is valid, search for
+	   a partition table entry whose end_head matches one of the
+	   standard BusLogic geometry translations (64/32, 128/32, or 255/63).
 	 */
 	if (*(unsigned short *) (buf + 64) == 0xAA55) {
-		struct partition *FirstPartitionEntry = (struct partition *) buf;
-		struct partition *PartitionEntry = FirstPartitionEntry;
-		int SavedCylinders = DiskParameters->Cylinders, PartitionNumber;
-		unsigned char PartitionEntryEndHead = 0, PartitionEntryEndSector = 0;
-		for (PartitionNumber = 0; PartitionNumber < 4; PartitionNumber++) {
-			PartitionEntryEndHead = PartitionEntry->end_head;
-			PartitionEntryEndSector = PartitionEntry->end_sector & 0x3F;
-			if (PartitionEntryEndHead == 64 - 1) {
-				DiskParameters->Heads = 64;
-				DiskParameters->Sectors = 32;
+		struct partition *part1_entry = (struct partition *) buf;
+		struct partition *part_entry = part1_entry;
+		int saved_cyl = diskparam->cylinders, part_no;
+		unsigned char part_end_head = 0, part_end_sector = 0;
+
+		for (part_no = 0; part_no < 4; part_no++) {
+			part_end_head = part_entry->end_head;
+			part_end_sector = part_entry->end_sector & 0x3F;
+			if (part_end_head == 64 - 1) {
+				diskparam->heads = 64;
+				diskparam->sectors = 32;
 				break;
-			} else if (PartitionEntryEndHead == 128 - 1) {
-				DiskParameters->Heads = 128;
-				DiskParameters->Sectors = 32;
+			} else if (part_end_head == 128 - 1) {
+				diskparam->heads = 128;
+				diskparam->sectors = 32;
 				break;
-			} else if (PartitionEntryEndHead == 255 - 1) {
-				DiskParameters->Heads = 255;
-				DiskParameters->Sectors = 63;
+			} else if (part_end_head == 255 - 1) {
+				diskparam->heads = 255;
+				diskparam->sectors = 63;
 				break;
 			}
-			PartitionEntry++;
+			part_entry++;
 		}
-		if (PartitionNumber == 4) {
-			PartitionEntryEndHead = FirstPartitionEntry->end_head;
-			PartitionEntryEndSector = FirstPartitionEntry->end_sector & 0x3F;
+		if (part_no == 4) {
+			part_end_head = part1_entry->end_head;
+			part_end_sector = part1_entry->end_sector & 0x3F;
 		}
-		DiskParameters->Cylinders = (unsigned long) capacity / (DiskParameters->Heads * DiskParameters->Sectors);
-		if (PartitionNumber < 4 && PartitionEntryEndSector == DiskParameters->Sectors) {
-			if (DiskParameters->Cylinders != SavedCylinders)
-				BusLogic_Warning("Adopting Geometry %d/%d from Partition Table\n", HostAdapter, DiskParameters->Heads, DiskParameters->Sectors);
-		} else if (PartitionEntryEndHead > 0 || PartitionEntryEndSector > 0) {
-			BusLogic_Warning("Warning: Partition Table appears to " "have Geometry %d/%d which is\n", HostAdapter, PartitionEntryEndHead + 1, PartitionEntryEndSector);
-			BusLogic_Warning("not compatible with current BusLogic " "Host Adapter Geometry %d/%d\n", HostAdapter, DiskParameters->Heads, DiskParameters->Sectors);
+		diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors);
+		if (part_no < 4 && part_end_sector == diskparam->sectors) {
+			if (diskparam->cylinders != saved_cyl)
+				blogic_warn("Adopting Geometry %d/%d from Partition Table\n", adapter, diskparam->heads, diskparam->sectors);
+		} else if (part_end_head > 0 || part_end_sector > 0) {
+			blogic_warn("Warning: Partition Table appears to " "have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector);
+			blogic_warn("not compatible with current BusLogic " "Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors);
 		}
 	}
 	kfree(buf);
@@ -3201,186 +3460,182 @@ static int BusLogic_BIOSDiskParameters(struct scsi_device *sdev, struct block_de

  BusLogic_ProcDirectoryInfo implements
/proc/scsi/BusLogic/<N>. */ -static int BusLogic_ProcDirectoryInfo(struct Scsi_Host *shost, char *ProcBuffer, char **StartPointer, off_t Offset, int BytesAvailable, int WriteFlag) +static int blogic_write_info(struct Scsi_Host *shost, char *procbuf, + int bytes_avail) { - struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) shost->hostdata; - struct BusLogic_TargetStatistics *TargetStatistics; - int TargetID, Length; - char *Buffer; - - TargetStatistics = HostAdapter->TargetStatistics; - if (WriteFlag) { - HostAdapter->ExternalHostAdapterResets = 0; - HostAdapter->HostAdapterInternalErrors = 0; - memset(TargetStatistics, 0, BusLogic_MaxTargetDevices * sizeof(struct BusLogic_TargetStatistics)); - return 0; - } - Buffer = HostAdapter->MessageBuffer; - Length = HostAdapter->MessageBufferLength; - Length += sprintf(&Buffer[Length], "\n\ + struct blogic_adapter *adapter = + (struct blogic_adapter *) shost->hostdata; + struct blogic_tgt_stats *tgt_stats; + + tgt_stats = adapter->tgt_stats; + adapter->ext_resets = 0; + adapter->adapter_intern_errors = 0; + memset(tgt_stats, 0, BLOGIC_MAXDEV * sizeof(struct blogic_tgt_stats)); + return 0; +} + +static int blogic_show_info(struct seq_file *m, struct Scsi_Host *shost) +{ + struct blogic_adapter *adapter = (struct blogic_adapter *) shost->hostdata; + struct blogic_tgt_stats *tgt_stats; + int tgt; + + tgt_stats = adapter->tgt_stats; + seq_write(m, adapter->msgbuf, adapter->msgbuflen); + seq_printf(m, "\n\ Current Driver Queue Depth: %d\n\ -Currently Allocated CCBs: %d\n", HostAdapter->DriverQueueDepth, HostAdapter->AllocatedCCBs); - Length += sprintf(&Buffer[Length], "\n\n\ +Currently Allocated CCBs: %d\n", adapter->drvr_qdepth, adapter->alloc_ccbs); + seq_printf(m, "\n\n\ DATA TRANSFER STATISTICS\n\ \n\ Target Tagged Queuing Queue Depth Active Attempted Completed\n\ ====== ============== =========== ====== ========= =========\n"); - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) { - struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[TargetID]; - if (!TargetFlags->TargetExists) + for (tgt = 0; tgt < adapter->maxdev; tgt++) { + struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; + if (!tgt_flags->tgt_exists) continue; - Length += sprintf(&Buffer[Length], " %2d %s", TargetID, (TargetFlags->TaggedQueuingSupported ? (TargetFlags->TaggedQueuingActive ? " Active" : (HostAdapter->TaggedQueuingPermitted & (1 << TargetID) + seq_printf(m, " %2d %s", tgt, (tgt_flags->tagq_ok ? (tgt_flags->tagq_active ? " Active" : (adapter->tagq_ok & (1 << tgt) ? 
" Permitted" : " Disabled")) : "Not Supported")); - Length += sprintf(&Buffer[Length], - " %3d %3u %9u %9u\n", HostAdapter->QueueDepth[TargetID], HostAdapter->ActiveCommands[TargetID], TargetStatistics[TargetID].CommandsAttempted, TargetStatistics[TargetID].CommandsCompleted); + seq_printf(m, + " %3d %3u %9u %9u\n", adapter->qdepth[tgt], adapter->active_cmds[tgt], tgt_stats[tgt].cmds_tried, tgt_stats[tgt].cmds_complete); } - Length += sprintf(&Buffer[Length], "\n\ + seq_printf(m, "\n\ Target Read Commands Write Commands Total Bytes Read Total Bytes Written\n\ ====== ============= ============== =================== ===================\n"); - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) { - struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[TargetID]; - if (!TargetFlags->TargetExists) + for (tgt = 0; tgt < adapter->maxdev; tgt++) { + struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; + if (!tgt_flags->tgt_exists) continue; - Length += sprintf(&Buffer[Length], " %2d %9u %9u", TargetID, TargetStatistics[TargetID].ReadCommands, TargetStatistics[TargetID].WriteCommands); - if (TargetStatistics[TargetID].TotalBytesRead.Billions > 0) - Length += sprintf(&Buffer[Length], " %9u%09u", TargetStatistics[TargetID].TotalBytesRead.Billions, TargetStatistics[TargetID].TotalBytesRead.Units); + seq_printf(m, " %2d %9u %9u", tgt, tgt_stats[tgt].read_cmds, tgt_stats[tgt].write_cmds); + if (tgt_stats[tgt].bytesread.billions > 0) + seq_printf(m, " %9u%09u", tgt_stats[tgt].bytesread.billions, tgt_stats[tgt].bytesread.units); else - Length += sprintf(&Buffer[Length], " %9u", TargetStatistics[TargetID].TotalBytesRead.Units); - if (TargetStatistics[TargetID].TotalBytesWritten.Billions > 0) - Length += sprintf(&Buffer[Length], " %9u%09u\n", TargetStatistics[TargetID].TotalBytesWritten.Billions, TargetStatistics[TargetID].TotalBytesWritten.Units); + seq_printf(m, " %9u", tgt_stats[tgt].bytesread.units); + if (tgt_stats[tgt].byteswritten.billions > 0) + seq_printf(m, " %9u%09u\n", tgt_stats[tgt].byteswritten.billions, tgt_stats[tgt].byteswritten.units); else - Length += sprintf(&Buffer[Length], " %9u\n", TargetStatistics[TargetID].TotalBytesWritten.Units); + seq_printf(m, " %9u\n", tgt_stats[tgt].byteswritten.units); } - Length += sprintf(&Buffer[Length], "\n\ + seq_printf(m, "\n\ Target Command 0-1KB 1-2KB 2-4KB 4-8KB 8-16KB\n\ ====== ======= ========= ========= ========= ========= =========\n"); - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) { - struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[TargetID]; - if (!TargetFlags->TargetExists) + for (tgt = 0; tgt < adapter->maxdev; tgt++) { + struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; + if (!tgt_flags->tgt_exists) continue; - Length += - sprintf(&Buffer[Length], - " %2d Read %9u %9u %9u %9u %9u\n", TargetID, - TargetStatistics[TargetID].ReadCommandSizeBuckets[0], - TargetStatistics[TargetID].ReadCommandSizeBuckets[1], TargetStatistics[TargetID].ReadCommandSizeBuckets[2], TargetStatistics[TargetID].ReadCommandSizeBuckets[3], TargetStatistics[TargetID].ReadCommandSizeBuckets[4]); - Length += - sprintf(&Buffer[Length], - " %2d Write %9u %9u %9u %9u %9u\n", TargetID, - TargetStatistics[TargetID].WriteCommandSizeBuckets[0], - TargetStatistics[TargetID].WriteCommandSizeBuckets[1], TargetStatistics[TargetID].WriteCommandSizeBuckets[2], TargetStatistics[TargetID].WriteCommandSizeBuckets[3], TargetStatistics[TargetID].WriteCommandSizeBuckets[4]); - } - 
Length += sprintf(&Buffer[Length], "\n\ + seq_printf(m, + " %2d Read %9u %9u %9u %9u %9u\n", tgt, + tgt_stats[tgt].read_sz_buckets[0], + tgt_stats[tgt].read_sz_buckets[1], tgt_stats[tgt].read_sz_buckets[2], tgt_stats[tgt].read_sz_buckets[3], tgt_stats[tgt].read_sz_buckets[4]); + seq_printf(m, + " %2d Write %9u %9u %9u %9u %9u\n", tgt, + tgt_stats[tgt].write_sz_buckets[0], + tgt_stats[tgt].write_sz_buckets[1], tgt_stats[tgt].write_sz_buckets[2], tgt_stats[tgt].write_sz_buckets[3], tgt_stats[tgt].write_sz_buckets[4]); + } + seq_printf(m, "\n\ Target Command 16-32KB 32-64KB 64-128KB 128-256KB 256KB+\n\ ====== ======= ========= ========= ========= ========= =========\n"); - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) { - struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[TargetID]; - if (!TargetFlags->TargetExists) + for (tgt = 0; tgt < adapter->maxdev; tgt++) { + struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; + if (!tgt_flags->tgt_exists) continue; - Length += - sprintf(&Buffer[Length], - " %2d Read %9u %9u %9u %9u %9u\n", TargetID, - TargetStatistics[TargetID].ReadCommandSizeBuckets[5], - TargetStatistics[TargetID].ReadCommandSizeBuckets[6], TargetStatistics[TargetID].ReadCommandSizeBuckets[7], TargetStatistics[TargetID].ReadCommandSizeBuckets[8], TargetStatistics[TargetID].ReadCommandSizeBuckets[9]); - Length += - sprintf(&Buffer[Length], - " %2d Write %9u %9u %9u %9u %9u\n", TargetID, - TargetStatistics[TargetID].WriteCommandSizeBuckets[5], - TargetStatistics[TargetID].WriteCommandSizeBuckets[6], TargetStatistics[TargetID].WriteCommandSizeBuckets[7], TargetStatistics[TargetID].WriteCommandSizeBuckets[8], TargetStatistics[TargetID].WriteCommandSizeBuckets[9]); - } - Length += sprintf(&Buffer[Length], "\n\n\ + seq_printf(m, + " %2d Read %9u %9u %9u %9u %9u\n", tgt, + tgt_stats[tgt].read_sz_buckets[5], + tgt_stats[tgt].read_sz_buckets[6], tgt_stats[tgt].read_sz_buckets[7], tgt_stats[tgt].read_sz_buckets[8], tgt_stats[tgt].read_sz_buckets[9]); + seq_printf(m, + " %2d Write %9u %9u %9u %9u %9u\n", tgt, + tgt_stats[tgt].write_sz_buckets[5], + tgt_stats[tgt].write_sz_buckets[6], tgt_stats[tgt].write_sz_buckets[7], tgt_stats[tgt].write_sz_buckets[8], tgt_stats[tgt].write_sz_buckets[9]); + } + seq_printf(m, "\n\n\ ERROR RECOVERY STATISTICS\n\ \n\ Command Aborts Bus Device Resets Host Adapter Resets\n\ Target Requested Completed Requested Completed Requested Completed\n\ ID \\\\\\\\ Attempted //// \\\\\\\\ Attempted //// \\\\\\\\ Attempted ////\n\ ====== ===== ===== ===== ===== ===== ===== ===== ===== =====\n"); - for (TargetID = 0; TargetID < HostAdapter->MaxTargetDevices; TargetID++) { - struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[TargetID]; - if (!TargetFlags->TargetExists) + for (tgt = 0; tgt < adapter->maxdev; tgt++) { + struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt]; + if (!tgt_flags->tgt_exists) continue; - Length += sprintf(&Buffer[Length], "\ - %2d %5d %5d %5d %5d %5d %5d %5d %5d %5d\n", TargetID, TargetStatistics[TargetID].CommandAbortsRequested, TargetStatistics[TargetID].CommandAbortsAttempted, TargetStatistics[TargetID].CommandAbortsCompleted, TargetStatistics[TargetID].BusDeviceResetsRequested, TargetStatistics[TargetID].BusDeviceResetsAttempted, TargetStatistics[TargetID].BusDeviceResetsCompleted, TargetStatistics[TargetID].HostAdapterResetsRequested, TargetStatistics[TargetID].HostAdapterResetsAttempted, TargetStatistics[TargetID].HostAdapterResetsCompleted); - } - Length += 
sprintf(&Buffer[Length], "\nExternal Host Adapter Resets: %d\n", HostAdapter->ExternalHostAdapterResets); - Length += sprintf(&Buffer[Length], "Host Adapter Internal Errors: %d\n", HostAdapter->HostAdapterInternalErrors); - if (Length >= BusLogic_MessageBufferSize) - BusLogic_Error("Message Buffer length %d exceeds size %d\n", HostAdapter, Length, BusLogic_MessageBufferSize); - if ((Length -= Offset) <= 0) - return 0; - if (Length >= BytesAvailable) - Length = BytesAvailable; - memcpy(ProcBuffer, HostAdapter->MessageBuffer + Offset, Length); - *StartPointer = ProcBuffer; - return Length; + seq_printf(m, "\ + %2d %5d %5d %5d %5d %5d %5d %5d %5d %5d\n", tgt, tgt_stats[tgt].aborts_request, tgt_stats[tgt].aborts_tried, tgt_stats[tgt].aborts_done, tgt_stats[tgt].bdr_request, tgt_stats[tgt].bdr_tried, tgt_stats[tgt].bdr_done, tgt_stats[tgt].adatper_reset_req, tgt_stats[tgt].adapter_reset_attempt, tgt_stats[tgt].adapter_reset_done); + } + seq_printf(m, "\nExternal Host Adapter Resets: %d\n", adapter->ext_resets); + seq_printf(m, "Host Adapter Internal Errors: %d\n", adapter->adapter_intern_errors); + return 0; } /* - BusLogic_Message prints Driver Messages. + blogic_msg prints Driver Messages. */ -static void BusLogic_Message(enum BusLogic_MessageLevel MessageLevel, char *Format, struct BusLogic_HostAdapter *HostAdapter, ...) +static void blogic_msg(enum blogic_msglevel msglevel, char *fmt, + struct blogic_adapter *adapter, ...) { - static char Buffer[BusLogic_LineBufferSize]; - static bool BeginningOfLine = true; - va_list Arguments; - int Length = 0; - va_start(Arguments, HostAdapter); - Length = vsprintf(Buffer, Format, Arguments); - va_end(Arguments); - if (MessageLevel == BusLogic_AnnounceLevel) { - static int AnnouncementLines = 0; - strcpy(&HostAdapter->MessageBuffer[HostAdapter->MessageBufferLength], Buffer); - HostAdapter->MessageBufferLength += Length; - if (++AnnouncementLines <= 2) - printk("%sscsi: %s", BusLogic_MessageLevelMap[MessageLevel], Buffer); - } else if (MessageLevel == BusLogic_InfoLevel) { - strcpy(&HostAdapter->MessageBuffer[HostAdapter->MessageBufferLength], Buffer); - HostAdapter->MessageBufferLength += Length; - if (BeginningOfLine) { - if (Buffer[0] != '\n' || Length > 1) - printk("%sscsi%d: %s", BusLogic_MessageLevelMap[MessageLevel], HostAdapter->HostNumber, Buffer); + static char buf[BLOGIC_LINEBUF_SIZE]; + static bool begin = true; + va_list args; + int len = 0; + + va_start(args, adapter); + len = vsprintf(buf, fmt, args); + va_end(args); + if (msglevel == BLOGIC_ANNOUNCE_LEVEL) { + static int msglines = 0; + strcpy(&adapter->msgbuf[adapter->msgbuflen], buf); + adapter->msgbuflen += len; + if (++msglines <= 2) + printk("%sscsi: %s", blogic_msglevelmap[msglevel], buf); + } else if (msglevel == BLOGIC_INFO_LEVEL) { + strcpy(&adapter->msgbuf[adapter->msgbuflen], buf); + adapter->msgbuflen += len; + if (begin) { + if (buf[0] != '\n' || len > 1) + printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf); } else - printk("%s", Buffer); + printk("%s", buf); } else { - if (BeginningOfLine) { - if (HostAdapter != NULL && HostAdapter->HostAdapterInitialized) - printk("%sscsi%d: %s", BusLogic_MessageLevelMap[MessageLevel], HostAdapter->HostNumber, Buffer); + if (begin) { + if (adapter != NULL && adapter->adapter_initd) + printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf); else - printk("%s%s", BusLogic_MessageLevelMap[MessageLevel], Buffer); + printk("%s%s", blogic_msglevelmap[msglevel], buf); } else - printk("%s", Buffer); + 
printk("%s", buf); } - BeginningOfLine = (Buffer[Length - 1] == '\n'); + begin = (buf[len - 1] == '\n'); } /* - BusLogic_ParseKeyword parses an individual option keyword. It returns true + blogic_parse parses an individual option keyword. It returns true and updates the pointer if the keyword is recognized and false otherwise. */ -static bool __init BusLogic_ParseKeyword(char **StringPointer, char *Keyword) +static bool __init blogic_parse(char **str, char *keyword) { - char *Pointer = *StringPointer; - while (*Keyword != '\0') { - char StringChar = *Pointer++; - char KeywordChar = *Keyword++; - if (StringChar >= 'A' && StringChar <= 'Z') - StringChar += 'a' - 'Z'; - if (KeywordChar >= 'A' && KeywordChar <= 'Z') - KeywordChar += 'a' - 'Z'; - if (StringChar != KeywordChar) + char *pointer = *str; + while (*keyword != '\0') { + char strch = *pointer++; + char keywordch = *keyword++; + if (strch >= 'A' && strch <= 'Z') + strch += 'a' - 'Z'; + if (keywordch >= 'A' && keywordch <= 'Z') + keywordch += 'a' - 'Z'; + if (strch != keywordch) return false; } - *StringPointer = Pointer; + *str = pointer; return true; } /* - BusLogic_ParseDriverOptions handles processing of BusLogic Driver Options + blogic_parseopts handles processing of BusLogic Driver Options specifications. BusLogic Driver Options may be specified either via the Linux Kernel Command @@ -3396,164 +3651,177 @@ static bool __init BusLogic_ParseKeyword(char **StringPointer, char *Keyword) <file:Documentation/scsi/BusLogic.txt>. */ -static int __init BusLogic_ParseDriverOptions(char *OptionsString) +static int __init blogic_parseopts(char *options) { while (true) { - struct BusLogic_DriverOptions *DriverOptions = &BusLogic_DriverOptions[BusLogic_DriverOptionsCount++]; - int TargetID; - memset(DriverOptions, 0, sizeof(struct BusLogic_DriverOptions)); - while (*OptionsString != '\0' && *OptionsString != ';') { + struct blogic_drvr_options *drvr_opts = + &blogic_drvr_options[blogic_drvr_options_count++]; + int tgt_id; + + memset(drvr_opts, 0, sizeof(struct blogic_drvr_options)); + while (*options != '\0' && *options != ';') { /* Probing Options. 
*/ - if (BusLogic_ParseKeyword(&OptionsString, "IO:")) { - unsigned long IO_Address = simple_strtoul(OptionsString, &OptionsString, 0); - BusLogic_ProbeOptions.LimitedProbeISA = true; - switch (IO_Address) { + if (blogic_parse(&options, "IO:")) { + unsigned long io_addr = simple_strtoul(options, + &options, 0); + blogic_probe_options.limited_isa = true; + switch (io_addr) { case 0x330: - BusLogic_ProbeOptions.Probe330 = true; + blogic_probe_options.probe330 = true; break; case 0x334: - BusLogic_ProbeOptions.Probe334 = true; + blogic_probe_options.probe334 = true; break; case 0x230: - BusLogic_ProbeOptions.Probe230 = true; + blogic_probe_options.probe230 = true; break; case 0x234: - BusLogic_ProbeOptions.Probe234 = true; + blogic_probe_options.probe234 = true; break; case 0x130: - BusLogic_ProbeOptions.Probe130 = true; + blogic_probe_options.probe130 = true; break; case 0x134: - BusLogic_ProbeOptions.Probe134 = true; + blogic_probe_options.probe134 = true; break; default: - BusLogic_Error("BusLogic: Invalid Driver Options " "(invalid I/O Address 0x%X)\n", NULL, IO_Address); + blogic_err("BusLogic: Invalid Driver Options " "(invalid I/O Address 0x%X)\n", NULL, io_addr); return 0; } - } else if (BusLogic_ParseKeyword(&OptionsString, "NoProbeISA")) - BusLogic_ProbeOptions.NoProbeISA = true; - else if (BusLogic_ParseKeyword(&OptionsString, "NoProbePCI")) - BusLogic_ProbeOptions.NoProbePCI = true; - else if (BusLogic_ParseKeyword(&OptionsString, "NoProbe")) - BusLogic_ProbeOptions.NoProbe = true; - else if (BusLogic_ParseKeyword(&OptionsString, "NoSortPCI")) - BusLogic_ProbeOptions.NoSortPCI = true; - else if (BusLogic_ParseKeyword(&OptionsString, "MultiMasterFirst")) - BusLogic_ProbeOptions.MultiMasterFirst = true; - else if (BusLogic_ParseKeyword(&OptionsString, "FlashPointFirst")) - BusLogic_ProbeOptions.FlashPointFirst = true; + } else if (blogic_parse(&options, "NoProbeISA")) + blogic_probe_options.noprobe_isa = true; + else if (blogic_parse(&options, "NoProbePCI")) + blogic_probe_options.noprobe_pci = true; + else if (blogic_parse(&options, "NoProbe")) + blogic_probe_options.noprobe = true; + else if (blogic_parse(&options, "NoSortPCI")) + blogic_probe_options.nosort_pci = true; + else if (blogic_parse(&options, "MultiMasterFirst")) + blogic_probe_options.multimaster_first = true; + else if (blogic_parse(&options, "FlashPointFirst")) + blogic_probe_options.flashpoint_first = true; /* Tagged Queuing Options. 
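
   [Editor's sketch -- annotation, not part of the commit.]  The queue-depth
   and tagged-queuing options parsed below accept strings such as these
   (illustrative forms, per Documentation/scsi/BusLogic.txt):

	BusLogic=QD:[2,2,4,4]    per-target queue depths, in target-ID order
	BusLogic=QD:16           one common depth for every target
	BusLogic=TQ:Enable       tagged queuing on for all targets
	BusLogic=TQ:YNXX         per-target Y/N/X flags, one per target ID

   A stand-alone model of the per-target Y/N/X decode (hypothetical name
   decode_tq; 16 being BLOGIC_MAXDEV): tagq_ok carries the requested state,
   tagq_ok_mask records which targets were explicitly configured, and 'X'
   leaves a target at the adapter default:

	static void decode_tq(const char *s, unsigned short *ok,
			      unsigned short *mask)
	{
		unsigned short bit = 1;
		int id;

		for (id = 0; id < 16 && s[id] != '\0'; id++, bit <<= 1) {
			if (s[id] == 'Y') {
				*ok |= bit;	// enable, mark configured
				*mask |= bit;
			} else if (s[id] == 'N') {
				*ok &= ~bit;	// disable, mark configured
				*mask |= bit;
			}
			// 'X': leave both bits clear, default prevails
		}
	}

   The tagged queuing options themselves follow.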
*/ - else if (BusLogic_ParseKeyword(&OptionsString, "QueueDepth:[") || BusLogic_ParseKeyword(&OptionsString, "QD:[")) { - for (TargetID = 0; TargetID < BusLogic_MaxTargetDevices; TargetID++) { - unsigned short QueueDepth = simple_strtoul(OptionsString, &OptionsString, 0); - if (QueueDepth > BusLogic_MaxTaggedQueueDepth) { - BusLogic_Error("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, QueueDepth); + else if (blogic_parse(&options, "QueueDepth:[") || + blogic_parse(&options, "QD:[")) { + for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) { + unsigned short qdepth = simple_strtoul(options, &options, 0); + if (qdepth > BLOGIC_MAX_TAG_DEPTH) { + blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth); return 0; } - DriverOptions->QueueDepth[TargetID] = QueueDepth; - if (*OptionsString == ',') - OptionsString++; - else if (*OptionsString == ']') + drvr_opts->qdepth[tgt_id] = qdepth; + if (*options == ',') + options++; + else if (*options == ']') break; else { - BusLogic_Error("BusLogic: Invalid Driver Options " "(',' or ']' expected at '%s')\n", NULL, OptionsString); + blogic_err("BusLogic: Invalid Driver Options " "(',' or ']' expected at '%s')\n", NULL, options); return 0; } } - if (*OptionsString != ']') { - BusLogic_Error("BusLogic: Invalid Driver Options " "(']' expected at '%s')\n", NULL, OptionsString); + if (*options != ']') { + blogic_err("BusLogic: Invalid Driver Options " "(']' expected at '%s')\n", NULL, options); return 0; } else - OptionsString++; - } else if (BusLogic_ParseKeyword(&OptionsString, "QueueDepth:") || BusLogic_ParseKeyword(&OptionsString, "QD:")) { - unsigned short QueueDepth = simple_strtoul(OptionsString, &OptionsString, 0); - if (QueueDepth == 0 || QueueDepth > BusLogic_MaxTaggedQueueDepth) { - BusLogic_Error("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, QueueDepth); + options++; + } else if (blogic_parse(&options, "QueueDepth:") || blogic_parse(&options, "QD:")) { + unsigned short qdepth = simple_strtoul(options, &options, 0); + if (qdepth == 0 || + qdepth > BLOGIC_MAX_TAG_DEPTH) { + blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth); return 0; } - DriverOptions->CommonQueueDepth = QueueDepth; - for (TargetID = 0; TargetID < BusLogic_MaxTargetDevices; TargetID++) - DriverOptions->QueueDepth[TargetID] = QueueDepth; - } else if (BusLogic_ParseKeyword(&OptionsString, "TaggedQueuing:") || BusLogic_ParseKeyword(&OptionsString, "TQ:")) { - if (BusLogic_ParseKeyword(&OptionsString, "Default")) { - DriverOptions->TaggedQueuingPermitted = 0x0000; - DriverOptions->TaggedQueuingPermittedMask = 0x0000; - } else if (BusLogic_ParseKeyword(&OptionsString, "Enable")) { - DriverOptions->TaggedQueuingPermitted = 0xFFFF; - DriverOptions->TaggedQueuingPermittedMask = 0xFFFF; - } else if (BusLogic_ParseKeyword(&OptionsString, "Disable")) { - DriverOptions->TaggedQueuingPermitted = 0x0000; - DriverOptions->TaggedQueuingPermittedMask = 0xFFFF; + drvr_opts->common_qdepth = qdepth; + for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) + drvr_opts->qdepth[tgt_id] = qdepth; + } else if (blogic_parse(&options, "TaggedQueuing:") || + blogic_parse(&options, "TQ:")) { + if (blogic_parse(&options, "Default")) { + drvr_opts->tagq_ok = 0x0000; + drvr_opts->tagq_ok_mask = 0x0000; + } else if (blogic_parse(&options, "Enable")) { + drvr_opts->tagq_ok = 0xFFFF; + drvr_opts->tagq_ok_mask = 0xFFFF; + } else if (blogic_parse(&options, "Disable")) { + drvr_opts->tagq_ok = 0x0000; + 
drvr_opts->tagq_ok_mask = 0xFFFF; } else { - unsigned short TargetBit; - for (TargetID = 0, TargetBit = 1; TargetID < BusLogic_MaxTargetDevices; TargetID++, TargetBit <<= 1) - switch (*OptionsString++) { + unsigned short tgt_bit; + for (tgt_id = 0, tgt_bit = 1; + tgt_id < BLOGIC_MAXDEV; + tgt_id++, tgt_bit <<= 1) + switch (*options++) { case 'Y': - DriverOptions->TaggedQueuingPermitted |= TargetBit; - DriverOptions->TaggedQueuingPermittedMask |= TargetBit; + drvr_opts->tagq_ok |= tgt_bit; + drvr_opts->tagq_ok_mask |= tgt_bit; break; case 'N': - DriverOptions->TaggedQueuingPermitted &= ~TargetBit; - DriverOptions->TaggedQueuingPermittedMask |= TargetBit; + drvr_opts->tagq_ok &= ~tgt_bit; + drvr_opts->tagq_ok_mask |= tgt_bit; break; case 'X': break; default: - OptionsString--; - TargetID = BusLogic_MaxTargetDevices; + options--; + tgt_id = BLOGIC_MAXDEV; break; } } } /* Miscellaneous Options. */ - else if (BusLogic_ParseKeyword(&OptionsString, "BusSettleTime:") || BusLogic_ParseKeyword(&OptionsString, "BST:")) { - unsigned short BusSettleTime = simple_strtoul(OptionsString, &OptionsString, 0); - if (BusSettleTime > 5 * 60) { - BusLogic_Error("BusLogic: Invalid Driver Options " "(invalid Bus Settle Time %d)\n", NULL, BusSettleTime); + else if (blogic_parse(&options, "BusSettleTime:") || + blogic_parse(&options, "BST:")) { + unsigned short bus_settle_time = + simple_strtoul(options, &options, 0); + if (bus_settle_time > 5 * 60) { + blogic_err("BusLogic: Invalid Driver Options " "(invalid Bus Settle Time %d)\n", NULL, bus_settle_time); return 0; } - DriverOptions->BusSettleTime = BusSettleTime; - } else if (BusLogic_ParseKeyword(&OptionsString, "InhibitTargetInquiry")) - DriverOptions->LocalOptions.InhibitTargetInquiry = true; + drvr_opts->bus_settle_time = bus_settle_time; + } else if (blogic_parse(&options, + "InhibitTargetInquiry")) + drvr_opts->stop_tgt_inquiry = true; /* Debugging Options. 
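
   [Editor's sketch -- annotation, not part of the commit.]  The four trace
   flags parsed below simply gate extra diagnostic output elsewhere in the
   driver; the consuming pattern looks like this (shape per BusLogic.c,
   message text illustrative):

	if (blogic_global_options.trace_config)
		blogic_notice("blogic_cmd(%02X) status %02X\n",
			      adapter, opcode, statusreg.all);

   "Debug" is shorthand for switching all four flags on at once.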
*/ - else if (BusLogic_ParseKeyword(&OptionsString, "TraceProbe")) - BusLogic_GlobalOptions.TraceProbe = true; - else if (BusLogic_ParseKeyword(&OptionsString, "TraceHardwareReset")) - BusLogic_GlobalOptions.TraceHardwareReset = true; - else if (BusLogic_ParseKeyword(&OptionsString, "TraceConfiguration")) - BusLogic_GlobalOptions.TraceConfiguration = true; - else if (BusLogic_ParseKeyword(&OptionsString, "TraceErrors")) - BusLogic_GlobalOptions.TraceErrors = true; - else if (BusLogic_ParseKeyword(&OptionsString, "Debug")) { - BusLogic_GlobalOptions.TraceProbe = true; - BusLogic_GlobalOptions.TraceHardwareReset = true; - BusLogic_GlobalOptions.TraceConfiguration = true; - BusLogic_GlobalOptions.TraceErrors = true; + else if (blogic_parse(&options, "TraceProbe")) + blogic_global_options.trace_probe = true; + else if (blogic_parse(&options, "TraceHardwareReset")) + blogic_global_options.trace_hw_reset = true; + else if (blogic_parse(&options, "TraceConfiguration")) + blogic_global_options.trace_config = true; + else if (blogic_parse(&options, "TraceErrors")) + blogic_global_options.trace_err = true; + else if (blogic_parse(&options, "Debug")) { + blogic_global_options.trace_probe = true; + blogic_global_options.trace_hw_reset = true; + blogic_global_options.trace_config = true; + blogic_global_options.trace_err = true; } - if (*OptionsString == ',') - OptionsString++; - else if (*OptionsString != ';' && *OptionsString != '\0') { - BusLogic_Error("BusLogic: Unexpected Driver Option '%s' " "ignored\n", NULL, OptionsString); - *OptionsString = '\0'; + if (*options == ',') + options++; + else if (*options != ';' && *options != '\0') { + blogic_err("BusLogic: Unexpected Driver Option '%s' " "ignored\n", NULL, options); + *options = '\0'; } } - if (!(BusLogic_DriverOptionsCount == 0 || BusLogic_ProbeInfoCount == 0 || BusLogic_DriverOptionsCount == BusLogic_ProbeInfoCount)) { - BusLogic_Error("BusLogic: Invalid Driver Options " "(all or no I/O Addresses must be specified)\n", NULL); + if (!(blogic_drvr_options_count == 0 || + blogic_probeinfo_count == 0 || + blogic_drvr_options_count == blogic_probeinfo_count)) { + blogic_err("BusLogic: Invalid Driver Options " "(all or no I/O Addresses must be specified)\n", NULL); return 0; } /* Tagged Queuing is disabled when the Queue Depth is 1 since queuing multiple commands is not possible. 
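
   [Editor's sketch -- annotation, not part of the commit.]  The loop below
   enforces that invariant; a minimal checker expressing it (hypothetical
   helper, struct blogic_drvr_options as defined in BusLogic.h, 16 being
   BLOGIC_MAXDEV):

	static bool tagq_consistent(const struct blogic_drvr_options *o)
	{
		int tgt;

		for (tgt = 0; tgt < 16; tgt++) {
			unsigned short bit = 1 << tgt;

			// depth 1 must mean: tagging off, explicitly masked
			if (o->qdepth[tgt] == 1 &&
			    ((o->tagq_ok & bit) || !(o->tagq_ok_mask & bit)))
				return false;
		}
		return true;
	}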
*/ - for (TargetID = 0; TargetID < BusLogic_MaxTargetDevices; TargetID++) - if (DriverOptions->QueueDepth[TargetID] == 1) { - unsigned short TargetBit = 1 << TargetID; - DriverOptions->TaggedQueuingPermitted &= ~TargetBit; - DriverOptions->TaggedQueuingPermittedMask |= TargetBit; + for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) + if (drvr_opts->qdepth[tgt_id] == 1) { + unsigned short tgt_bit = 1 << tgt_id; + drvr_opts->tagq_ok &= ~tgt_bit; + drvr_opts->tagq_ok_mask |= tgt_bit; } - if (*OptionsString == ';') - OptionsString++; - if (*OptionsString == '\0') + if (*options == ';') + options++; + if (*options == '\0') return 0; } return 1; @@ -3563,18 +3831,19 @@ static int __init BusLogic_ParseDriverOptions(char *OptionsString) Get it all started */ -static struct scsi_host_template Bus_Logic_template = { +static struct scsi_host_template blogic_template = { .module = THIS_MODULE, .proc_name = "BusLogic", - .proc_info = BusLogic_ProcDirectoryInfo, + .write_info = blogic_write_info, + .show_info = blogic_show_info, .name = "BusLogic", - .info = BusLogic_DriverInfo, - .queuecommand = BusLogic_QueueCommand, - .slave_configure = BusLogic_SlaveConfigure, - .bios_param = BusLogic_BIOSDiskParameters, - .eh_host_reset_handler = BusLogic_host_reset, + .info = blogic_drvr_info, + .queuecommand = blogic_qcmd, + .slave_configure = blogic_slaveconfig, + .bios_param = blogic_diskparam, + .eh_host_reset_handler = blogic_hostreset, #if 0 - .eh_abort_handler = BusLogic_AbortCommand, + .eh_abort_handler = blogic_abort, #endif .unchecked_isa_dma = 1, .max_sectors = 128, @@ -3582,40 +3851,40 @@ static struct scsi_host_template Bus_Logic_template = { }; /* - BusLogic_Setup handles processing of Kernel Command Line Arguments. + blogic_setup handles processing of Kernel Command Line Arguments. */ -static int __init BusLogic_Setup(char *str) +static int __init blogic_setup(char *str) { int ints[3]; (void) get_options(str, ARRAY_SIZE(ints), ints); if (ints[0] != 0) { - BusLogic_Error("BusLogic: Obsolete Command Line Entry " "Format Ignored\n", NULL); + blogic_err("BusLogic: Obsolete Command Line Entry " "Format Ignored\n", NULL); return 0; } if (str == NULL || *str == '\0') return 0; - return BusLogic_ParseDriverOptions(str); + return blogic_parseopts(str); } /* * Exit function. Deletes all hosts associated with this driver. 
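
   [Editor's sketch -- annotation, not part of the commit.]  blogic_exit()
   below uses the _safe list iterator because blogic_deladapter() unlinks
   and frees the node under the cursor; the successor must be fetched before
   the current entry disappears.  The general delete-all idiom (hypothetical
   struct item and list head; list_del()/kfree() are the usual kernel calls):

	struct item *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, &some_list, node) {
		list_del(&cur->node);	// safe: 'tmp' was fetched already
		kfree(cur);
	}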
*/ -static void __exit BusLogic_exit(void) +static void __exit blogic_exit(void) { - struct BusLogic_HostAdapter *ha, *next; + struct blogic_adapter *ha, *next; - list_for_each_entry_safe(ha, next, &BusLogic_host_list, host_list) - BusLogic_ReleaseHostAdapter(ha); + list_for_each_entry_safe(ha, next, &blogic_host_list, host_list) + blogic_deladapter(ha); } -__setup("BusLogic=", BusLogic_Setup); +__setup("BusLogic=", blogic_setup); #ifdef MODULE -static struct pci_device_id BusLogic_pci_tbl[] = { +/*static struct pci_device_id blogic_pci_tbl[] = { { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC, @@ -3623,9 +3892,15 @@ static struct pci_device_id BusLogic_pci_tbl[] = { { PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { } +};*/ +static DEFINE_PCI_DEVICE_TABLE(blogic_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER)}, + {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC)}, + {PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT)}, + {0, }, }; #endif -MODULE_DEVICE_TABLE(pci, BusLogic_pci_tbl); +MODULE_DEVICE_TABLE(pci, blogic_pci_tbl); -module_init(BusLogic_init); -module_exit(BusLogic_exit); +module_init(blogic_init); +module_exit(blogic_exit); diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h index 649fcb31f26..b53ec2f1e8c 100644 --- a/drivers/scsi/BusLogic.h +++ b/drivers/scsi/BusLogic.h @@ -37,14 +37,14 @@ Define the maximum number of BusLogic Host Adapters supported by this driver. */ -#define BusLogic_MaxHostAdapters 16 +#define BLOGIC_MAX_ADAPTERS 16 /* Define the maximum number of Target Devices supported by this driver. */ -#define BusLogic_MaxTargetDevices 16 +#define BLOGIC_MAXDEV 16 /* @@ -53,7 +53,7 @@ large as the largest single request generated by the I/O Subsystem. */ -#define BusLogic_ScatterGatherLimit 128 +#define BLOGIC_SG_LIMIT 128 /* @@ -62,12 +62,12 @@ Tagged Queuing and whether or not ISA Bounce Buffers are required. */ -#define BusLogic_MaxTaggedQueueDepth 64 -#define BusLogic_MaxAutomaticTaggedQueueDepth 28 -#define BusLogic_MinAutomaticTaggedQueueDepth 7 -#define BusLogic_TaggedQueueDepthBB 3 -#define BusLogic_UntaggedQueueDepth 3 -#define BusLogic_UntaggedQueueDepthBB 2 +#define BLOGIC_MAX_TAG_DEPTH 64 +#define BLOGIC_MAX_AUTO_TAG_DEPTH 28 +#define BLOGIC_MIN_AUTO_TAG_DEPTH 7 +#define BLOGIC_TAG_DEPTH_BB 3 +#define BLOGIC_UNTAG_DEPTH 3 +#define BLOGIC_UNTAG_DEPTH_BB 2 /* @@ -77,7 +77,7 @@ a SCSI Bus Reset. */ -#define BusLogic_DefaultBusSettleTime 2 +#define BLOGIC_BUS_SETTLE_TIME 2 /* @@ -87,7 +87,7 @@ does not cross an allocation block size boundary. */ -#define BusLogic_MaxMailboxes 211 +#define BLOGIC_MAX_MAILBOX 211 /* @@ -95,50 +95,50 @@ Kernel memory allocation. */ -#define BusLogic_CCB_AllocationGroupSize 7 +#define BLOGIC_CCB_GRP_ALLOCSIZE 7 /* Define the Host Adapter Line and Message Buffer Sizes. */ -#define BusLogic_LineBufferSize 100 -#define BusLogic_MessageBufferSize 9700 +#define BLOGIC_LINEBUF_SIZE 100 +#define BLOGIC_MSGBUF_SIZE 9700 /* Define the Driver Message Levels. 
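
   [Editor's note -- annotation, not part of the commit.]  The message-level
   enum, the blogic_msglevelmap[] table and the blogic_announce() through
   blogic_err() macros defined below share one calling convention worth
   spelling out: blogic_msg() takes the adapter as a fixed parameter after
   the format string, so every caller passes it as the first "variadic"
   argument, even when the format needs nothing else (NULL is legal before
   an adapter exists).  Illustrative calls, assuming those definitions:

	blogic_info("Resetting Target %d\n", adapter, (int) tgt_id);
	blogic_err("BusLogic: out of mailboxes\n", NULL);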
*/ -enum BusLogic_MessageLevel { - BusLogic_AnnounceLevel = 0, - BusLogic_InfoLevel = 1, - BusLogic_NoticeLevel = 2, - BusLogic_WarningLevel = 3, - BusLogic_ErrorLevel = 4 +enum blogic_msglevel { + BLOGIC_ANNOUNCE_LEVEL = 0, + BLOGIC_INFO_LEVEL = 1, + BLOGIC_NOTICE_LEVEL = 2, + BLOGIC_WARN_LEVEL = 3, + BLOGIC_ERR_LEVEL = 4 }; -static char *BusLogic_MessageLevelMap[] = { KERN_NOTICE, KERN_NOTICE, KERN_NOTICE, KERN_WARNING, KERN_ERR }; +static char *blogic_msglevelmap[] = { KERN_NOTICE, KERN_NOTICE, KERN_NOTICE, KERN_WARNING, KERN_ERR }; /* Define Driver Message macros. */ -#define BusLogic_Announce(Format, Arguments...) \ - BusLogic_Message(BusLogic_AnnounceLevel, Format, ##Arguments) +#define blogic_announce(format, args...) \ + blogic_msg(BLOGIC_ANNOUNCE_LEVEL, format, ##args) -#define BusLogic_Info(Format, Arguments...) \ - BusLogic_Message(BusLogic_InfoLevel, Format, ##Arguments) +#define blogic_info(format, args...) \ + blogic_msg(BLOGIC_INFO_LEVEL, format, ##args) -#define BusLogic_Notice(Format, Arguments...) \ - BusLogic_Message(BusLogic_NoticeLevel, Format, ##Arguments) +#define blogic_notice(format, args...) \ + blogic_msg(BLOGIC_NOTICE_LEVEL, format, ##args) -#define BusLogic_Warning(Format, Arguments...) \ - BusLogic_Message(BusLogic_WarningLevel, Format, ##Arguments) +#define blogic_warn(format, args...) \ + blogic_msg(BLOGIC_WARN_LEVEL, format, ##args) -#define BusLogic_Error(Format, Arguments...) \ - BusLogic_Message(BusLogic_ErrorLevel, Format, ##Arguments) +#define blogic_err(format, args...) \ + blogic_msg(BLOGIC_ERR_LEVEL, format, ##args) /* @@ -146,15 +146,15 @@ static char *BusLogic_MessageLevelMap[] = { KERN_NOTICE, KERN_NOTICE, KERN_NOTIC of I/O Addresses required by each type. */ -enum BusLogic_HostAdapterType { - BusLogic_MultiMaster = 1, - BusLogic_FlashPoint = 2 +enum blogic_adapter_type { + BLOGIC_MULTIMASTER = 1, + BLOGIC_FLASHPOINT = 2 } PACKED; -#define BusLogic_MultiMasterAddressCount 4 -#define BusLogic_FlashPointAddressCount 256 +#define BLOGIC_MULTIMASTER_ADDR_COUNT 4 +#define BLOGIC_FLASHPOINT_ADDR_COUNT 256 -static int BusLogic_HostAdapterAddressCount[3] = { 0, BusLogic_MultiMasterAddressCount, BusLogic_FlashPointAddressCount }; +static int blogic_adapter_addr_count[3] = { 0, BLOGIC_MULTIMASTER_ADDR_COUNT, BLOGIC_FLASHPOINT_ADDR_COUNT }; /* @@ -163,19 +163,16 @@ static int BusLogic_HostAdapterAddressCount[3] = { 0, BusLogic_MultiMasterAddres #ifdef CONFIG_SCSI_FLASHPOINT -#define BusLogic_MultiMasterHostAdapterP(HostAdapter) \ - (HostAdapter->HostAdapterType == BusLogic_MultiMaster) +#define blogic_multimaster_type(adapter) \ + (adapter->adapter_type == BLOGIC_MULTIMASTER) -#define BusLogic_FlashPointHostAdapterP(HostAdapter) \ - (HostAdapter->HostAdapterType == BusLogic_FlashPoint) +#define blogic_flashpoint_type(adapter) \ + (adapter->adapter_type == BLOGIC_FLASHPOINT) #else -#define BusLogic_MultiMasterHostAdapterP(HostAdapter) \ - (true) - -#define BusLogic_FlashPointHostAdapterP(HostAdapter) \ - (false) +#define blogic_multimaster_type(adapter) (true) +#define blogic_flashpoint_type(adapter) (false) #endif @@ -184,35 +181,35 @@ static int BusLogic_HostAdapterAddressCount[3] = { 0, BusLogic_MultiMasterAddres Define the possible Host Adapter Bus Types. 
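
   [Editor's sketch -- annotation, not part of the commit.]  The
   blogic_adater_bus_types[] table below is indexed by the leading model
   digit: '4' in "BT-445S" selects VESA, '9' in "BT-958" selects PCI, and so
   on.  Sketch of the lookup (hypothetical helper; the driver open-codes the
   subtraction when it parses the model name):

	static enum blogic_adapter_bus_type model_to_bus(const char *model)
	{
		// model holds the digits after "BT-", e.g. "958"
		return blogic_adater_bus_types[model[0] - '4'];
	}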
*/ -enum BusLogic_HostAdapterBusType { - BusLogic_Unknown_Bus = 0, - BusLogic_ISA_Bus = 1, - BusLogic_EISA_Bus = 2, - BusLogic_PCI_Bus = 3, - BusLogic_VESA_Bus = 4, - BusLogic_MCA_Bus = 5 +enum blogic_adapter_bus_type { + BLOGIC_UNKNOWN_BUS = 0, + BLOGIC_ISA_BUS = 1, + BLOGIC_EISA_BUS = 2, + BLOGIC_PCI_BUS = 3, + BLOGIC_VESA_BUS = 4, + BLOGIC_MCA_BUS = 5 } PACKED; -static char *BusLogic_HostAdapterBusNames[] = { "Unknown", "ISA", "EISA", "PCI", "VESA", "MCA" }; +static char *blogic_adapter_busnames[] = { "Unknown", "ISA", "EISA", "PCI", "VESA", "MCA" }; -static enum BusLogic_HostAdapterBusType BusLogic_HostAdapterBusTypes[] = { - BusLogic_VESA_Bus, /* BT-4xx */ - BusLogic_ISA_Bus, /* BT-5xx */ - BusLogic_MCA_Bus, /* BT-6xx */ - BusLogic_EISA_Bus, /* BT-7xx */ - BusLogic_Unknown_Bus, /* BT-8xx */ - BusLogic_PCI_Bus /* BT-9xx */ +static enum blogic_adapter_bus_type blogic_adater_bus_types[] = { + BLOGIC_VESA_BUS, /* BT-4xx */ + BLOGIC_ISA_BUS, /* BT-5xx */ + BLOGIC_MCA_BUS, /* BT-6xx */ + BLOGIC_EISA_BUS, /* BT-7xx */ + BLOGIC_UNKNOWN_BUS, /* BT-8xx */ + BLOGIC_PCI_BUS /* BT-9xx */ }; /* Define the possible Host Adapter BIOS Disk Geometry Translations. */ -enum BusLogic_BIOS_DiskGeometryTranslation { - BusLogic_BIOS_Disk_Not_Installed = 0, - BusLogic_BIOS_Disk_Installed_64x32 = 1, - BusLogic_BIOS_Disk_Installed_128x32 = 2, - BusLogic_BIOS_Disk_Installed_255x63 = 3 +enum blogic_bios_diskgeometry { + BLOGIC_BIOS_NODISK = 0, + BLOGIC_BIOS_DISK64x32 = 1, + BLOGIC_BIOS_DISK128x32 = 2, + BLOGIC_BIOS_DISK255x63 = 3 } PACKED; @@ -220,9 +217,9 @@ enum BusLogic_BIOS_DiskGeometryTranslation { Define a 10^18 Statistics Byte Counter data type. */ -struct BusLogic_ByteCounter { - unsigned int Units; - unsigned int Billions; +struct blogic_byte_count { + unsigned int units; + unsigned int billions; }; @@ -230,79 +227,71 @@ struct BusLogic_ByteCounter { Define the structure for I/O Address and Bus Probing Information. */ -struct BusLogic_ProbeInfo { - enum BusLogic_HostAdapterType HostAdapterType; - enum BusLogic_HostAdapterBusType HostAdapterBusType; - unsigned long IO_Address; - unsigned long PCI_Address; - struct pci_dev *PCI_Device; - unsigned char Bus; - unsigned char Device; - unsigned char IRQ_Channel; +struct blogic_probeinfo { + enum blogic_adapter_type adapter_type; + enum blogic_adapter_bus_type adapter_bus_type; + unsigned long io_addr; + unsigned long pci_addr; + struct pci_dev *pci_device; + unsigned char bus; + unsigned char dev; + unsigned char irq_ch; }; /* Define the Probe Options. */ -struct BusLogic_ProbeOptions { - bool NoProbe:1; /* Bit 0 */ - bool NoProbeISA:1; /* Bit 1 */ - bool NoProbePCI:1; /* Bit 2 */ - bool NoSortPCI:1; /* Bit 3 */ - bool MultiMasterFirst:1;/* Bit 4 */ - bool FlashPointFirst:1; /* Bit 5 */ - bool LimitedProbeISA:1; /* Bit 6 */ - bool Probe330:1; /* Bit 7 */ - bool Probe334:1; /* Bit 8 */ - bool Probe230:1; /* Bit 9 */ - bool Probe234:1; /* Bit 10 */ - bool Probe130:1; /* Bit 11 */ - bool Probe134:1; /* Bit 12 */ +struct blogic_probe_options { + bool noprobe:1; /* Bit 0 */ + bool noprobe_isa:1; /* Bit 1 */ + bool noprobe_pci:1; /* Bit 2 */ + bool nosort_pci:1; /* Bit 3 */ + bool multimaster_first:1; /* Bit 4 */ + bool flashpoint_first:1; /* Bit 5 */ + bool limited_isa:1; /* Bit 6 */ + bool probe330:1; /* Bit 7 */ + bool probe334:1; /* Bit 8 */ + bool probe230:1; /* Bit 9 */ + bool probe234:1; /* Bit 10 */ + bool probe130:1; /* Bit 11 */ + bool probe134:1; /* Bit 12 */ }; /* Define the Global Options. 
*/ -struct BusLogic_GlobalOptions { - bool TraceProbe:1; /* Bit 0 */ - bool TraceHardwareReset:1; /* Bit 1 */ - bool TraceConfiguration:1; /* Bit 2 */ - bool TraceErrors:1; /* Bit 3 */ -}; - -/* - Define the Local Options. -*/ - -struct BusLogic_LocalOptions { - bool InhibitTargetInquiry:1; /* Bit 0 */ +struct blogic_global_options { + bool trace_probe:1; /* Bit 0 */ + bool trace_hw_reset:1; /* Bit 1 */ + bool trace_config:1; /* Bit 2 */ + bool trace_err:1; /* Bit 3 */ }; /* Define the BusLogic SCSI Host Adapter I/O Register Offsets. */ -#define BusLogic_ControlRegisterOffset 0 /* WO register */ -#define BusLogic_StatusRegisterOffset 0 /* RO register */ -#define BusLogic_CommandParameterRegisterOffset 1 /* WO register */ -#define BusLogic_DataInRegisterOffset 1 /* RO register */ -#define BusLogic_InterruptRegisterOffset 2 /* RO register */ -#define BusLogic_GeometryRegisterOffset 3 /* RO register */ +#define BLOGIC_CNTRL_REG 0 /* WO register */ +#define BLOGIC_STATUS_REG 0 /* RO register */ +#define BLOGIC_CMD_PARM_REG 1 /* WO register */ +#define BLOGIC_DATAIN_REG 1 /* RO register */ +#define BLOGIC_INT_REG 2 /* RO register */ +#define BLOGIC_GEOMETRY_REG 3 /* RO register */ /* Define the structure of the write-only Control Register. */ -union BusLogic_ControlRegister { - unsigned char All; +union blogic_cntrl_reg { + unsigned char all; struct { unsigned char:4; /* Bits 0-3 */ - bool SCSIBusReset:1; /* Bit 4 */ - bool InterruptReset:1; /* Bit 5 */ - bool SoftReset:1; /* Bit 6 */ - bool HardReset:1; /* Bit 7 */ + bool bus_reset:1; /* Bit 4 */ + bool int_reset:1; /* Bit 5 */ + bool soft_reset:1; /* Bit 6 */ + bool hard_reset:1; /* Bit 7 */ } cr; }; @@ -310,17 +299,17 @@ union BusLogic_ControlRegister { Define the structure of the read-only Status Register. */ -union BusLogic_StatusRegister { - unsigned char All; +union blogic_stat_reg { + unsigned char all; struct { - bool CommandInvalid:1; /* Bit 0 */ - bool Reserved:1; /* Bit 1 */ - bool DataInRegisterReady:1; /* Bit 2 */ - bool CommandParameterRegisterBusy:1; /* Bit 3 */ - bool HostAdapterReady:1; /* Bit 4 */ - bool InitializationRequired:1; /* Bit 5 */ - bool DiagnosticFailure:1; /* Bit 6 */ - bool DiagnosticActive:1; /* Bit 7 */ + bool cmd_invalid:1; /* Bit 0 */ + bool rsvd:1; /* Bit 1 */ + bool datain_ready:1; /* Bit 2 */ + bool cmd_param_busy:1; /* Bit 3 */ + bool adapter_ready:1; /* Bit 4 */ + bool init_reqd:1; /* Bit 5 */ + bool diag_failed:1; /* Bit 6 */ + bool diag_active:1; /* Bit 7 */ } sr; }; @@ -328,15 +317,15 @@ union BusLogic_StatusRegister { Define the structure of the read-only Interrupt Register. */ -union BusLogic_InterruptRegister { - unsigned char All; +union blogic_int_reg { + unsigned char all; struct { - bool IncomingMailboxLoaded:1; /* Bit 0 */ - bool OutgoingMailboxAvailable:1;/* Bit 1 */ - bool CommandComplete:1; /* Bit 2 */ - bool ExternalBusReset:1; /* Bit 3 */ - unsigned char Reserved:3; /* Bits 4-6 */ - bool InterruptValid:1; /* Bit 7 */ + bool mailin_loaded:1; /* Bit 0 */ + bool mailout_avail:1; /* Bit 1 */ + bool cmd_complete:1; /* Bit 2 */ + bool ext_busreset:1; /* Bit 3 */ + unsigned char rsvd:3; /* Bits 4-6 */ + bool int_valid:1; /* Bit 7 */ } ir; }; @@ -344,13 +333,13 @@ union BusLogic_InterruptRegister { Define the structure of the read-only Geometry Register. 
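
   [Editor's sketch -- annotation, not part of the commit.]  Each of these
   unions overlays a single I/O byte at the offsets defined above, so a
   register read is one inb() plus bitfield tests.  Illustrative access,
   with adapter->io_addr holding the base port (per the driver's register
   helpers):

	union blogic_stat_reg statusreg;
	unsigned char byte;

	statusreg.all = inb(adapter->io_addr + BLOGIC_STATUS_REG);
	if (statusreg.sr.datain_ready)
		byte = inb(adapter->io_addr + BLOGIC_DATAIN_REG);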
*/ -union BusLogic_GeometryRegister { - unsigned char All; +union blogic_geo_reg { + unsigned char all; struct { - enum BusLogic_BIOS_DiskGeometryTranslation Drive0Geometry:2; /* Bits 0-1 */ - enum BusLogic_BIOS_DiskGeometryTranslation Drive1Geometry:2; /* Bits 2-3 */ + enum blogic_bios_diskgeometry d0_geo:2; /* Bits 0-1 */ + enum blogic_bios_diskgeometry d1_geo:2; /* Bits 2-3 */ unsigned char:3; /* Bits 4-6 */ - bool ExtendedTranslationEnabled:1; /* Bit 7 */ + bool ext_trans_enable:1; /* Bit 7 */ } gr; }; @@ -358,82 +347,82 @@ union BusLogic_GeometryRegister { Define the BusLogic SCSI Host Adapter Command Register Operation Codes. */ -enum BusLogic_OperationCode { - BusLogic_TestCommandCompleteInterrupt = 0x00, - BusLogic_InitializeMailbox = 0x01, - BusLogic_ExecuteMailboxCommand = 0x02, - BusLogic_ExecuteBIOSCommand = 0x03, - BusLogic_InquireBoardID = 0x04, - BusLogic_EnableOutgoingMailboxAvailableInt = 0x05, - BusLogic_SetSCSISelectionTimeout = 0x06, - BusLogic_SetPreemptTimeOnBus = 0x07, - BusLogic_SetTimeOffBus = 0x08, - BusLogic_SetBusTransferRate = 0x09, - BusLogic_InquireInstalledDevicesID0to7 = 0x0A, - BusLogic_InquireConfiguration = 0x0B, - BusLogic_EnableTargetMode = 0x0C, - BusLogic_InquireSetupInformation = 0x0D, - BusLogic_WriteAdapterLocalRAM = 0x1A, - BusLogic_ReadAdapterLocalRAM = 0x1B, - BusLogic_WriteBusMasterChipFIFO = 0x1C, - BusLogic_ReadBusMasterChipFIFO = 0x1D, - BusLogic_EchoCommandData = 0x1F, - BusLogic_HostAdapterDiagnostic = 0x20, - BusLogic_SetAdapterOptions = 0x21, - BusLogic_InquireInstalledDevicesID8to15 = 0x23, - BusLogic_InquireTargetDevices = 0x24, - BusLogic_DisableHostAdapterInterrupt = 0x25, - BusLogic_InitializeExtendedMailbox = 0x81, - BusLogic_ExecuteSCSICommand = 0x83, - BusLogic_InquireFirmwareVersion3rdDigit = 0x84, - BusLogic_InquireFirmwareVersionLetter = 0x85, - BusLogic_InquirePCIHostAdapterInformation = 0x86, - BusLogic_InquireHostAdapterModelNumber = 0x8B, - BusLogic_InquireSynchronousPeriod = 0x8C, - BusLogic_InquireExtendedSetupInformation = 0x8D, - BusLogic_EnableStrictRoundRobinMode = 0x8F, - BusLogic_StoreHostAdapterLocalRAM = 0x90, - BusLogic_FetchHostAdapterLocalRAM = 0x91, - BusLogic_StoreLocalDataInEEPROM = 0x92, - BusLogic_UploadAutoSCSICode = 0x94, - BusLogic_ModifyIOAddress = 0x95, - BusLogic_SetCCBFormat = 0x96, - BusLogic_WriteInquiryBuffer = 0x9A, - BusLogic_ReadInquiryBuffer = 0x9B, - BusLogic_FlashROMUploadDownload = 0xA7, - BusLogic_ReadSCAMData = 0xA8, - BusLogic_WriteSCAMData = 0xA9 +enum blogic_opcode { + BLOGIC_TEST_CMP_COMPLETE = 0x00, + BLOGIC_INIT_MBOX = 0x01, + BLOGIC_EXEC_MBOX_CMD = 0x02, + BLOGIC_EXEC_BIOS_CMD = 0x03, + BLOGIC_GET_BOARD_ID = 0x04, + BLOGIC_ENABLE_OUTBOX_AVAIL_INT = 0x05, + BLOGIC_SET_SELECT_TIMEOUT = 0x06, + BLOGIC_SET_PREEMPT_TIME = 0x07, + BLOGIC_SET_TIMEOFF_BUS = 0x08, + BLOGIC_SET_TXRATE = 0x09, + BLOGIC_INQ_DEV0TO7 = 0x0A, + BLOGIC_INQ_CONFIG = 0x0B, + BLOGIC_TGT_MODE = 0x0C, + BLOGIC_INQ_SETUPINFO = 0x0D, + BLOGIC_WRITE_LOCALRAM = 0x1A, + BLOGIC_READ_LOCALRAM = 0x1B, + BLOGIC_WRITE_BUSMASTER_FIFO = 0x1C, + BLOGIC_READ_BUSMASTER_FIFO = 0x1D, + BLOGIC_ECHO_CMDDATA = 0x1F, + BLOGIC_ADAPTER_DIAG = 0x20, + BLOGIC_SET_OPTIONS = 0x21, + BLOGIC_INQ_DEV8TO15 = 0x23, + BLOGIC_INQ_DEV = 0x24, + BLOGIC_DISABLE_INT = 0x25, + BLOGIC_INIT_EXT_MBOX = 0x81, + BLOGIC_EXEC_SCS_CMD = 0x83, + BLOGIC_INQ_FWVER_D3 = 0x84, + BLOGIC_INQ_FWVER_LETTER = 0x85, + BLOGIC_INQ_PCI_INFO = 0x86, + BLOGIC_INQ_MODELNO = 0x8B, + BLOGIC_INQ_SYNC_PERIOD = 0x8C, + BLOGIC_INQ_EXTSETUP = 0x8D, + BLOGIC_STRICT_RR = 0x8F, + 
BLOGIC_STORE_LOCALRAM = 0x90, + BLOGIC_FETCH_LOCALRAM = 0x91, + BLOGIC_STORE_TO_EEPROM = 0x92, + BLOGIC_LOAD_AUTOSCSICODE = 0x94, + BLOGIC_MOD_IOADDR = 0x95, + BLOGIC_SETCCB_FMT = 0x96, + BLOGIC_WRITE_INQBUF = 0x9A, + BLOGIC_READ_INQBUF = 0x9B, + BLOGIC_FLASH_LOAD = 0xA7, + BLOGIC_READ_SCAMDATA = 0xA8, + BLOGIC_WRITE_SCAMDATA = 0xA9 }; /* Define the Inquire Board ID reply structure. */ -struct BusLogic_BoardID { - unsigned char BoardType; /* Byte 0 */ - unsigned char CustomFeatures; /* Byte 1 */ - unsigned char FirmwareVersion1stDigit; /* Byte 2 */ - unsigned char FirmwareVersion2ndDigit; /* Byte 3 */ +struct blogic_board_id { + unsigned char type; /* Byte 0 */ + unsigned char custom_features; /* Byte 1 */ + unsigned char fw_ver_digit1; /* Byte 2 */ + unsigned char fw_ver_digit2; /* Byte 3 */ }; /* Define the Inquire Configuration reply structure. */ -struct BusLogic_Configuration { +struct blogic_config { unsigned char:5; /* Byte 0 Bits 0-4 */ - bool DMA_Channel5:1; /* Byte 0 Bit 5 */ - bool DMA_Channel6:1; /* Byte 0 Bit 6 */ - bool DMA_Channel7:1; /* Byte 0 Bit 7 */ - bool IRQ_Channel9:1; /* Byte 1 Bit 0 */ - bool IRQ_Channel10:1; /* Byte 1 Bit 1 */ - bool IRQ_Channel11:1; /* Byte 1 Bit 2 */ - bool IRQ_Channel12:1; /* Byte 1 Bit 3 */ + bool dma_ch5:1; /* Byte 0 Bit 5 */ + bool dma_ch6:1; /* Byte 0 Bit 6 */ + bool dma_ch7:1; /* Byte 0 Bit 7 */ + bool irq_ch9:1; /* Byte 1 Bit 0 */ + bool irq_ch10:1; /* Byte 1 Bit 1 */ + bool irq_ch11:1; /* Byte 1 Bit 2 */ + bool irq_ch12:1; /* Byte 1 Bit 3 */ unsigned char:1; /* Byte 1 Bit 4 */ - bool IRQ_Channel14:1; /* Byte 1 Bit 5 */ - bool IRQ_Channel15:1; /* Byte 1 Bit 6 */ + bool irq_ch14:1; /* Byte 1 Bit 5 */ + bool irq_ch15:1; /* Byte 1 Bit 6 */ unsigned char:1; /* Byte 1 Bit 7 */ - unsigned char HostAdapterID:4; /* Byte 2 Bits 0-3 */ + unsigned char id:4; /* Byte 2 Bits 0-3 */ unsigned char:4; /* Byte 2 Bits 4-7 */ }; @@ -441,42 +430,42 @@ struct BusLogic_Configuration { Define the Inquire Setup Information reply structure. 
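
   [Editor's sketch -- annotation, not part of the commit.]  All of these
   opcodes funnel through the driver's single command routine, which writes
   the opcode and parameters to the Command/Parameter register and collects
   the reply from the Data In register.  Typical use, fetching the board ID
   (blogic_cmd() returning the reply byte count, per its definition in
   BusLogic.c):

	struct blogic_board_id id;

	if (blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id,
			sizeof(id)) != sizeof(id))
		return -ENODEV;	// adapter did not answer
	blogic_info("firmware %c.%c\n", adapter,
		    id.fw_ver_digit1, id.fw_ver_digit2);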
*/ -struct BusLogic_SynchronousValue { - unsigned char Offset:4; /* Bits 0-3 */ - unsigned char TransferPeriod:3; /* Bits 4-6 */ - bool Synchronous:1; /* Bit 7 */ +struct blogic_syncval { + unsigned char offset:4; /* Bits 0-3 */ + unsigned char tx_period:3; /* Bits 4-6 */ + bool sync:1; /* Bit 7 */ }; -struct BusLogic_SetupInformation { - bool SynchronousInitiationEnabled:1; /* Byte 0 Bit 0 */ - bool ParityCheckingEnabled:1; /* Byte 0 Bit 1 */ - unsigned char:6; /* Byte 0 Bits 2-7 */ - unsigned char BusTransferRate; /* Byte 1 */ - unsigned char PreemptTimeOnBus; /* Byte 2 */ - unsigned char TimeOffBus; /* Byte 3 */ - unsigned char MailboxCount; /* Byte 4 */ - unsigned char MailboxAddress[3]; /* Bytes 5-7 */ - struct BusLogic_SynchronousValue SynchronousValuesID0to7[8]; /* Bytes 8-15 */ - unsigned char DisconnectPermittedID0to7; /* Byte 16 */ - unsigned char Signature; /* Byte 17 */ - unsigned char CharacterD; /* Byte 18 */ - unsigned char HostBusType; /* Byte 19 */ - unsigned char WideTransfersPermittedID0to7; /* Byte 20 */ - unsigned char WideTransfersActiveID0to7; /* Byte 21 */ - struct BusLogic_SynchronousValue SynchronousValuesID8to15[8]; /* Bytes 22-29 */ - unsigned char DisconnectPermittedID8to15; /* Byte 30 */ - unsigned char:8; /* Byte 31 */ - unsigned char WideTransfersPermittedID8to15; /* Byte 32 */ - unsigned char WideTransfersActiveID8to15; /* Byte 33 */ +struct blogic_setup_info { + bool sync:1; /* Byte 0 Bit 0 */ + bool parity:1; /* Byte 0 Bit 1 */ + unsigned char:6; /* Byte 0 Bits 2-7 */ + unsigned char tx_rate; /* Byte 1 */ + unsigned char preempt_time; /* Byte 2 */ + unsigned char timeoff_bus; /* Byte 3 */ + unsigned char mbox_count; /* Byte 4 */ + unsigned char mbox_addr[3]; /* Bytes 5-7 */ + struct blogic_syncval sync0to7[8]; /* Bytes 8-15 */ + unsigned char disconnect_ok0to7; /* Byte 16 */ + unsigned char sig; /* Byte 17 */ + unsigned char char_d; /* Byte 18 */ + unsigned char bus_type; /* Byte 19 */ + unsigned char wide_tx_ok0to7; /* Byte 20 */ + unsigned char wide_tx_active0to7; /* Byte 21 */ + struct blogic_syncval sync8to15[8]; /* Bytes 22-29 */ + unsigned char disconnect_ok8to15; /* Byte 30 */ + unsigned char:8; /* Byte 31 */ + unsigned char wide_tx_ok8to15; /* Byte 32 */ + unsigned char wide_tx_active8to15; /* Byte 33 */ }; /* Define the Initialize Extended Mailbox request structure. */ -struct BusLogic_ExtendedMailboxRequest { - unsigned char MailboxCount; /* Byte 0 */ - u32 BaseMailboxAddress; /* Bytes 1-4 */ +struct blogic_extmbox_req { + unsigned char mbox_count; /* Byte 0 */ + u32 base_mbox_addr; /* Bytes 1-4 */ } PACKED; @@ -486,63 +475,63 @@ struct BusLogic_ExtendedMailboxRequest { the Modify I/O Address command. 
*/ -enum BusLogic_ISACompatibleIOPort { - BusLogic_IO_330 = 0, - BusLogic_IO_334 = 1, - BusLogic_IO_230 = 2, - BusLogic_IO_234 = 3, - BusLogic_IO_130 = 4, - BusLogic_IO_134 = 5, - BusLogic_IO_Disable = 6, - BusLogic_IO_Disable2 = 7 +enum blogic_isa_ioport { + BLOGIC_IO_330 = 0, + BLOGIC_IO_334 = 1, + BLOGIC_IO_230 = 2, + BLOGIC_IO_234 = 3, + BLOGIC_IO_130 = 4, + BLOGIC_IO_134 = 5, + BLOGIC_IO_DISABLE = 6, + BLOGIC_IO_DISABLE2 = 7 } PACKED; -struct BusLogic_PCIHostAdapterInformation { - enum BusLogic_ISACompatibleIOPort ISACompatibleIOPort; /* Byte 0 */ - unsigned char PCIAssignedIRQChannel; /* Byte 1 */ - bool LowByteTerminated:1; /* Byte 2 Bit 0 */ - bool HighByteTerminated:1; /* Byte 2 Bit 1 */ - unsigned char:2; /* Byte 2 Bits 2-3 */ - bool JP1:1; /* Byte 2 Bit 4 */ - bool JP2:1; /* Byte 2 Bit 5 */ - bool JP3:1; /* Byte 2 Bit 6 */ - bool GenericInfoValid:1;/* Byte 2 Bit 7 */ - unsigned char:8; /* Byte 3 */ +struct blogic_adapter_info { + enum blogic_isa_ioport isa_port; /* Byte 0 */ + unsigned char irq_ch; /* Byte 1 */ + bool low_term:1; /* Byte 2 Bit 0 */ + bool high_term:1; /* Byte 2 Bit 1 */ + unsigned char:2; /* Byte 2 Bits 2-3 */ + bool JP1:1; /* Byte 2 Bit 4 */ + bool JP2:1; /* Byte 2 Bit 5 */ + bool JP3:1; /* Byte 2 Bit 6 */ + bool genericinfo_valid:1; /* Byte 2 Bit 7 */ + unsigned char:8; /* Byte 3 */ }; /* Define the Inquire Extended Setup Information reply structure. */ -struct BusLogic_ExtendedSetupInformation { - unsigned char BusType; /* Byte 0 */ - unsigned char BIOS_Address; /* Byte 1 */ - unsigned short ScatterGatherLimit; /* Bytes 2-3 */ - unsigned char MailboxCount; /* Byte 4 */ - u32 BaseMailboxAddress; /* Bytes 5-8 */ +struct blogic_ext_setup { + unsigned char bus_type; /* Byte 0 */ + unsigned char bios_addr; /* Byte 1 */ + unsigned short sg_limit; /* Bytes 2-3 */ + unsigned char mbox_count; /* Byte 4 */ + u32 base_mbox_addr; /* Bytes 5-8 */ struct { unsigned char:2; /* Byte 9 Bits 0-1 */ - bool FastOnEISA:1; /* Byte 9 Bit 2 */ + bool fast_on_eisa:1; /* Byte 9 Bit 2 */ unsigned char:3; /* Byte 9 Bits 3-5 */ - bool LevelSensitiveInterrupt:1; /* Byte 9 Bit 6 */ + bool level_int:1; /* Byte 9 Bit 6 */ unsigned char:1; /* Byte 9 Bit 7 */ - } Misc; - unsigned char FirmwareRevision[3]; /* Bytes 10-12 */ - bool HostWideSCSI:1; /* Byte 13 Bit 0 */ - bool HostDifferentialSCSI:1; /* Byte 13 Bit 1 */ - bool HostSupportsSCAM:1; /* Byte 13 Bit 2 */ - bool HostUltraSCSI:1; /* Byte 13 Bit 3 */ - bool HostSmartTermination:1; /* Byte 13 Bit 4 */ - unsigned char:3; /* Byte 13 Bits 5-7 */ + } misc; + unsigned char fw_rev[3]; /* Bytes 10-12 */ + bool wide:1; /* Byte 13 Bit 0 */ + bool differential:1; /* Byte 13 Bit 1 */ + bool scam:1; /* Byte 13 Bit 2 */ + bool ultra:1; /* Byte 13 Bit 3 */ + bool smart_term:1; /* Byte 13 Bit 4 */ + unsigned char:3; /* Byte 13 Bits 5-7 */ } PACKED; /* Define the Enable Strict Round Robin Mode request type. */ -enum BusLogic_RoundRobinModeRequest { - BusLogic_AggressiveRoundRobinMode = 0, - BusLogic_StrictRoundRobinMode = 1 +enum blogic_rr_req { + BLOGIC_AGGRESSIVE_RR = 0, + BLOGIC_STRICT_RR_MODE = 1 } PACKED; @@ -550,95 +539,95 @@ enum BusLogic_RoundRobinModeRequest { Define the Fetch Host Adapter Local RAM request type. 
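
   [Editor's sketch -- annotation, not part of the commit.]  The fetch
   request defined below is the parameter block for BLOGIC_FETCH_LOCALRAM;
   the driver uses it to pull bytes out of the AutoSCSI region, e.g. (shape
   per the driver's configuration-read path, error handling elided):

	struct blogic_fetch_localram fetch_localram;
	struct blogic_autoscsi_byte45 autoscsi_byte45;

	fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
	fetch_localram.count = sizeof(autoscsi_byte45);
	blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM, &fetch_localram,
		   sizeof(fetch_localram), &autoscsi_byte45,
		   sizeof(autoscsi_byte45));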
*/ -#define BusLogic_BIOS_BaseOffset 0 -#define BusLogic_AutoSCSI_BaseOffset 64 +#define BLOGIC_BIOS_BASE 0 +#define BLOGIC_AUTOSCSI_BASE 64 -struct BusLogic_FetchHostAdapterLocalRAMRequest { - unsigned char ByteOffset; /* Byte 0 */ - unsigned char ByteCount; /* Byte 1 */ +struct blogic_fetch_localram { + unsigned char offset; /* Byte 0 */ + unsigned char count; /* Byte 1 */ }; /* Define the Host Adapter Local RAM AutoSCSI structure. */ -struct BusLogic_AutoSCSIData { - unsigned char InternalFactorySignature[2]; /* Bytes 0-1 */ - unsigned char InformationByteCount; /* Byte 2 */ - unsigned char HostAdapterType[6]; /* Bytes 3-8 */ - unsigned char:8; /* Byte 9 */ - bool FloppyEnabled:1; /* Byte 10 Bit 0 */ - bool FloppySecondary:1; /* Byte 10 Bit 1 */ - bool LevelSensitiveInterrupt:1; /* Byte 10 Bit 2 */ - unsigned char:2; /* Byte 10 Bits 3-4 */ - unsigned char SystemRAMAreaForBIOS:3; /* Byte 10 Bits 5-7 */ - unsigned char DMA_Channel:7; /* Byte 11 Bits 0-6 */ - bool DMA_AutoConfiguration:1; /* Byte 11 Bit 7 */ - unsigned char IRQ_Channel:7; /* Byte 12 Bits 0-6 */ - bool IRQ_AutoConfiguration:1; /* Byte 12 Bit 7 */ - unsigned char DMA_TransferRate; /* Byte 13 */ - unsigned char SCSI_ID; /* Byte 14 */ - bool LowByteTerminated:1; /* Byte 15 Bit 0 */ - bool ParityCheckingEnabled:1; /* Byte 15 Bit 1 */ - bool HighByteTerminated:1; /* Byte 15 Bit 2 */ - bool NoisyCablingEnvironment:1; /* Byte 15 Bit 3 */ - bool FastSynchronousNegotiation:1; /* Byte 15 Bit 4 */ - bool BusResetEnabled:1; /* Byte 15 Bit 5 */ - bool:1; /* Byte 15 Bit 6 */ - bool ActiveNegationEnabled:1; /* Byte 15 Bit 7 */ - unsigned char BusOnDelay; /* Byte 16 */ - unsigned char BusOffDelay; /* Byte 17 */ - bool HostAdapterBIOSEnabled:1; /* Byte 18 Bit 0 */ - bool BIOSRedirectionOfINT19Enabled:1; /* Byte 18 Bit 1 */ - bool ExtendedTranslationEnabled:1; /* Byte 18 Bit 2 */ - bool MapRemovableAsFixedEnabled:1; /* Byte 18 Bit 3 */ - bool:1; /* Byte 18 Bit 4 */ - bool BIOSSupportsMoreThan2DrivesEnabled:1; /* Byte 18 Bit 5 */ - bool BIOSInterruptModeEnabled:1; /* Byte 18 Bit 6 */ - bool FlopticalSupportEnabled:1; /* Byte 19 Bit 7 */ - unsigned short DeviceEnabled; /* Bytes 19-20 */ - unsigned short WidePermitted; /* Bytes 21-22 */ - unsigned short FastPermitted; /* Bytes 23-24 */ - unsigned short SynchronousPermitted; /* Bytes 25-26 */ - unsigned short DisconnectPermitted; /* Bytes 27-28 */ - unsigned short SendStartUnitCommand; /* Bytes 29-30 */ - unsigned short IgnoreInBIOSScan; /* Bytes 31-32 */ - unsigned char PCIInterruptPin:2; /* Byte 33 Bits 0-1 */ - unsigned char HostAdapterIOPortAddress:2; /* Byte 33 Bits 2-3 */ - bool StrictRoundRobinModeEnabled:1; /* Byte 33 Bit 4 */ - bool VESABusSpeedGreaterThan33MHz:1; /* Byte 33 Bit 5 */ - bool VESABurstWriteEnabled:1; /* Byte 33 Bit 6 */ - bool VESABurstReadEnabled:1; /* Byte 33 Bit 7 */ - unsigned short UltraPermitted; /* Bytes 34-35 */ - unsigned int:32; /* Bytes 36-39 */ - unsigned char:8; /* Byte 40 */ - unsigned char AutoSCSIMaximumLUN; /* Byte 41 */ - bool:1; /* Byte 42 Bit 0 */ - bool SCAM_Dominant:1; /* Byte 42 Bit 1 */ - bool SCAM_Enabled:1; /* Byte 42 Bit 2 */ - bool SCAM_Level2:1; /* Byte 42 Bit 3 */ - unsigned char:4; /* Byte 42 Bits 4-7 */ - bool INT13ExtensionEnabled:1; /* Byte 43 Bit 0 */ - bool:1; /* Byte 43 Bit 1 */ - bool CDROMBootEnabled:1; /* Byte 43 Bit 2 */ - unsigned char:5; /* Byte 43 Bits 3-7 */ - unsigned char BootTargetID:4; /* Byte 44 Bits 0-3 */ - unsigned char BootChannel:4; /* Byte 44 Bits 4-7 */ - unsigned char ForceBusDeviceScanningOrder:1; /* Byte 45 Bit 0 
*/ - unsigned char:7; /* Byte 45 Bits 1-7 */ - unsigned short NonTaggedToAlternateLUNPermitted; /* Bytes 46-47 */ - unsigned short RenegotiateSyncAfterCheckCondition; /* Bytes 48-49 */ - unsigned char Reserved[10]; /* Bytes 50-59 */ - unsigned char ManufacturingDiagnostic[2]; /* Bytes 60-61 */ - unsigned short Checksum; /* Bytes 62-63 */ +struct blogic_autoscsi { + unsigned char factory_sig[2]; /* Bytes 0-1 */ + unsigned char info_bytes; /* Byte 2 */ + unsigned char adapter_type[6]; /* Bytes 3-8 */ + unsigned char:8; /* Byte 9 */ + bool floppy:1; /* Byte 10 Bit 0 */ + bool floppy_sec:1; /* Byte 10 Bit 1 */ + bool level_int:1; /* Byte 10 Bit 2 */ + unsigned char:2; /* Byte 10 Bits 3-4 */ + unsigned char systemram_bios:3; /* Byte 10 Bits 5-7 */ + unsigned char dma_ch:7; /* Byte 11 Bits 0-6 */ + bool dma_autoconf:1; /* Byte 11 Bit 7 */ + unsigned char irq_ch:7; /* Byte 12 Bits 0-6 */ + bool irq_autoconf:1; /* Byte 12 Bit 7 */ + unsigned char dma_tx_rate; /* Byte 13 */ + unsigned char scsi_id; /* Byte 14 */ + bool low_term:1; /* Byte 15 Bit 0 */ + bool parity:1; /* Byte 15 Bit 1 */ + bool high_term:1; /* Byte 15 Bit 2 */ + bool noisy_cable:1; /* Byte 15 Bit 3 */ + bool fast_sync_neg:1; /* Byte 15 Bit 4 */ + bool reset_enabled:1; /* Byte 15 Bit 5 */ + bool:1; /* Byte 15 Bit 6 */ + bool active_negation:1; /* Byte 15 Bit 7 */ + unsigned char bus_on_delay; /* Byte 16 */ + unsigned char bus_off_delay; /* Byte 17 */ + bool bios_enabled:1; /* Byte 18 Bit 0 */ + bool int19_redir_enabled:1; /* Byte 18 Bit 1 */ + bool ext_trans_enable:1; /* Byte 18 Bit 2 */ + bool removable_as_fixed:1; /* Byte 18 Bit 3 */ + bool:1; /* Byte 18 Bit 4 */ + bool morethan2_drives:1; /* Byte 18 Bit 5 */ + bool bios_int:1; /* Byte 18 Bit 6 */ + bool floptical:1; /* Byte 19 Bit 7 */ + unsigned short dev_enabled; /* Bytes 19-20 */ + unsigned short wide_ok; /* Bytes 21-22 */ + unsigned short fast_ok; /* Bytes 23-24 */ + unsigned short sync_ok; /* Bytes 25-26 */ + unsigned short discon_ok; /* Bytes 27-28 */ + unsigned short send_start_unit; /* Bytes 29-30 */ + unsigned short ignore_bios_scan; /* Bytes 31-32 */ + unsigned char pci_int_pin:2; /* Byte 33 Bits 0-1 */ + unsigned char adapter_ioport:2; /* Byte 33 Bits 2-3 */ + bool strict_rr_enabled:1; /* Byte 33 Bit 4 */ + bool vesabus_33mhzplus:1; /* Byte 33 Bit 5 */ + bool vesa_burst_write:1; /* Byte 33 Bit 6 */ + bool vesa_burst_read:1; /* Byte 33 Bit 7 */ + unsigned short ultra_ok; /* Bytes 34-35 */ + unsigned int:32; /* Bytes 36-39 */ + unsigned char:8; /* Byte 40 */ + unsigned char autoscsi_maxlun; /* Byte 41 */ + bool:1; /* Byte 42 Bit 0 */ + bool scam_dominant:1; /* Byte 42 Bit 1 */ + bool scam_enabled:1; /* Byte 42 Bit 2 */ + bool scam_lev2:1; /* Byte 42 Bit 3 */ + unsigned char:4; /* Byte 42 Bits 4-7 */ + bool int13_exten:1; /* Byte 43 Bit 0 */ + bool:1; /* Byte 43 Bit 1 */ + bool cd_boot:1; /* Byte 43 Bit 2 */ + unsigned char:5; /* Byte 43 Bits 3-7 */ + unsigned char boot_id:4; /* Byte 44 Bits 0-3 */ + unsigned char boot_ch:4; /* Byte 44 Bits 4-7 */ + unsigned char force_scan_order:1; /* Byte 45 Bit 0 */ + unsigned char:7; /* Byte 45 Bits 1-7 */ + unsigned short nontagged_to_alt_ok; /* Bytes 46-47 */ + unsigned short reneg_sync_on_check; /* Bytes 48-49 */ + unsigned char rsvd[10]; /* Bytes 50-59 */ + unsigned char manuf_diag[2]; /* Bytes 60-61 */ + unsigned short cksum; /* Bytes 62-63 */ } PACKED; /* Define the Host Adapter Local RAM Auto SCSI Byte 45 structure. 
*/ -struct BusLogic_AutoSCSIByte45 { - unsigned char ForceBusDeviceScanningOrder:1; /* Bit 0 */ +struct blogic_autoscsi_byte45 { + unsigned char force_scan_order:1; /* Bit 0 */ unsigned char:7; /* Bits 1-7 */ }; @@ -646,13 +635,13 @@ struct BusLogic_AutoSCSIByte45 { Define the Host Adapter Local RAM BIOS Drive Map Byte structure. */ -#define BusLogic_BIOS_DriveMapOffset 17 +#define BLOGIC_BIOS_DRVMAP 17 -struct BusLogic_BIOSDriveMapByte { - unsigned char TargetIDBit3:1; /* Bit 0 */ - unsigned char:2; /* Bits 1-2 */ - enum BusLogic_BIOS_DiskGeometryTranslation DiskGeometry:2; /* Bits 3-4 */ - unsigned char TargetID:3; /* Bits 5-7 */ +struct blogic_bios_drvmap { + unsigned char tgt_idbit3:1; /* Bit 0 */ + unsigned char:2; /* Bits 1-2 */ + enum blogic_bios_diskgeometry diskgeom:2; /* Bits 3-4 */ + unsigned char tgt_id:3; /* Bits 5-7 */ }; /* @@ -660,19 +649,19 @@ struct BusLogic_BIOSDriveMapByte { necessary to support more than 8 Logical Units per Target Device. */ -enum BusLogic_SetCCBFormatRequest { - BusLogic_LegacyLUNFormatCCB = 0, - BusLogic_ExtendedLUNFormatCCB = 1 +enum blogic_setccb_fmt { + BLOGIC_LEGACY_LUN_CCB = 0, + BLOGIC_EXT_LUN_CCB = 1 } PACKED; /* Define the Outgoing Mailbox Action Codes. */ -enum BusLogic_ActionCode { - BusLogic_OutgoingMailboxFree = 0x00, - BusLogic_MailboxStartCommand = 0x01, - BusLogic_MailboxAbortCommand = 0x02 +enum blogic_action { + BLOGIC_OUTBOX_FREE = 0x00, + BLOGIC_MBOX_START = 0x01, + BLOGIC_MBOX_ABORT = 0x02 } PACKED; @@ -682,26 +671,26 @@ enum BusLogic_ActionCode { completion codes are stored in the CCB; it only uses codes 1, 2, 4, and 5. */ -enum BusLogic_CompletionCode { - BusLogic_IncomingMailboxFree = 0x00, - BusLogic_CommandCompletedWithoutError = 0x01, - BusLogic_CommandAbortedAtHostRequest = 0x02, - BusLogic_AbortedCommandNotFound = 0x03, - BusLogic_CommandCompletedWithError = 0x04, - BusLogic_InvalidCCB = 0x05 +enum blogic_cmplt_code { + BLOGIC_INBOX_FREE = 0x00, + BLOGIC_CMD_COMPLETE_GOOD = 0x01, + BLOGIC_CMD_ABORT_BY_HOST = 0x02, + BLOGIC_CMD_NOTFOUND = 0x03, + BLOGIC_CMD_COMPLETE_ERROR = 0x04, + BLOGIC_INVALID_CCB = 0x05 } PACKED; /* Define the Command Control Block (CCB) Opcodes. */ -enum BusLogic_CCB_Opcode { - BusLogic_InitiatorCCB = 0x00, - BusLogic_TargetCCB = 0x01, - BusLogic_InitiatorCCB_ScatterGather = 0x02, - BusLogic_InitiatorCCB_ResidualDataLength = 0x03, - BusLogic_InitiatorCCB_ScatterGatherResidual = 0x04, - BusLogic_BusDeviceReset = 0x81 +enum blogic_ccb_opcode { + BLOGIC_INITIATOR_CCB = 0x00, + BLOGIC_TGT_CCB = 0x01, + BLOGIC_INITIATOR_CCB_SG = 0x02, + BLOGIC_INITIATOR_CCBB_RESIDUAL = 0x03, + BLOGIC_INITIATOR_CCB_SG_RESIDUAL = 0x04, + BLOGIC_BDR = 0x81 } PACKED; @@ -709,11 +698,11 @@ enum BusLogic_CCB_Opcode { Define the CCB Data Direction Codes. */ -enum BusLogic_DataDirection { - BusLogic_UncheckedDataTransfer = 0, - BusLogic_DataInLengthChecked = 1, - BusLogic_DataOutLengthChecked = 2, - BusLogic_NoDataTransfer = 3 +enum blogic_datadir { + BLOGIC_UNCHECKED_TX = 0, + BLOGIC_DATAIN_CHECKED = 1, + BLOGIC_DATAOUT_CHECKED = 2, + BLOGIC_NOTX = 3 }; @@ -722,32 +711,32 @@ enum BusLogic_DataDirection { return status code 0x0C; it uses 0x12 for both overruns and underruns. 
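
   [Editor's sketch -- annotation, not part of the commit.]  When a CCB
   completes, the adapter status below is folded into the Linux host byte of
   scsi_cmnd->result; a simplified shape of that translation (cf. the
   driver's result-code helper; only a few cases shown):

	int hoststatus;

	switch (adapter_status) {
	case BLOGIC_CMD_CMPLT_NORMAL:
	case BLOGIC_DATA_UNDERRUN:
	case BLOGIC_DATA_OVERRUN:
		hoststatus = DID_OK;	// let the SCSI status decide
		break;
	case BLOGIC_SELECT_TIMEOUT:
		hoststatus = DID_TIME_OUT;
		break;
	default:
		hoststatus = DID_ERROR;
		break;
	}
	command->result = (hoststatus << 16) | tgt_status;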
*/ -enum BusLogic_HostAdapterStatus { - BusLogic_CommandCompletedNormally = 0x00, - BusLogic_LinkedCommandCompleted = 0x0A, - BusLogic_LinkedCommandCompletedWithFlag = 0x0B, - BusLogic_DataUnderRun = 0x0C, - BusLogic_SCSISelectionTimeout = 0x11, - BusLogic_DataOverRun = 0x12, - BusLogic_UnexpectedBusFree = 0x13, - BusLogic_InvalidBusPhaseRequested = 0x14, - BusLogic_InvalidOutgoingMailboxActionCode = 0x15, - BusLogic_InvalidCommandOperationCode = 0x16, - BusLogic_LinkedCCBhasInvalidLUN = 0x17, - BusLogic_InvalidCommandParameter = 0x1A, - BusLogic_AutoRequestSenseFailed = 0x1B, - BusLogic_TaggedQueuingMessageRejected = 0x1C, - BusLogic_UnsupportedMessageReceived = 0x1D, - BusLogic_HostAdapterHardwareFailed = 0x20, - BusLogic_TargetFailedResponseToATN = 0x21, - BusLogic_HostAdapterAssertedRST = 0x22, - BusLogic_OtherDeviceAssertedRST = 0x23, - BusLogic_TargetDeviceReconnectedImproperly = 0x24, - BusLogic_HostAdapterAssertedBusDeviceReset = 0x25, - BusLogic_AbortQueueGenerated = 0x26, - BusLogic_HostAdapterSoftwareError = 0x27, - BusLogic_HostAdapterHardwareTimeoutError = 0x30, - BusLogic_SCSIParityErrorDetected = 0x34 +enum blogic_adapter_status { + BLOGIC_CMD_CMPLT_NORMAL = 0x00, + BLOGIC_LINK_CMD_CMPLT = 0x0A, + BLOGIC_LINK_CMD_CMPLT_FLAG = 0x0B, + BLOGIC_DATA_UNDERRUN = 0x0C, + BLOGIC_SELECT_TIMEOUT = 0x11, + BLOGIC_DATA_OVERRUN = 0x12, + BLOGIC_NOEXPECT_BUSFREE = 0x13, + BLOGIC_INVALID_BUSPHASE = 0x14, + BLOGIC_INVALID_OUTBOX_CODE = 0x15, + BLOGIC_INVALID_CMD_CODE = 0x16, + BLOGIC_LINKCCB_BADLUN = 0x17, + BLOGIC_BAD_CMD_PARAM = 0x1A, + BLOGIC_AUTOREQSENSE_FAIL = 0x1B, + BLOGIC_TAGQUEUE_REJECT = 0x1C, + BLOGIC_BAD_MSG_RCVD = 0x1D, + BLOGIC_HW_FAIL = 0x20, + BLOGIC_NORESPONSE_TO_ATN = 0x21, + BLOGIC_HW_RESET = 0x22, + BLOGIC_RST_FROM_OTHERDEV = 0x23, + BLOGIC_BAD_RECONNECT = 0x24, + BLOGIC_HW_BDR = 0x25, + BLOGIC_ABRT_QUEUE = 0x26, + BLOGIC_ADAPTER_SW_ERROR = 0x27, + BLOGIC_HW_TIMEOUT = 0x30, + BLOGIC_PARITY_ERR = 0x34 } PACKED; @@ -755,30 +744,28 @@ enum BusLogic_HostAdapterStatus { Define the SCSI Target Device Status Codes. */ -enum BusLogic_TargetDeviceStatus { - BusLogic_OperationGood = 0x00, - BusLogic_CheckCondition = 0x02, - BusLogic_DeviceBusy = 0x08 +enum blogic_tgt_status { + BLOGIC_OP_GOOD = 0x00, + BLOGIC_CHECKCONDITION = 0x02, + BLOGIC_DEVBUSY = 0x08 } PACKED; /* Define the Queue Tag Codes. */ -enum BusLogic_QueueTag { - BusLogic_SimpleQueueTag = 0, - BusLogic_HeadOfQueueTag = 1, - BusLogic_OrderedQueueTag = 2, - BusLogic_ReservedQT = 3 +enum blogic_queuetag { + BLOGIC_SIMPLETAG = 0, + BLOGIC_HEADTAG = 1, + BLOGIC_ORDEREDTAG = 2, + BLOGIC_RSVDTAG = 3 }; /* Define the SCSI Command Descriptor Block (CDB). */ -#define BusLogic_CDB_MaxLength 12 - -typedef unsigned char SCSI_CDB_T[BusLogic_CDB_MaxLength]; +#define BLOGIC_CDB_MAXLEN 12 /* @@ -786,20 +773,20 @@ typedef unsigned char SCSI_CDB_T[BusLogic_CDB_MaxLength]; Firmware Interface and the FlashPoint SCCB Manager. */ -struct BusLogic_ScatterGatherSegment { - u32 SegmentByteCount; /* Bytes 0-3 */ - u32 SegmentDataPointer; /* Bytes 4-7 */ +struct blogic_sg_seg { + u32 segbytes; /* Bytes 0-3 */ + u32 segdata; /* Bytes 4-7 */ }; /* Define the Driver CCB Status Codes. */ -enum BusLogic_CCB_Status { - BusLogic_CCB_Free = 0, - BusLogic_CCB_Active = 1, - BusLogic_CCB_Completed = 2, - BusLogic_CCB_Reset = 3 +enum blogic_ccb_status { + BLOGIC_CCB_FREE = 0, + BLOGIC_CCB_ACTIVE = 1, + BLOGIC_CCB_COMPLETE = 2, + BLOGIC_CCB_RESET = 3 } PACKED; @@ -822,79 +809,81 @@ enum BusLogic_CCB_Status { 32 Logical Units per Target Device. 
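
   [Editor's sketch -- annotation, not part of the commit.]  For a
   scatter/gather command, the queuecommand path fills the CCB's sglist[]
   (defined below) straight from the mapped scatterlist; shape per
   blogic_qcmd() in BusLogic.c, with 32-bit segment addresses as required
   by the firmware interface:

	struct scatterlist *sg;
	int i, count = scsi_dma_map(command);

	ccb->opcode = BLOGIC_INITIATOR_CCB_SG;
	ccb->datalen = count * sizeof(struct blogic_sg_seg);
	scsi_for_each_sg(command, sg, count, i) {
		ccb->sglist[i].segbytes = sg_dma_len(sg);
		ccb->sglist[i].segdata = sg_dma_address(sg);
	}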
*/ -struct BusLogic_CCB { +struct blogic_ccb { /* MultiMaster Firmware and FlashPoint SCCB Manager Common Portion. */ - enum BusLogic_CCB_Opcode Opcode; /* Byte 0 */ - unsigned char:3; /* Byte 1 Bits 0-2 */ - enum BusLogic_DataDirection DataDirection:2; /* Byte 1 Bits 3-4 */ - bool TagEnable:1; /* Byte 1 Bit 5 */ - enum BusLogic_QueueTag QueueTag:2; /* Byte 1 Bits 6-7 */ - unsigned char CDB_Length; /* Byte 2 */ - unsigned char SenseDataLength; /* Byte 3 */ - u32 DataLength; /* Bytes 4-7 */ - u32 DataPointer; /* Bytes 8-11 */ - unsigned char:8; /* Byte 12 */ - unsigned char:8; /* Byte 13 */ - enum BusLogic_HostAdapterStatus HostAdapterStatus; /* Byte 14 */ - enum BusLogic_TargetDeviceStatus TargetDeviceStatus; /* Byte 15 */ - unsigned char TargetID; /* Byte 16 */ - unsigned char LogicalUnit:5; /* Byte 17 Bits 0-4 */ - bool LegacyTagEnable:1; /* Byte 17 Bit 5 */ - enum BusLogic_QueueTag LegacyQueueTag:2; /* Byte 17 Bits 6-7 */ - SCSI_CDB_T CDB; /* Bytes 18-29 */ - unsigned char:8; /* Byte 30 */ - unsigned char:8; /* Byte 31 */ - unsigned int:32; /* Bytes 32-35 */ - u32 SenseDataPointer; /* Bytes 36-39 */ + enum blogic_ccb_opcode opcode; /* Byte 0 */ + unsigned char:3; /* Byte 1 Bits 0-2 */ + enum blogic_datadir datadir:2; /* Byte 1 Bits 3-4 */ + bool tag_enable:1; /* Byte 1 Bit 5 */ + enum blogic_queuetag queuetag:2; /* Byte 1 Bits 6-7 */ + unsigned char cdblen; /* Byte 2 */ + unsigned char sense_datalen; /* Byte 3 */ + u32 datalen; /* Bytes 4-7 */ + void *data; /* Bytes 8-11 */ + unsigned char:8; /* Byte 12 */ + unsigned char:8; /* Byte 13 */ + enum blogic_adapter_status adapter_status; /* Byte 14 */ + enum blogic_tgt_status tgt_status; /* Byte 15 */ + unsigned char tgt_id; /* Byte 16 */ + unsigned char lun:5; /* Byte 17 Bits 0-4 */ + bool legacytag_enable:1; /* Byte 17 Bit 5 */ + enum blogic_queuetag legacy_tag:2; /* Byte 17 Bits 6-7 */ + unsigned char cdb[BLOGIC_CDB_MAXLEN]; /* Bytes 18-29 */ + unsigned char:8; /* Byte 30 */ + unsigned char:8; /* Byte 31 */ + u32 rsvd_int; /* Bytes 32-35 */ + u32 sensedata; /* Bytes 36-39 */ /* FlashPoint SCCB Manager Defined Portion. */ - void (*CallbackFunction) (struct BusLogic_CCB *); /* Bytes 40-43 */ - u32 BaseAddress; /* Bytes 44-47 */ - enum BusLogic_CompletionCode CompletionCode; /* Byte 48 */ + void (*callback) (struct blogic_ccb *); /* Bytes 40-43 */ + u32 base_addr; /* Bytes 44-47 */ + enum blogic_cmplt_code comp_code; /* Byte 48 */ #ifdef CONFIG_SCSI_FLASHPOINT - unsigned char:8; /* Byte 49 */ - unsigned short OS_Flags; /* Bytes 50-51 */ - unsigned char Private[48]; /* Bytes 52-99 */ + unsigned char:8; /* Byte 49 */ + u16 os_flags; /* Bytes 50-51 */ + unsigned char private[24]; /* Bytes 52-99 */ + void *rsvd1; + void *rsvd2; + unsigned char private2[16]; #endif /* BusLogic Linux Driver Defined Portion. */ - dma_addr_t AllocationGroupHead; - unsigned int AllocationGroupSize; - u32 DMA_Handle; - enum BusLogic_CCB_Status Status; - unsigned long SerialNumber; - struct scsi_cmnd *Command; - struct BusLogic_HostAdapter *HostAdapter; - struct BusLogic_CCB *Next; - struct BusLogic_CCB *NextAll; - struct BusLogic_ScatterGatherSegment - ScatterGatherList[BusLogic_ScatterGatherLimit]; + dma_addr_t allocgrp_head; + unsigned int allocgrp_size; + u32 dma_handle; + enum blogic_ccb_status status; + unsigned long serial; + struct scsi_cmnd *command; + struct blogic_adapter *adapter; + struct blogic_ccb *next; + struct blogic_ccb *next_all; + struct blogic_sg_seg sglist[BLOGIC_SG_LIMIT]; }; /* Define the 32 Bit Mode Outgoing Mailbox structure. 
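
   [Editor's sketch -- annotation, not part of the commit.]  A prepared CCB
   is handed to MultiMaster firmware by writing its DMA handle and an action
   code into the next free outgoing mailbox, then ringing the doorbell;
   paraphrased from the driver's write-outbox routine (field names per
   BusLogic.h, assumed ring pointers first_outbox/last_outbox/next_outbox):

	struct blogic_outbox *next_outbox = adapter->next_outbox;

	if (next_outbox->action != BLOGIC_OUTBOX_FREE)
		return false;		// firmware has not drained the ring
	ccb->status = BLOGIC_CCB_ACTIVE;
	next_outbox->ccb = ccb->dma_handle;
	next_outbox->action = action;	// e.g. BLOGIC_MBOX_START
	blogic_execmbox(adapter);	// tell the adapter to scan its ring
	if (++next_outbox > adapter->last_outbox)
		next_outbox = adapter->first_outbox;
	adapter->next_outbox = next_outbox;
	return true;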
*/ -struct BusLogic_OutgoingMailbox { - u32 CCB; /* Bytes 0-3 */ - unsigned int:24; /* Bytes 4-6 */ - enum BusLogic_ActionCode ActionCode; /* Byte 7 */ +struct blogic_outbox { + u32 ccb; /* Bytes 0-3 */ + u32:24; /* Bytes 4-6 */ + enum blogic_action action; /* Byte 7 */ }; /* Define the 32 Bit Mode Incoming Mailbox structure. */ -struct BusLogic_IncomingMailbox { - u32 CCB; /* Bytes 0-3 */ - enum BusLogic_HostAdapterStatus HostAdapterStatus; /* Byte 4 */ - enum BusLogic_TargetDeviceStatus TargetDeviceStatus; /* Byte 5 */ - unsigned char:8; /* Byte 6 */ - enum BusLogic_CompletionCode CompletionCode; /* Byte 7 */ +struct blogic_inbox { + u32 ccb; /* Bytes 0-3 */ + enum blogic_adapter_status adapter_status; /* Byte 4 */ + enum blogic_tgt_status tgt_status; /* Byte 5 */ + unsigned char:8; /* Byte 6 */ + enum blogic_cmplt_code comp_code; /* Byte 7 */ }; @@ -902,64 +891,60 @@ struct BusLogic_IncomingMailbox { Define the BusLogic Driver Options structure. */ -struct BusLogic_DriverOptions { - unsigned short TaggedQueuingPermitted; - unsigned short TaggedQueuingPermittedMask; - unsigned short BusSettleTime; - struct BusLogic_LocalOptions LocalOptions; - unsigned char CommonQueueDepth; - unsigned char QueueDepth[BusLogic_MaxTargetDevices]; +struct blogic_drvr_options { + unsigned short tagq_ok; + unsigned short tagq_ok_mask; + unsigned short bus_settle_time; + unsigned short stop_tgt_inquiry; + unsigned char common_qdepth; + unsigned char qdepth[BLOGIC_MAXDEV]; }; /* Define the Host Adapter Target Flags structure. */ -struct BusLogic_TargetFlags { - bool TargetExists:1; - bool TaggedQueuingSupported:1; - bool WideTransfersSupported:1; - bool TaggedQueuingActive:1; - bool WideTransfersActive:1; - bool CommandSuccessfulFlag:1; - bool TargetInfoReported:1; +struct blogic_tgt_flags { + bool tgt_exists:1; + bool tagq_ok:1; + bool wide_ok:1; + bool tagq_active:1; + bool wide_active:1; + bool cmd_good:1; + bool tgt_info_in:1; }; /* Define the Host Adapter Target Statistics structure. 
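
   [Editor's sketch -- annotation, not part of the commit.]  The ten
   read_sz_buckets/write_sz_buckets cells below form a log2 histogram of
   transfer sizes, matching the 0-1KB through 256KB+ columns printed by the
   show_info tables earlier in this patch.  Sketch of the update step
   (hypothetical helper name):

	static void inc_sz_bucket(unsigned int buckets[10],
				  unsigned int bytes)
	{
		int i = 0;
		unsigned int limit = 1024;	// bucket 0: up to 1KB

		while (i < 9 && bytes > limit) {
			i++;
			limit <<= 1;		// 2KB, 4KB, ... 256KB
		}
		buckets[i]++;			// bucket 9: beyond 256KB
	}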
*/ -#define BusLogic_SizeBuckets 10 - -typedef unsigned int BusLogic_CommandSizeBuckets_T[BusLogic_SizeBuckets]; - -struct BusLogic_TargetStatistics { - unsigned int CommandsAttempted; - unsigned int CommandsCompleted; - unsigned int ReadCommands; - unsigned int WriteCommands; - struct BusLogic_ByteCounter TotalBytesRead; - struct BusLogic_ByteCounter TotalBytesWritten; - BusLogic_CommandSizeBuckets_T ReadCommandSizeBuckets; - BusLogic_CommandSizeBuckets_T WriteCommandSizeBuckets; - unsigned short CommandAbortsRequested; - unsigned short CommandAbortsAttempted; - unsigned short CommandAbortsCompleted; - unsigned short BusDeviceResetsRequested; - unsigned short BusDeviceResetsAttempted; - unsigned short BusDeviceResetsCompleted; - unsigned short HostAdapterResetsRequested; - unsigned short HostAdapterResetsAttempted; - unsigned short HostAdapterResetsCompleted; +#define BLOGIC_SZ_BUCKETS 10 + +struct blogic_tgt_stats { + unsigned int cmds_tried; + unsigned int cmds_complete; + unsigned int read_cmds; + unsigned int write_cmds; + struct blogic_byte_count bytesread; + struct blogic_byte_count byteswritten; + unsigned int read_sz_buckets[BLOGIC_SZ_BUCKETS]; + unsigned int write_sz_buckets[BLOGIC_SZ_BUCKETS]; + unsigned short aborts_request; + unsigned short aborts_tried; + unsigned short aborts_done; + unsigned short bdr_request; + unsigned short bdr_tried; + unsigned short bdr_done; + unsigned short adapter_reset_req; + unsigned short adapter_reset_attempt; + unsigned short adapter_reset_done; }; /* Define the FlashPoint Card Handle data type. */ -#define FlashPoint_BadCardHandle 0xFFFFFFFF - -typedef unsigned int FlashPoint_CardHandle_T; +#define FPOINT_BADCARD_HANDLE 0xFFFFFFFFL /* @@ -967,179 +952,179 @@ typedef unsigned int FlashPoint_CardHandle_T; by the FlashPoint SCCB Manager. 
*/ -struct FlashPoint_Info { - u32 BaseAddress; /* Bytes 0-3 */ - bool Present; /* Byte 4 */ - unsigned char IRQ_Channel; /* Byte 5 */ - unsigned char SCSI_ID; /* Byte 6 */ - unsigned char SCSI_LUN; /* Byte 7 */ - unsigned short FirmwareRevision; /* Bytes 8-9 */ - unsigned short SynchronousPermitted; /* Bytes 10-11 */ - unsigned short FastPermitted; /* Bytes 12-13 */ - unsigned short UltraPermitted; /* Bytes 14-15 */ - unsigned short DisconnectPermitted; /* Bytes 16-17 */ - unsigned short WidePermitted; /* Bytes 18-19 */ - bool ParityCheckingEnabled:1; /* Byte 20 Bit 0 */ - bool HostWideSCSI:1; /* Byte 20 Bit 1 */ - bool HostSoftReset:1; /* Byte 20 Bit 2 */ - bool ExtendedTranslationEnabled:1; /* Byte 20 Bit 3 */ - bool LowByteTerminated:1; /* Byte 20 Bit 4 */ - bool HighByteTerminated:1; /* Byte 20 Bit 5 */ - bool ReportDataUnderrun:1; /* Byte 20 Bit 6 */ - bool SCAM_Enabled:1; /* Byte 20 Bit 7 */ - bool SCAM_Level2:1; /* Byte 21 Bit 0 */ - unsigned char:7; /* Byte 21 Bits 1-7 */ - unsigned char Family; /* Byte 22 */ - unsigned char BusType; /* Byte 23 */ - unsigned char ModelNumber[3]; /* Bytes 24-26 */ - unsigned char RelativeCardNumber; /* Byte 27 */ - unsigned char Reserved[4]; /* Bytes 28-31 */ - unsigned int OS_Reserved; /* Bytes 32-35 */ - unsigned char TranslationInfo[4]; /* Bytes 36-39 */ - unsigned int Reserved2[5]; /* Bytes 40-59 */ - unsigned int SecondaryRange; /* Bytes 60-63 */ +struct fpoint_info { + u32 base_addr; /* Bytes 0-3 */ + bool present; /* Byte 4 */ + unsigned char irq_ch; /* Byte 5 */ + unsigned char scsi_id; /* Byte 6 */ + unsigned char scsi_lun; /* Byte 7 */ + u16 fw_rev; /* Bytes 8-9 */ + u16 sync_ok; /* Bytes 10-11 */ + u16 fast_ok; /* Bytes 12-13 */ + u16 ultra_ok; /* Bytes 14-15 */ + u16 discon_ok; /* Bytes 16-17 */ + u16 wide_ok; /* Bytes 18-19 */ + bool parity:1; /* Byte 20 Bit 0 */ + bool wide:1; /* Byte 20 Bit 1 */ + bool softreset:1; /* Byte 20 Bit 2 */ + bool ext_trans_enable:1; /* Byte 20 Bit 3 */ + bool low_term:1; /* Byte 20 Bit 4 */ + bool high_term:1; /* Byte 20 Bit 5 */ + bool report_underrun:1; /* Byte 20 Bit 6 */ + bool scam_enabled:1; /* Byte 20 Bit 7 */ + bool scam_lev2:1; /* Byte 21 Bit 0 */ + unsigned char:7; /* Byte 21 Bits 1-7 */ + unsigned char family; /* Byte 22 */ + unsigned char bus_type; /* Byte 23 */ + unsigned char model[3]; /* Bytes 24-26 */ + unsigned char relative_cardnum; /* Byte 27 */ + unsigned char rsvd[4]; /* Bytes 28-31 */ + u32 os_rsvd; /* Bytes 32-35 */ + unsigned char translation_info[4]; /* Bytes 36-39 */ + u32 rsvd2[5]; /* Bytes 40-59 */ + u32 sec_range; /* Bytes 60-63 */ }; /* Define the BusLogic Driver Host Adapter structure. 
*/ -struct BusLogic_HostAdapter { - struct Scsi_Host *SCSI_Host; - struct pci_dev *PCI_Device; - enum BusLogic_HostAdapterType HostAdapterType; - enum BusLogic_HostAdapterBusType HostAdapterBusType; - unsigned long IO_Address; - unsigned long PCI_Address; - unsigned short AddressCount; - unsigned char HostNumber; - unsigned char ModelName[9]; - unsigned char FirmwareVersion[6]; - unsigned char FullModelName[18]; - unsigned char Bus; - unsigned char Device; - unsigned char IRQ_Channel; - unsigned char DMA_Channel; - unsigned char SCSI_ID; - bool IRQ_ChannelAcquired:1; - bool DMA_ChannelAcquired:1; - bool ExtendedTranslationEnabled:1; - bool ParityCheckingEnabled:1; - bool BusResetEnabled:1; - bool LevelSensitiveInterrupt:1; - bool HostWideSCSI:1; - bool HostDifferentialSCSI:1; - bool HostSupportsSCAM:1; - bool HostUltraSCSI:1; - bool ExtendedLUNSupport:1; - bool TerminationInfoValid:1; - bool LowByteTerminated:1; - bool HighByteTerminated:1; - bool BounceBuffersRequired:1; - bool StrictRoundRobinModeSupport:1; - bool SCAM_Enabled:1; - bool SCAM_Level2:1; - bool HostAdapterInitialized:1; - bool HostAdapterExternalReset:1; - bool HostAdapterInternalError:1; - bool ProcessCompletedCCBsActive; - volatile bool HostAdapterCommandCompleted; - unsigned short HostAdapterScatterGatherLimit; - unsigned short DriverScatterGatherLimit; - unsigned short MaxTargetDevices; - unsigned short MaxLogicalUnits; - unsigned short MailboxCount; - unsigned short InitialCCBs; - unsigned short IncrementalCCBs; - unsigned short AllocatedCCBs; - unsigned short DriverQueueDepth; - unsigned short HostAdapterQueueDepth; - unsigned short UntaggedQueueDepth; - unsigned short CommonQueueDepth; - unsigned short BusSettleTime; - unsigned short SynchronousPermitted; - unsigned short FastPermitted; - unsigned short UltraPermitted; - unsigned short WidePermitted; - unsigned short DisconnectPermitted; - unsigned short TaggedQueuingPermitted; - unsigned short ExternalHostAdapterResets; - unsigned short HostAdapterInternalErrors; - unsigned short TargetDeviceCount; - unsigned short MessageBufferLength; - u32 BIOS_Address; - struct BusLogic_DriverOptions *DriverOptions; - struct FlashPoint_Info FlashPointInfo; - FlashPoint_CardHandle_T CardHandle; +struct blogic_adapter { + struct Scsi_Host *scsi_host; + struct pci_dev *pci_device; + enum blogic_adapter_type adapter_type; + enum blogic_adapter_bus_type adapter_bus_type; + unsigned long io_addr; + unsigned long pci_addr; + unsigned short addr_count; + unsigned char host_no; + unsigned char model[9]; + unsigned char fw_ver[6]; + unsigned char full_model[18]; + unsigned char bus; + unsigned char dev; + unsigned char irq_ch; + unsigned char dma_ch; + unsigned char scsi_id; + bool irq_acquired:1; + bool dma_chan_acquired:1; + bool ext_trans_enable:1; + bool parity:1; + bool reset_enabled:1; + bool level_int:1; + bool wide:1; + bool differential:1; + bool scam:1; + bool ultra:1; + bool ext_lun:1; + bool terminfo_valid:1; + bool low_term:1; + bool high_term:1; + bool need_bouncebuf:1; + bool strict_rr:1; + bool scam_enabled:1; + bool scam_lev2:1; + bool adapter_initd:1; + bool adapter_extreset:1; + bool adapter_intern_err:1; + bool processing_ccbs; + volatile bool adapter_cmd_complete; + unsigned short adapter_sglimit; + unsigned short drvr_sglimit; + unsigned short maxdev; + unsigned short maxlun; + unsigned short mbox_count; + unsigned short initccbs; + unsigned short inc_ccbs; + unsigned short alloc_ccbs; + unsigned short drvr_qdepth; + unsigned short adapter_qdepth; + unsigned short 
untag_qdepth; + unsigned short common_qdepth; + unsigned short bus_settle_time; + unsigned short sync_ok; + unsigned short fast_ok; + unsigned short ultra_ok; + unsigned short wide_ok; + unsigned short discon_ok; + unsigned short tagq_ok; + unsigned short ext_resets; + unsigned short adapter_intern_errors; + unsigned short tgt_count; + unsigned short msgbuflen; + u32 bios_addr; + struct blogic_drvr_options *drvr_opts; + struct fpoint_info fpinfo; + void *cardhandle; struct list_head host_list; - struct BusLogic_CCB *All_CCBs; - struct BusLogic_CCB *Free_CCBs; - struct BusLogic_CCB *FirstCompletedCCB; - struct BusLogic_CCB *LastCompletedCCB; - struct BusLogic_CCB *BusDeviceResetPendingCCB[BusLogic_MaxTargetDevices]; - struct BusLogic_TargetFlags TargetFlags[BusLogic_MaxTargetDevices]; - unsigned char QueueDepth[BusLogic_MaxTargetDevices]; - unsigned char SynchronousPeriod[BusLogic_MaxTargetDevices]; - unsigned char SynchronousOffset[BusLogic_MaxTargetDevices]; - unsigned char ActiveCommands[BusLogic_MaxTargetDevices]; - unsigned int CommandsSinceReset[BusLogic_MaxTargetDevices]; - unsigned long LastSequencePoint[BusLogic_MaxTargetDevices]; - unsigned long LastResetAttempted[BusLogic_MaxTargetDevices]; - unsigned long LastResetCompleted[BusLogic_MaxTargetDevices]; - struct BusLogic_OutgoingMailbox *FirstOutgoingMailbox; - struct BusLogic_OutgoingMailbox *LastOutgoingMailbox; - struct BusLogic_OutgoingMailbox *NextOutgoingMailbox; - struct BusLogic_IncomingMailbox *FirstIncomingMailbox; - struct BusLogic_IncomingMailbox *LastIncomingMailbox; - struct BusLogic_IncomingMailbox *NextIncomingMailbox; - struct BusLogic_TargetStatistics TargetStatistics[BusLogic_MaxTargetDevices]; - unsigned char *MailboxSpace; - dma_addr_t MailboxSpaceHandle; - unsigned int MailboxSize; - unsigned long CCB_Offset; - char MessageBuffer[BusLogic_MessageBufferSize]; + struct blogic_ccb *all_ccbs; + struct blogic_ccb *free_ccbs; + struct blogic_ccb *firstccb; + struct blogic_ccb *lastccb; + struct blogic_ccb *bdr_pend[BLOGIC_MAXDEV]; + struct blogic_tgt_flags tgt_flags[BLOGIC_MAXDEV]; + unsigned char qdepth[BLOGIC_MAXDEV]; + unsigned char sync_period[BLOGIC_MAXDEV]; + unsigned char sync_offset[BLOGIC_MAXDEV]; + unsigned char active_cmds[BLOGIC_MAXDEV]; + unsigned int cmds_since_rst[BLOGIC_MAXDEV]; + unsigned long last_seqpoint[BLOGIC_MAXDEV]; + unsigned long last_resettried[BLOGIC_MAXDEV]; + unsigned long last_resetdone[BLOGIC_MAXDEV]; + struct blogic_outbox *first_outbox; + struct blogic_outbox *last_outbox; + struct blogic_outbox *next_outbox; + struct blogic_inbox *first_inbox; + struct blogic_inbox *last_inbox; + struct blogic_inbox *next_inbox; + struct blogic_tgt_stats tgt_stats[BLOGIC_MAXDEV]; + unsigned char *mbox_space; + dma_addr_t mbox_space_handle; + unsigned int mbox_sz; + unsigned long ccb_offset; + char msgbuf[BLOGIC_MSGBUF_SIZE]; }; /* Define a structure for the BIOS Disk Parameters. */ -struct BIOS_DiskParameters { - int Heads; - int Sectors; - int Cylinders; +struct bios_diskparam { + int heads; + int sectors; + int cylinders; }; /* Define a structure for the SCSI Inquiry command results. 
*/ -struct SCSI_Inquiry { - unsigned char PeripheralDeviceType:5; /* Byte 0 Bits 0-4 */ - unsigned char PeripheralQualifier:3; /* Byte 0 Bits 5-7 */ - unsigned char DeviceTypeModifier:7; /* Byte 1 Bits 0-6 */ - bool RMB:1; /* Byte 1 Bit 7 */ - unsigned char ANSI_ApprovedVersion:3; /* Byte 2 Bits 0-2 */ - unsigned char ECMA_Version:3; /* Byte 2 Bits 3-5 */ - unsigned char ISO_Version:2; /* Byte 2 Bits 6-7 */ - unsigned char ResponseDataFormat:4; /* Byte 3 Bits 0-3 */ - unsigned char:2; /* Byte 3 Bits 4-5 */ - bool TrmIOP:1; /* Byte 3 Bit 6 */ - bool AENC:1; /* Byte 3 Bit 7 */ - unsigned char AdditionalLength; /* Byte 4 */ - unsigned char:8; /* Byte 5 */ - unsigned char:8; /* Byte 6 */ - bool SftRe:1; /* Byte 7 Bit 0 */ - bool CmdQue:1; /* Byte 7 Bit 1 */ - bool:1; /* Byte 7 Bit 2 */ - bool Linked:1; /* Byte 7 Bit 3 */ - bool Sync:1; /* Byte 7 Bit 4 */ - bool WBus16:1; /* Byte 7 Bit 5 */ - bool WBus32:1; /* Byte 7 Bit 6 */ - bool RelAdr:1; /* Byte 7 Bit 7 */ - unsigned char VendorIdentification[8]; /* Bytes 8-15 */ - unsigned char ProductIdentification[16]; /* Bytes 16-31 */ - unsigned char ProductRevisionLevel[4]; /* Bytes 32-35 */ +struct scsi_inquiry { + unsigned char devtype:5; /* Byte 0 Bits 0-4 */ + unsigned char dev_qual:3; /* Byte 0 Bits 5-7 */ + unsigned char dev_modifier:7; /* Byte 1 Bits 0-6 */ + bool rmb:1; /* Byte 1 Bit 7 */ + unsigned char ansi_ver:3; /* Byte 2 Bits 0-2 */ + unsigned char ecma_ver:3; /* Byte 2 Bits 3-5 */ + unsigned char iso_ver:2; /* Byte 2 Bits 6-7 */ + unsigned char resp_fmt:4; /* Byte 3 Bits 0-3 */ + unsigned char:2; /* Byte 3 Bits 4-5 */ + bool TrmIOP:1; /* Byte 3 Bit 6 */ + bool AENC:1; /* Byte 3 Bit 7 */ + unsigned char addl_len; /* Byte 4 */ + unsigned char:8; /* Byte 5 */ + unsigned char:8; /* Byte 6 */ + bool SftRe:1; /* Byte 7 Bit 0 */ + bool CmdQue:1; /* Byte 7 Bit 1 */ + bool:1; /* Byte 7 Bit 2 */ + bool linked:1; /* Byte 7 Bit 3 */ + bool sync:1; /* Byte 7 Bit 4 */ + bool WBus16:1; /* Byte 7 Bit 5 */ + bool WBus32:1; /* Byte 7 Bit 6 */ + bool RelAdr:1; /* Byte 7 Bit 7 */ + unsigned char vendor[8]; /* Bytes 8-15 */ + unsigned char product[16]; /* Bytes 16-31 */ + unsigned char product_rev[4]; /* Bytes 32-35 */ }; @@ -1148,185 +1133,170 @@ struct SCSI_Inquiry { Host Adapter I/O Registers. 
*/ -static inline void BusLogic_SCSIBusReset(struct BusLogic_HostAdapter *HostAdapter) +static inline void blogic_busreset(struct blogic_adapter *adapter) { - union BusLogic_ControlRegister ControlRegister; - ControlRegister.All = 0; - ControlRegister.cr.SCSIBusReset = true; - outb(ControlRegister.All, HostAdapter->IO_Address + BusLogic_ControlRegisterOffset); + union blogic_cntrl_reg cr; + cr.all = 0; + cr.cr.bus_reset = true; + outb(cr.all, adapter->io_addr + BLOGIC_CNTRL_REG); } -static inline void BusLogic_InterruptReset(struct BusLogic_HostAdapter *HostAdapter) +static inline void blogic_intreset(struct blogic_adapter *adapter) { - union BusLogic_ControlRegister ControlRegister; - ControlRegister.All = 0; - ControlRegister.cr.InterruptReset = true; - outb(ControlRegister.All, HostAdapter->IO_Address + BusLogic_ControlRegisterOffset); + union blogic_cntrl_reg cr; + cr.all = 0; + cr.cr.int_reset = true; + outb(cr.all, adapter->io_addr + BLOGIC_CNTRL_REG); } -static inline void BusLogic_SoftReset(struct BusLogic_HostAdapter *HostAdapter) +static inline void blogic_softreset(struct blogic_adapter *adapter) { - union BusLogic_ControlRegister ControlRegister; - ControlRegister.All = 0; - ControlRegister.cr.SoftReset = true; - outb(ControlRegister.All, HostAdapter->IO_Address + BusLogic_ControlRegisterOffset); + union blogic_cntrl_reg cr; + cr.all = 0; + cr.cr.soft_reset = true; + outb(cr.all, adapter->io_addr + BLOGIC_CNTRL_REG); } -static inline void BusLogic_HardReset(struct BusLogic_HostAdapter *HostAdapter) +static inline void blogic_hardreset(struct blogic_adapter *adapter) { - union BusLogic_ControlRegister ControlRegister; - ControlRegister.All = 0; - ControlRegister.cr.HardReset = true; - outb(ControlRegister.All, HostAdapter->IO_Address + BusLogic_ControlRegisterOffset); + union blogic_cntrl_reg cr; + cr.all = 0; + cr.cr.hard_reset = true; + outb(cr.all, adapter->io_addr + BLOGIC_CNTRL_REG); } -static inline unsigned char BusLogic_ReadStatusRegister(struct BusLogic_HostAdapter *HostAdapter) +static inline unsigned char blogic_rdstatus(struct blogic_adapter *adapter) { - return inb(HostAdapter->IO_Address + BusLogic_StatusRegisterOffset); + return inb(adapter->io_addr + BLOGIC_STATUS_REG); } -static inline void BusLogic_WriteCommandParameterRegister(struct BusLogic_HostAdapter - *HostAdapter, unsigned char Value) +static inline void blogic_setcmdparam(struct blogic_adapter *adapter, + unsigned char value) { - outb(Value, HostAdapter->IO_Address + BusLogic_CommandParameterRegisterOffset); + outb(value, adapter->io_addr + BLOGIC_CMD_PARM_REG); } -static inline unsigned char BusLogic_ReadDataInRegister(struct BusLogic_HostAdapter *HostAdapter) +static inline unsigned char blogic_rddatain(struct blogic_adapter *adapter) { - return inb(HostAdapter->IO_Address + BusLogic_DataInRegisterOffset); + return inb(adapter->io_addr + BLOGIC_DATAIN_REG); } -static inline unsigned char BusLogic_ReadInterruptRegister(struct BusLogic_HostAdapter *HostAdapter) +static inline unsigned char blogic_rdint(struct blogic_adapter *adapter) { - return inb(HostAdapter->IO_Address + BusLogic_InterruptRegisterOffset); + return inb(adapter->io_addr + BLOGIC_INT_REG); } -static inline unsigned char BusLogic_ReadGeometryRegister(struct BusLogic_HostAdapter *HostAdapter) +static inline unsigned char blogic_rdgeom(struct blogic_adapter *adapter) { - return inb(HostAdapter->IO_Address + BusLogic_GeometryRegisterOffset); + return inb(adapter->io_addr + BLOGIC_GEOMETRY_REG); } /* - BusLogic_StartMailboxCommand issues an 
Execute Mailbox Command, which notifies the Host Adapter that an entry has been made in an Outgoing Mailbox. */ -static inline void BusLogic_StartMailboxCommand(struct BusLogic_HostAdapter *HostAdapter) +static inline void blogic_execmbox(struct blogic_adapter *adapter) { - BusLogic_WriteCommandParameterRegister(HostAdapter, BusLogic_ExecuteMailboxCommand); + blogic_setcmdparam(adapter, BLOGIC_EXEC_MBOX_CMD); } /* - BusLogic_Delay waits for Seconds to elapse. + blogic_delay waits for seconds to elapse. */ -static inline void BusLogic_Delay(int Seconds) -{ - mdelay(1000 * Seconds); -} - -/* - Virtual_to_Bus and Bus_to_Virtual map between Kernel Virtual Addresses - and PCI/VLB/EISA/ISA Bus Addresses. -*/ - -static inline u32 Virtual_to_Bus(void *VirtualAddress) -{ - return (u32) virt_to_bus(VirtualAddress); -} - -static inline void *Bus_to_Virtual(u32 BusAddress) +static inline void blogic_delay(int seconds) { - return (void *) bus_to_virt(BusAddress); + mdelay(1000 * seconds); } /* - Virtual_to_32Bit_Virtual maps between Kernel Virtual Addresses and + virt_to_32bit_virt maps between Kernel Virtual Addresses and 32 bit Kernel Virtual Addresses. This avoids compilation warnings on 64 bit architectures. */ -static inline u32 Virtual_to_32Bit_Virtual(void *VirtualAddress) +static inline u32 virt_to_32bit_virt(void *virt_addr) { - return (u32) (unsigned long) VirtualAddress; + return (u32) (unsigned long) virt_addr; } /* - BusLogic_IncrementErrorCounter increments Error Counter by 1, stopping at + blogic_inc_count increments counter by 1, stopping at 65535 rather than wrapping around to 0. */ -static inline void BusLogic_IncrementErrorCounter(unsigned short *ErrorCounter) +static inline void blogic_inc_count(unsigned short *count) { - if (*ErrorCounter < 65535) - (*ErrorCounter)++; + if (*count < 65535) + (*count)++; } /* - BusLogic_IncrementByteCounter increments Byte Counter by Amount. + blogic_addcount increments the byte counter by amount. */ -static inline void BusLogic_IncrementByteCounter(struct BusLogic_ByteCounter - *ByteCounter, unsigned int Amount) +static inline void blogic_addcount(struct blogic_byte_count *bytecount, + unsigned int amount) { - ByteCounter->Units += Amount; - if (ByteCounter->Units > 999999999) { - ByteCounter->Units -= 1000000000; - ByteCounter->Billions++; + bytecount->units += amount; + if (bytecount->units > 999999999) { + bytecount->units -= 1000000000; + bytecount->billions++; } } /* - BusLogic_IncrementSizeBucket increments the Bucket for Amount. + blogic_incszbucket increments the size bucket for amount. */ -static inline void BusLogic_IncrementSizeBucket(BusLogic_CommandSizeBuckets_T CommandSizeBuckets, unsigned int Amount) +static inline void blogic_incszbucket(unsigned int *cmdsz_buckets, + unsigned int amount) { - int Index = 0; - if (Amount < 8 * 1024) { - if (Amount < 2 * 1024) - Index = (Amount < 1 * 1024 ? 0 : 1); + int index = 0; + if (amount < 8 * 1024) { + if (amount < 2 * 1024) + index = (amount < 1 * 1024 ? 0 : 1); else - Index = (Amount < 4 * 1024 ? 2 : 3); - } else if (Amount < 128 * 1024) { - if (Amount < 32 * 1024) - Index = (Amount < 16 * 1024 ? 4 : 5); + index = (amount < 4 * 1024 ? 2 : 3); + } else if (amount < 128 * 1024) { + if (amount < 32 * 1024) + index = (amount < 16 * 1024 ? 4 : 5); else - Index = (Amount < 64 * 1024 ? 6 : 7); + index = (amount < 64 * 1024 ? 6 : 7); } else - Index = (Amount < 256 * 1024 ? 8 : 9); - CommandSizeBuckets[Index]++; + index = (amount < 256 * 1024 ? 
8 : 9); + cmdsz_buckets[index]++; } /* Define the version number of the FlashPoint Firmware (SCCB Manager). */ -#define FlashPoint_FirmwareVersion "5.02" +#define FLASHPOINT_FW_VER "5.02" /* Define the possible return values from FlashPoint_HandleInterrupt. */ -#define FlashPoint_NormalInterrupt 0x00 -#define FlashPoint_InternalError 0xFE -#define FlashPoint_ExternalBusReset 0xFF +#define FPOINT_NORMAL_INT 0x00 +#define FPOINT_INTERN_ERR 0xFE +#define FPOINT_EXT_RESET 0xFF /* Define prototypes for the forward referenced BusLogic Driver Internal Functions. */ -static const char *BusLogic_DriverInfo(struct Scsi_Host *); -static int BusLogic_QueueCommand(struct Scsi_Host *h, struct scsi_cmnd *); -static int BusLogic_BIOSDiskParameters(struct scsi_device *, struct block_device *, sector_t, int *); -static int BusLogic_ProcDirectoryInfo(struct Scsi_Host *, char *, char **, off_t, int, int); -static int BusLogic_SlaveConfigure(struct scsi_device *); -static void BusLogic_QueueCompletedCCB(struct BusLogic_CCB *); -static irqreturn_t BusLogic_InterruptHandler(int, void *); -static int BusLogic_ResetHostAdapter(struct BusLogic_HostAdapter *, bool HardReset); -static void BusLogic_Message(enum BusLogic_MessageLevel, char *, struct BusLogic_HostAdapter *, ...); -static int __init BusLogic_Setup(char *); +static const char *blogic_drvr_info(struct Scsi_Host *); +static int blogic_qcmd(struct Scsi_Host *h, struct scsi_cmnd *); +static int blogic_diskparam(struct scsi_device *, struct block_device *, sector_t, int *); +static int blogic_slaveconfig(struct scsi_device *); +static void blogic_qcompleted_ccb(struct blogic_ccb *); +static irqreturn_t blogic_inthandler(int, void *); +static int blogic_resetadapter(struct blogic_adapter *, bool hard_reset); +static void blogic_msg(enum blogic_msglevel, char *, struct blogic_adapter *, ...); +static int __init blogic_setup(char *); #endif /* _BUSLOGIC_H */ diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c index dcd716d6860..5c74e4c52fe 100644 --- a/drivers/scsi/FlashPoint.c +++ b/drivers/scsi/FlashPoint.c @@ -29,27 +29,27 @@ struct sccb; typedef void (*CALL_BK_FN) (struct sccb *); struct sccb_mgr_info { - unsigned long si_baseaddr; + u32 si_baseaddr; unsigned char si_present; unsigned char si_intvect; unsigned char si_id; unsigned char si_lun; - unsigned short si_fw_revision; - unsigned short si_per_targ_init_sync; - unsigned short si_per_targ_fast_nego; - unsigned short si_per_targ_ultra_nego; - unsigned short si_per_targ_no_disc; - unsigned short si_per_targ_wide_nego; - unsigned short si_flags; + u16 si_fw_revision; + u16 si_per_targ_init_sync; + u16 si_per_targ_fast_nego; + u16 si_per_targ_ultra_nego; + u16 si_per_targ_no_disc; + u16 si_per_targ_wide_nego; + u16 si_flags; unsigned char si_card_family; unsigned char si_bustype; unsigned char si_card_model[3]; unsigned char si_relative_cardnum; unsigned char si_reserved[4]; - unsigned long si_OS_reserved; + u32 si_OS_reserved; unsigned char si_XlatInfo[4]; - unsigned long si_reserved2[5]; - unsigned long si_secondary_range; + u32 si_reserved2[5]; + u32 si_secondary_range; }; #define SCSI_PARITY_ENA 0x0001 @@ -70,14 +70,14 @@ struct sccb_mgr_info { * The UCB Manager treats the SCCB as its 'native hardware structure' */ -#pragma pack(1) +/*#pragma pack(1)*/ struct sccb { unsigned char OperationCode; unsigned char ControlByte; unsigned char CdbLength; unsigned char RequestSenseLength; - unsigned long DataLength; - unsigned long DataPointer; + u32 DataLength; + void *DataPointer; unsigned char 
CcbRes[2]; unsigned char HostStatus; unsigned char TargetStatus; @@ -86,32 +86,32 @@ struct sccb { unsigned char Cdb[12]; unsigned char CcbRes1; unsigned char Reserved1; - unsigned long Reserved2; - unsigned long SensePointer; + u32 Reserved2; + u32 SensePointer; CALL_BK_FN SccbCallback; /* VOID (*SccbCallback)(); */ - unsigned long SccbIOPort; /* Identifies board base port */ + u32 SccbIOPort; /* Identifies board base port */ unsigned char SccbStatus; unsigned char SCCBRes2; - unsigned short SccbOSFlags; - - unsigned long Sccb_XferCnt; /* actual transfer count */ - unsigned long Sccb_ATC; - unsigned long SccbVirtDataPtr; /* virtual addr for OS/2 */ - unsigned long Sccb_res1; - unsigned short Sccb_MGRFlags; - unsigned short Sccb_sgseg; + u16 SccbOSFlags; + + u32 Sccb_XferCnt; /* actual transfer count */ + u32 Sccb_ATC; + u32 SccbVirtDataPtr; /* virtual addr for OS/2 */ + u32 Sccb_res1; + u16 Sccb_MGRFlags; + u16 Sccb_sgseg; unsigned char Sccb_scsimsg; /* identify msg for selection */ unsigned char Sccb_tag; unsigned char Sccb_scsistat; unsigned char Sccb_idmsg; /* image of last msg in */ struct sccb *Sccb_forwardlink; struct sccb *Sccb_backlink; - unsigned long Sccb_savedATC; + u32 Sccb_savedATC; unsigned char Save_Cdb[6]; unsigned char Save_CdbLen; unsigned char Sccb_XferState; - unsigned long Sccb_SGoffset; + u32 Sccb_SGoffset; }; #pragma pack() @@ -223,15 +223,21 @@ struct sccb_mgr_tar_info { }; struct nvram_info { - unsigned char niModel; /* Model No. of card */ - unsigned char niCardNo; /* Card no. */ - unsigned long niBaseAddr; /* Port Address of card */ - unsigned char niSysConf; /* Adapter Configuration byte - Byte 16 of eeprom map */ - unsigned char niScsiConf; /* SCSI Configuration byte - Byte 17 of eeprom map */ - unsigned char niScamConf; /* SCAM Configuration byte - Byte 20 of eeprom map */ - unsigned char niAdapId; /* Host Adapter ID - Byte 24 of eerpom map */ - unsigned char niSyncTbl[MAX_SCSI_TAR / 2]; /* Sync/Wide byte of targets */ - unsigned char niScamTbl[MAX_SCSI_TAR][4]; /* Compressed Scam name string of Targets */ + unsigned char niModel; /* Model No. of card */ + unsigned char niCardNo; /* Card no. 
*/ + u32 niBaseAddr; /* Port Address of card */ + unsigned char niSysConf; /* Adapter Configuration byte - + Byte 16 of eeprom map */ + unsigned char niScsiConf; /* SCSI Configuration byte - + Byte 17 of eeprom map */ + unsigned char niScamConf; /* SCAM Configuration byte - + Byte 20 of eeprom map */ + unsigned char niAdapId; /* Host Adapter ID - + Byte 24 of eeprom map */ + unsigned char niSyncTbl[MAX_SCSI_TAR / 2]; /* Sync/Wide byte + of targets */ + unsigned char niScamTbl[MAX_SCSI_TAR][4]; /* Compressed Scam name + string of Targets */ }; #define MODEL_LT 1 @@ -243,7 +249,7 @@ struct sccb_card { struct sccb *currentSCCB; struct sccb_mgr_info *cardInfo; - unsigned long ioPort; + u32 ioPort; unsigned short cmdCounter; unsigned char discQCount; @@ -780,37 +786,37 @@ typedef struct SCCBscam_info { #define MENABLE_INT(p_port) (WR_HARPOON(p_port+hp_page_ctrl, \ (RD_HARPOON(p_port+hp_page_ctrl) & ~G_INT_DISABLE))) -static unsigned char FPT_sisyncn(unsigned long port, unsigned char p_card, +static unsigned char FPT_sisyncn(u32 port, unsigned char p_card, unsigned char syncFlag); -static void FPT_ssel(unsigned long port, unsigned char p_card); -static void FPT_sres(unsigned long port, unsigned char p_card, +static void FPT_ssel(u32 port, unsigned char p_card); +static void FPT_sres(u32 port, unsigned char p_card, struct sccb_card *pCurrCard); -static void FPT_shandem(unsigned long port, unsigned char p_card, +static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB); -static void FPT_stsyncn(unsigned long port, unsigned char p_card); -static void FPT_sisyncr(unsigned long port, unsigned char sync_pulse, +static void FPT_stsyncn(u32 port, unsigned char p_card); +static void FPT_sisyncr(u32 port, unsigned char sync_pulse, unsigned char offset); -static void FPT_sssyncv(unsigned long p_port, unsigned char p_id, +static void FPT_sssyncv(u32 p_port, unsigned char p_id, unsigned char p_sync_value, struct sccb_mgr_tar_info *currTar_Info); -static void FPT_sresb(unsigned long port, unsigned char p_card); -static void FPT_sxfrp(unsigned long p_port, unsigned char p_card); -static void FPT_schkdd(unsigned long port, unsigned char p_card); -static unsigned char FPT_RdStack(unsigned long port, unsigned char index); -static void FPT_WrStack(unsigned long portBase, unsigned char index, +static void FPT_sresb(u32 port, unsigned char p_card); +static void FPT_sxfrp(u32 p_port, unsigned char p_card); +static void FPT_schkdd(u32 port, unsigned char p_card); +static unsigned char FPT_RdStack(u32 port, unsigned char index); +static void FPT_WrStack(u32 portBase, unsigned char index, unsigned char data); -static unsigned char FPT_ChkIfChipInitialized(unsigned long ioPort); +static unsigned char FPT_ChkIfChipInitialized(u32 ioPort); -static void FPT_SendMsg(unsigned long port, unsigned char message); +static void FPT_SendMsg(u32 port, unsigned char message); static void FPT_queueFlushTargSccb(unsigned char p_card, unsigned char thisTarg, unsigned char error_code); static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card); static void FPT_RNVRamData(struct nvram_info *pNvRamInfo); -static unsigned char FPT_siwidn(unsigned long port, unsigned char p_card); -static void FPT_stwidn(unsigned long port, unsigned char p_card); -static void FPT_siwidr(unsigned long port, unsigned char width); static void 
FPT_queueSelectFail(struct sccb_card *pCurrCard, unsigned char p_card); @@ -827,45 +833,45 @@ static void FPT_utilUpdateResidual(struct sccb *p_SCCB); static unsigned short FPT_CalcCrc16(unsigned char buffer[]); static unsigned char FPT_CalcLrc(unsigned char buffer[]); -static void FPT_Wait1Second(unsigned long p_port); -static void FPT_Wait(unsigned long p_port, unsigned char p_delay); -static void FPT_utilEEWriteOnOff(unsigned long p_port, unsigned char p_mode); -static void FPT_utilEEWrite(unsigned long p_port, unsigned short ee_data, +static void FPT_Wait1Second(u32 p_port); +static void FPT_Wait(u32 p_port, unsigned char p_delay); +static void FPT_utilEEWriteOnOff(u32 p_port, unsigned char p_mode); +static void FPT_utilEEWrite(u32 p_port, unsigned short ee_data, unsigned short ee_addr); -static unsigned short FPT_utilEERead(unsigned long p_port, +static unsigned short FPT_utilEERead(u32 p_port, unsigned short ee_addr); -static unsigned short FPT_utilEEReadOrg(unsigned long p_port, +static unsigned short FPT_utilEEReadOrg(u32 p_port, unsigned short ee_addr); -static void FPT_utilEESendCmdAddr(unsigned long p_port, unsigned char ee_cmd, +static void FPT_utilEESendCmdAddr(u32 p_port, unsigned char ee_cmd, unsigned short ee_addr); -static void FPT_phaseDataOut(unsigned long port, unsigned char p_card); -static void FPT_phaseDataIn(unsigned long port, unsigned char p_card); -static void FPT_phaseCommand(unsigned long port, unsigned char p_card); -static void FPT_phaseStatus(unsigned long port, unsigned char p_card); -static void FPT_phaseMsgOut(unsigned long port, unsigned char p_card); -static void FPT_phaseMsgIn(unsigned long port, unsigned char p_card); -static void FPT_phaseIllegal(unsigned long port, unsigned char p_card); +static void FPT_phaseDataOut(u32 port, unsigned char p_card); +static void FPT_phaseDataIn(u32 port, unsigned char p_card); +static void FPT_phaseCommand(u32 port, unsigned char p_card); +static void FPT_phaseStatus(u32 port, unsigned char p_card); +static void FPT_phaseMsgOut(u32 port, unsigned char p_card); +static void FPT_phaseMsgIn(u32 port, unsigned char p_card); +static void FPT_phaseIllegal(u32 port, unsigned char p_card); -static void FPT_phaseDecode(unsigned long port, unsigned char p_card); -static void FPT_phaseChkFifo(unsigned long port, unsigned char p_card); -static void FPT_phaseBusFree(unsigned long p_port, unsigned char p_card); +static void FPT_phaseDecode(u32 port, unsigned char p_card); +static void FPT_phaseChkFifo(u32 port, unsigned char p_card); +static void FPT_phaseBusFree(u32 p_port, unsigned char p_card); -static void FPT_XbowInit(unsigned long port, unsigned char scamFlg); -static void FPT_BusMasterInit(unsigned long p_port); -static void FPT_DiagEEPROM(unsigned long p_port); +static void FPT_XbowInit(u32 port, unsigned char scamFlg); +static void FPT_BusMasterInit(u32 p_port); +static void FPT_DiagEEPROM(u32 p_port); -static void FPT_dataXferProcessor(unsigned long port, +static void FPT_dataXferProcessor(u32 port, struct sccb_card *pCurrCard); -static void FPT_busMstrSGDataXferStart(unsigned long port, +static void FPT_busMstrSGDataXferStart(u32 port, struct sccb *pCurrSCCB); -static void FPT_busMstrDataXferStart(unsigned long port, +static void FPT_busMstrDataXferStart(u32 port, struct sccb *pCurrSCCB); -static void FPT_hostDataXferAbort(unsigned long port, unsigned char p_card, +static void FPT_hostDataXferAbort(u32 port, unsigned char p_card, struct sccb *pCurrSCCB); static void FPT_hostDataXferRestart(struct sccb *currSCCB); 
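These prototype changes are the heart of the 64-bit conversion: FlashPoint had been passing its register-file base around as unsigned long, which is 8 bytes on LP64 kernels even though the HARPOON register logic only decodes a 32-bit quantity, so any value above 4 GiB would be silently truncated on its way to the hardware. A minimal user-space sketch of that width mismatch, assuming an LP64 build; reg_write32(), hw_slot and the sample address are illustrative stand-ins, not driver code:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

static u32 hw_slot;			/* stand-in for a 32-bit device register */

static void reg_write32(u32 val)	/* the device latches 32 bits, no more */
{
	hw_slot = val;
}

int main(void)
{
	unsigned long port = 0x100000230UL;	/* needs 33 bits: LP64 only */

	reg_write32(port);			/* high bits silently dropped here */
	printf("driver held 0x%lx, device latched 0x%x\n", port, hw_slot);

	/* Typing the handle u32 end-to-end, as the diff does, makes the
	 * narrowing happen once, visibly, at the point of assignment. */
	u32 port32 = (u32)port;
	reg_write32(port32);
	printf("u32 handle 0x%x survives intact\n", hw_slot);
	return 0;
}

The same reasoning drives the struct conversions earlier in the patch: fields the chip reads directly get fixed-width u32/u16 types, while host-only bookkeeping may stay native-width.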
-static unsigned char FPT_SccbMgr_bad_isr(unsigned long p_port, +static unsigned char FPT_SccbMgr_bad_isr(u32 p_port, unsigned char p_card, struct sccb_card *pCurrCard, unsigned short p_int); @@ -879,28 +885,28 @@ static void FPT_SccbMgrTableInitTarget(unsigned char p_card, static void FPT_scini(unsigned char p_card, unsigned char p_our_id, unsigned char p_power_up); -static int FPT_scarb(unsigned long p_port, unsigned char p_sel_type); -static void FPT_scbusf(unsigned long p_port); -static void FPT_scsel(unsigned long p_port); -static void FPT_scasid(unsigned char p_card, unsigned long p_port); -static unsigned char FPT_scxferc(unsigned long p_port, unsigned char p_data); -static unsigned char FPT_scsendi(unsigned long p_port, +static int FPT_scarb(u32 p_port, unsigned char p_sel_type); +static void FPT_scbusf(u32 p_port); +static void FPT_scsel(u32 p_port); +static void FPT_scasid(unsigned char p_card, u32 p_port); +static unsigned char FPT_scxferc(u32 p_port, unsigned char p_data); +static unsigned char FPT_scsendi(u32 p_port, unsigned char p_id_string[]); -static unsigned char FPT_sciso(unsigned long p_port, +static unsigned char FPT_sciso(u32 p_port, unsigned char p_id_string[]); -static void FPT_scwirod(unsigned long p_port, unsigned char p_data_bit); -static void FPT_scwiros(unsigned long p_port, unsigned char p_data_bit); +static void FPT_scwirod(u32 p_port, unsigned char p_data_bit); +static void FPT_scwiros(u32 p_port, unsigned char p_data_bit); static unsigned char FPT_scvalq(unsigned char p_quintet); -static unsigned char FPT_scsell(unsigned long p_port, unsigned char targ_id); -static void FPT_scwtsel(unsigned long p_port); -static void FPT_inisci(unsigned char p_card, unsigned long p_port, +static unsigned char FPT_scsell(u32 p_port, unsigned char targ_id); +static void FPT_scwtsel(u32 p_port); +static void FPT_inisci(unsigned char p_card, u32 p_port, unsigned char p_our_id); -static void FPT_scsavdi(unsigned char p_card, unsigned long p_port); +static void FPT_scsavdi(unsigned char p_card, u32 p_port); static unsigned char FPT_scmachid(unsigned char p_card, unsigned char p_id_string[]); -static void FPT_autoCmdCmplt(unsigned long p_port, unsigned char p_card); -static void FPT_autoLoadDefaultMap(unsigned long p_port); +static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card); +static void FPT_autoLoadDefaultMap(u32 p_port); static struct sccb_mgr_tar_info FPT_sccbMgrTbl[MAX_CARDS][MAX_SCSI_TAR] = { {{0}} }; @@ -918,7 +924,7 @@ static unsigned char FPT_scamHAString[] = static unsigned short FPT_default_intena = 0; -static void (*FPT_s_PhaseTbl[8]) (unsigned long, unsigned char) = { +static void (*FPT_s_PhaseTbl[8]) (u32, unsigned char) = { 0}; /*--------------------------------------------------------------------- @@ -935,7 +941,7 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) unsigned char i, j, id, ScamFlg; unsigned short temp, temp2, temp3, temp4, temp5, temp6; - unsigned long ioport; + u32 ioport; struct nvram_info *pCurrNvRam; ioport = pCardInfo->si_baseaddr; @@ -1201,23 +1207,21 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo) * *---------------------------------------------------------------------*/ -static unsigned long FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info +static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info *pCardInfo) { struct sccb_card *CurrCard = NULL; struct nvram_info *pCurrNvRam; unsigned char i, j, thisCard, ScamFlg; unsigned short temp, sync_bit_map, id; - 
unsigned long ioport; + u32 ioport; ioport = pCardInfo->si_baseaddr; for (thisCard = 0; thisCard <= MAX_CARDS; thisCard++) { - if (thisCard == MAX_CARDS) { - - return FAILURE; - } + if (thisCard == MAX_CARDS) + return (void *)FAILURE; if (FPT_BL_Card[thisCard].ioPort == ioport) { @@ -1384,16 +1388,16 @@ static unsigned long FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info (unsigned char)(RD_HARPOON((ioport + hp_semaphore)) | SCCB_MGR_PRESENT)); - return (unsigned long)CurrCard; + return (void *)CurrCard; } -static void FlashPoint_ReleaseHostAdapter(unsigned long pCurrCard) +static void FlashPoint_ReleaseHostAdapter(void *pCurrCard) { unsigned char i; - unsigned long portBase; - unsigned long regOffset; - unsigned long scamData; - unsigned long *pScamTbl; + u32 portBase; + u32 regOffset; + u32 scamData; + u32 *pScamTbl; struct nvram_info *pCurrNvRam; pCurrNvRam = ((struct sccb_card *)pCurrCard)->pNvRamInfo; @@ -1414,7 +1418,7 @@ static void FlashPoint_ReleaseHostAdapter(unsigned long pCurrCard) for (i = 0; i < MAX_SCSI_TAR; i++) { regOffset = hp_aramBase + 64 + i * 4; - pScamTbl = (unsigned long *)&pCurrNvRam->niScamTbl[i]; + pScamTbl = (u32 *)&pCurrNvRam->niScamTbl[i]; scamData = *pScamTbl; WR_HARP32(portBase, regOffset, scamData); } @@ -1427,10 +1431,10 @@ static void FlashPoint_ReleaseHostAdapter(unsigned long pCurrCard) static void FPT_RNVRamData(struct nvram_info *pNvRamInfo) { unsigned char i; - unsigned long portBase; - unsigned long regOffset; - unsigned long scamData; - unsigned long *pScamTbl; + u32 portBase; + u32 regOffset; + u32 scamData; + u32 *pScamTbl; pNvRamInfo->niModel = FPT_RdStack(pNvRamInfo->niBaseAddr, 0); pNvRamInfo->niSysConf = FPT_RdStack(pNvRamInfo->niBaseAddr, 1); @@ -1447,26 +1451,25 @@ static void FPT_RNVRamData(struct nvram_info *pNvRamInfo) for (i = 0; i < MAX_SCSI_TAR; i++) { regOffset = hp_aramBase + 64 + i * 4; RD_HARP32(portBase, regOffset, scamData); - pScamTbl = (unsigned long *)&pNvRamInfo->niScamTbl[i]; + pScamTbl = (u32 *)&pNvRamInfo->niScamTbl[i]; *pScamTbl = scamData; } } -static unsigned char FPT_RdStack(unsigned long portBase, unsigned char index) +static unsigned char FPT_RdStack(u32 portBase, unsigned char index) { WR_HARPOON(portBase + hp_stack_addr, index); return RD_HARPOON(portBase + hp_stack_data); } -static void FPT_WrStack(unsigned long portBase, unsigned char index, - unsigned char data) +static void FPT_WrStack(u32 portBase, unsigned char index, unsigned char data) { WR_HARPOON(portBase + hp_stack_addr, index); WR_HARPOON(portBase + hp_stack_data, data); } -static unsigned char FPT_ChkIfChipInitialized(unsigned long ioPort) +static unsigned char FPT_ChkIfChipInitialized(u32 ioPort) { if ((RD_HARPOON(ioPort + hp_arb_id) & 0x0f) != FPT_RdStack(ioPort, 4)) return 0; @@ -1489,15 +1492,16 @@ static unsigned char FPT_ChkIfChipInitialized(unsigned long ioPort) * callback function. 
* *---------------------------------------------------------------------*/ -static void FlashPoint_StartCCB(unsigned long pCurrCard, struct sccb *p_Sccb) +static void FlashPoint_StartCCB(void *curr_card, struct sccb *p_Sccb) { - unsigned long ioport; + u32 ioport; unsigned char thisCard, lun; struct sccb *pSaveSccb; CALL_BK_FN callback; + struct sccb_card *pCurrCard = curr_card; - thisCard = ((struct sccb_card *)pCurrCard)->cardIndex; - ioport = ((struct sccb_card *)pCurrCard)->ioPort; + thisCard = pCurrCard->cardIndex; + ioport = pCurrCard->ioPort; if ((p_Sccb->TargID >= MAX_SCSI_TAR) || (p_Sccb->Lun >= MAX_LUN)) { @@ -1512,18 +1516,18 @@ static void FlashPoint_StartCCB(unsigned long pCurrCard, struct sccb *p_Sccb) FPT_sinits(p_Sccb, thisCard); - if (!((struct sccb_card *)pCurrCard)->cmdCounter) { + if (!pCurrCard->cmdCounter) { WR_HARPOON(ioport + hp_semaphore, (RD_HARPOON(ioport + hp_semaphore) | SCCB_MGR_ACTIVE)); - if (((struct sccb_card *)pCurrCard)->globalFlags & F_GREEN_PC) { + if (pCurrCard->globalFlags & F_GREEN_PC) { WR_HARPOON(ioport + hp_clkctrl_0, CLKCTRL_DEFAULT); WR_HARPOON(ioport + hp_sys_ctrl, 0x00); } } - ((struct sccb_card *)pCurrCard)->cmdCounter++; + pCurrCard->cmdCounter++; if (RD_HARPOON(ioport + hp_semaphore) & BIOS_IN_USE) { @@ -1532,10 +1536,10 @@ static void FlashPoint_StartCCB(unsigned long pCurrCard, struct sccb *p_Sccb) | TICKLE_ME)); if (p_Sccb->OperationCode == RESET_COMMAND) { pSaveSccb = - ((struct sccb_card *)pCurrCard)->currentSCCB; - ((struct sccb_card *)pCurrCard)->currentSCCB = p_Sccb; + pCurrCard->currentSCCB; + pCurrCard->currentSCCB = p_Sccb; FPT_queueSelectFail(&FPT_BL_Card[thisCard], thisCard); - ((struct sccb_card *)pCurrCard)->currentSCCB = + pCurrCard->currentSCCB = pSaveSccb; } else { FPT_queueAddSccb(p_Sccb, thisCard); @@ -1546,10 +1550,10 @@ static void FlashPoint_StartCCB(unsigned long pCurrCard, struct sccb *p_Sccb) if (p_Sccb->OperationCode == RESET_COMMAND) { pSaveSccb = - ((struct sccb_card *)pCurrCard)->currentSCCB; - ((struct sccb_card *)pCurrCard)->currentSCCB = p_Sccb; + pCurrCard->currentSCCB; + pCurrCard->currentSCCB = p_Sccb; FPT_queueSelectFail(&FPT_BL_Card[thisCard], thisCard); - ((struct sccb_card *)pCurrCard)->currentSCCB = + pCurrCard->currentSCCB = pSaveSccb; } else { FPT_queueAddSccb(p_Sccb, thisCard); @@ -1560,34 +1564,29 @@ static void FlashPoint_StartCCB(unsigned long pCurrCard, struct sccb *p_Sccb) MDISABLE_INT(ioport); - if ((((struct sccb_card *)pCurrCard)->globalFlags & F_CONLUN_IO) - && + if ((pCurrCard->globalFlags & F_CONLUN_IO) && ((FPT_sccbMgrTbl[thisCard][p_Sccb->TargID]. 
TarStatus & TAR_TAG_Q_MASK) != TAG_Q_TRYING)) lun = p_Sccb->Lun; else lun = 0; - if ((((struct sccb_card *)pCurrCard)->currentSCCB == NULL) && + if ((pCurrCard->currentSCCB == NULL) && (FPT_sccbMgrTbl[thisCard][p_Sccb->TargID].TarSelQ_Cnt == 0) && (FPT_sccbMgrTbl[thisCard][p_Sccb->TargID].TarLUNBusy[lun] == 0)) { - ((struct sccb_card *)pCurrCard)->currentSCCB = p_Sccb; + pCurrCard->currentSCCB = p_Sccb; FPT_ssel(p_Sccb->SccbIOPort, thisCard); } else { if (p_Sccb->OperationCode == RESET_COMMAND) { - pSaveSccb = - ((struct sccb_card *)pCurrCard)-> - currentSCCB; - ((struct sccb_card *)pCurrCard)->currentSCCB = - p_Sccb; + pSaveSccb = pCurrCard->currentSCCB; + pCurrCard->currentSCCB = p_Sccb; FPT_queueSelectFail(&FPT_BL_Card[thisCard], thisCard); - ((struct sccb_card *)pCurrCard)->currentSCCB = - pSaveSccb; + pCurrCard->currentSCCB = pSaveSccb; } else { FPT_queueAddSccb(p_Sccb, thisCard); } @@ -1607,9 +1606,9 @@ static void FlashPoint_StartCCB(unsigned long pCurrCard, struct sccb *p_Sccb) * callback function. * *---------------------------------------------------------------------*/ -static int FlashPoint_AbortCCB(unsigned long pCurrCard, struct sccb *p_Sccb) +static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb) { - unsigned long ioport; + u32 ioport; unsigned char thisCard; CALL_BK_FN callback; @@ -1715,9 +1714,9 @@ static int FlashPoint_AbortCCB(unsigned long pCurrCard, struct sccb *p_Sccb) * interrupt for this card and disable the IRQ Pin if so. * *---------------------------------------------------------------------*/ -static unsigned char FlashPoint_InterruptPending(unsigned long pCurrCard) +static unsigned char FlashPoint_InterruptPending(void *pCurrCard) { - unsigned long ioport; + u32 ioport; ioport = ((struct sccb_card *)pCurrCard)->ioPort; @@ -1739,38 +1738,36 @@ static unsigned char FlashPoint_InterruptPending(unsigned long pCurrCard) * us. 
* *---------------------------------------------------------------------*/ -static int FlashPoint_HandleInterrupt(unsigned long pCurrCard) +static int FlashPoint_HandleInterrupt(void *pcard) { struct sccb *currSCCB; unsigned char thisCard, result, bm_status, bm_int_st; unsigned short hp_int; unsigned char i, target; - unsigned long ioport; + struct sccb_card *pCurrCard = pcard; + u32 ioport; - thisCard = ((struct sccb_card *)pCurrCard)->cardIndex; - ioport = ((struct sccb_card *)pCurrCard)->ioPort; + thisCard = pCurrCard->cardIndex; + ioport = pCurrCard->ioPort; MDISABLE_INT(ioport); if ((bm_int_st = RD_HARPOON(ioport + hp_int_status)) & EXT_STATUS_ON) - bm_status = - RD_HARPOON(ioport + - hp_ext_status) & (unsigned char)BAD_EXT_STATUS; + bm_status = RD_HARPOON(ioport + hp_ext_status) & + (unsigned char)BAD_EXT_STATUS; else bm_status = 0; WR_HARPOON(ioport + hp_int_mask, (INT_CMD_COMPL | SCSI_INTERRUPT)); - while ((hp_int = - RDW_HARPOON((ioport + - hp_intstat)) & FPT_default_intena) | bm_status) { + while ((hp_int = RDW_HARPOON((ioport + hp_intstat)) & + FPT_default_intena) | bm_status) { - currSCCB = ((struct sccb_card *)pCurrCard)->currentSCCB; + currSCCB = pCurrCard->currentSCCB; if (hp_int & (FIFO | TIMEOUT | RESET | SCAM_SEL) || bm_status) { result = - FPT_SccbMgr_bad_isr(ioport, thisCard, - ((struct sccb_card *)pCurrCard), + FPT_SccbMgr_bad_isr(ioport, thisCard, pCurrCard, hp_int); WRW_HARPOON((ioport + hp_intstat), (FIFO | TIMEOUT | RESET | SCAM_SEL)); @@ -1796,8 +1793,7 @@ static int FlashPoint_HandleInterrupt(unsigned long pCurrCard) (BUS_FREE | RSEL))) ; } - if (((struct sccb_card *)pCurrCard)-> - globalFlags & F_HOST_XFER_ACT) + if (pCurrCard->globalFlags & F_HOST_XFER_ACT) FPT_phaseChkFifo(ioport, thisCard); @@ -1813,14 +1809,11 @@ static int FlashPoint_HandleInterrupt(unsigned long pCurrCard) else if (hp_int & ITAR_DISC) { - if (((struct sccb_card *)pCurrCard)-> - globalFlags & F_HOST_XFER_ACT) { - + if (pCurrCard->globalFlags & F_HOST_XFER_ACT) FPT_phaseChkFifo(ioport, thisCard); - } - - if (RD_HARPOON(ioport + hp_gp_reg_1) == SMSAVE_DATA_PTR) { + if (RD_HARPOON(ioport + hp_gp_reg_1) == + SMSAVE_DATA_PTR) { WR_HARPOON(ioport + hp_gp_reg_1, 0x00); currSCCB->Sccb_XferState |= F_NO_DATA_YET; @@ -1859,8 +1852,7 @@ static int FlashPoint_HandleInterrupt(unsigned long pCurrCard) WRW_HARPOON((ioport + hp_intstat), (BUS_FREE | ITAR_DISC)); - ((struct sccb_card *)pCurrCard)->globalFlags |= - F_NEW_SCCB_CMD; + pCurrCard->globalFlags |= F_NEW_SCCB_CMD; } @@ -1870,10 +1862,8 @@ static int FlashPoint_HandleInterrupt(unsigned long pCurrCard) (PROG_HLT | RSEL | PHASE | BUS_FREE)); if (RDW_HARPOON((ioport + hp_intstat)) & ITAR_DISC) { - if (((struct sccb_card *)pCurrCard)-> - globalFlags & F_HOST_XFER_ACT) { + if (pCurrCard->globalFlags & F_HOST_XFER_ACT) FPT_phaseChkFifo(ioport, thisCard); - } if (RD_HARPOON(ioport + hp_gp_reg_1) == SMSAVE_DATA_PTR) { @@ -1890,8 +1880,7 @@ static int FlashPoint_HandleInterrupt(unsigned long pCurrCard) FPT_queueDisconnect(currSCCB, thisCard); } - FPT_sres(ioport, thisCard, - ((struct sccb_card *)pCurrCard)); + FPT_sres(ioport, thisCard, pCurrCard); FPT_phaseDecode(ioport, thisCard); } @@ -1948,8 +1937,7 @@ static int FlashPoint_HandleInterrupt(unsigned long pCurrCard) WRW_HARPOON((ioport + hp_intstat), BUS_FREE); - if (((struct sccb_card *)pCurrCard)-> - globalFlags & F_HOST_XFER_ACT) { + if (pCurrCard->globalFlags & F_HOST_XFER_ACT) { FPT_hostDataXferAbort(ioport, thisCard, currSCCB); @@ -1961,27 +1949,19 @@ static int FlashPoint_HandleInterrupt(unsigned 
long pCurrCard) else if (hp_int & ITICKLE) { WRW_HARPOON((ioport + hp_intstat), ITICKLE); - ((struct sccb_card *)pCurrCard)->globalFlags |= - F_NEW_SCCB_CMD; + pCurrCard->globalFlags |= F_NEW_SCCB_CMD; } if (((struct sccb_card *)pCurrCard)-> globalFlags & F_NEW_SCCB_CMD) { - ((struct sccb_card *)pCurrCard)->globalFlags &= - ~F_NEW_SCCB_CMD; + pCurrCard->globalFlags &= ~F_NEW_SCCB_CMD; - if (((struct sccb_card *)pCurrCard)->currentSCCB == - NULL) { - - FPT_queueSearchSelect(((struct sccb_card *) - pCurrCard), thisCard); - } + if (pCurrCard->currentSCCB == NULL) + FPT_queueSearchSelect(pCurrCard, thisCard); - if (((struct sccb_card *)pCurrCard)->currentSCCB != - NULL) { - ((struct sccb_card *)pCurrCard)->globalFlags &= - ~F_NEW_SCCB_CMD; + if (pCurrCard->currentSCCB != NULL) { + pCurrCard->globalFlags &= ~F_NEW_SCCB_CMD; FPT_ssel(ioport, thisCard); } @@ -2006,8 +1986,7 @@ static int FlashPoint_HandleInterrupt(unsigned long pCurrCard) * processing time. * *---------------------------------------------------------------------*/ -static unsigned char FPT_SccbMgr_bad_isr(unsigned long p_port, - unsigned char p_card, +static unsigned char FPT_SccbMgr_bad_isr(u32 p_port, unsigned char p_card, struct sccb_card *pCurrCard, unsigned short p_int) { @@ -2254,7 +2233,7 @@ static void FPT_SccbMgrTableInitTarget(unsigned char p_card, * *---------------------------------------------------------------------*/ -static unsigned char FPT_sfm(unsigned long port, struct sccb *pCurrSCCB) +static unsigned char FPT_sfm(u32 port, struct sccb *pCurrSCCB) { unsigned char message; unsigned short TimeOutLoop; @@ -2322,12 +2301,12 @@ static unsigned char FPT_sfm(unsigned long port, struct sccb *pCurrSCCB) * *---------------------------------------------------------------------*/ -static void FPT_ssel(unsigned long port, unsigned char p_card) +static void FPT_ssel(u32 port, unsigned char p_card) { unsigned char auto_loaded, i, target, *theCCB; - unsigned long cdb_reg; + u32 cdb_reg; struct sccb_card *CurrCard; struct sccb *currSCCB; struct sccb_mgr_tar_info *currTar_Info; @@ -2621,7 +2600,7 @@ static void FPT_ssel(unsigned long port, unsigned char p_card) * *---------------------------------------------------------------------*/ -static void FPT_sres(unsigned long port, unsigned char p_card, +static void FPT_sres(u32 port, unsigned char p_card, struct sccb_card *pCurrCard) { @@ -2857,7 +2836,7 @@ static void FPT_sres(unsigned long port, unsigned char p_card, (RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) ; } -static void FPT_SendMsg(unsigned long port, unsigned char message) +static void FPT_SendMsg(u32 port, unsigned char message) { while (!(RD_HARPOON(port + hp_scsisig) & SCSI_REQ)) { if (!(RD_HARPOON(port + hp_scsisig) & SCSI_BSY)) { @@ -2904,8 +2883,7 @@ static void FPT_SendMsg(unsigned long port, unsigned char message) * target device. * *---------------------------------------------------------------------*/ -static void FPT_sdecm(unsigned char message, unsigned long port, - unsigned char p_card) +static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card) { struct sccb *currSCCB; struct sccb_card *CurrCard; @@ -3085,8 +3063,7 @@ static void FPT_sdecm(unsigned char message, unsigned long port, * Description: Decide what to do with the extended message. 
* *---------------------------------------------------------------------*/ -static void FPT_shandem(unsigned long port, unsigned char p_card, - struct sccb *pCurrSCCB) +static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB) { unsigned char length, message; @@ -3153,7 +3130,7 @@ static void FPT_shandem(unsigned long port, unsigned char p_card, * *---------------------------------------------------------------------*/ -static unsigned char FPT_sisyncn(unsigned long port, unsigned char p_card, +static unsigned char FPT_sisyncn(u32 port, unsigned char p_card, unsigned char syncFlag) { struct sccb *currSCCB; @@ -3234,7 +3211,7 @@ static unsigned char FPT_sisyncn(unsigned long port, unsigned char p_card, * necessary. * *---------------------------------------------------------------------*/ -static void FPT_stsyncn(unsigned long port, unsigned char p_card) +static void FPT_stsyncn(u32 port, unsigned char p_card) { unsigned char sync_msg, offset, sync_reg, our_sync_msg; struct sccb *currSCCB; @@ -3363,7 +3340,7 @@ static void FPT_stsyncn(unsigned long port, unsigned char p_card) * Description: Answer the targets sync message. * *---------------------------------------------------------------------*/ -static void FPT_sisyncr(unsigned long port, unsigned char sync_pulse, +static void FPT_sisyncr(u32 port, unsigned char sync_pulse, unsigned char offset) { ARAM_ACCESS(port); @@ -3394,7 +3371,7 @@ static void FPT_sisyncr(unsigned long port, unsigned char sync_pulse, * *---------------------------------------------------------------------*/ -static unsigned char FPT_siwidn(unsigned long port, unsigned char p_card) +static unsigned char FPT_siwidn(u32 port, unsigned char p_card) { struct sccb *currSCCB; struct sccb_mgr_tar_info *currTar_Info; @@ -3449,7 +3426,7 @@ static unsigned char FPT_siwidn(unsigned long port, unsigned char p_card) * necessary. * *---------------------------------------------------------------------*/ -static void FPT_stwidn(unsigned long port, unsigned char p_card) +static void FPT_stwidn(u32 port, unsigned char p_card) { unsigned char width; struct sccb *currSCCB; @@ -3520,7 +3497,7 @@ static void FPT_stwidn(unsigned long port, unsigned char p_card) * Description: Answer the targets Wide nego message. * *---------------------------------------------------------------------*/ -static void FPT_siwidr(unsigned long port, unsigned char width) +static void FPT_siwidr(u32 port, unsigned char width) { ARAM_ACCESS(port); WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + SMEXT)); @@ -3548,7 +3525,7 @@ static void FPT_siwidr(unsigned long port, unsigned char width) * ID specified. * *---------------------------------------------------------------------*/ -static void FPT_sssyncv(unsigned long p_port, unsigned char p_id, +static void FPT_sssyncv(u32 p_port, unsigned char p_id, unsigned char p_sync_value, struct sccb_mgr_tar_info *currTar_Info) { @@ -3620,7 +3597,7 @@ static void FPT_sssyncv(unsigned long p_port, unsigned char p_id, * Description: Reset the desired card's SCSI bus. 
* *---------------------------------------------------------------------*/ -static void FPT_sresb(unsigned long port, unsigned char p_card) +static void FPT_sresb(u32 port, unsigned char p_card) { unsigned char scsiID, i; @@ -3713,7 +3690,7 @@ static void FPT_ssenss(struct sccb_card *pCurrCard) currSCCB->Cdb[4] = currSCCB->RequestSenseLength; currSCCB->Cdb[5] = 0x00; - currSCCB->Sccb_XferCnt = (unsigned long)currSCCB->RequestSenseLength; + currSCCB->Sccb_XferCnt = (u32)currSCCB->RequestSenseLength; currSCCB->Sccb_ATC = 0x00; @@ -3737,7 +3714,7 @@ static void FPT_ssenss(struct sccb_card *pCurrCard) * *---------------------------------------------------------------------*/ -static void FPT_sxfrp(unsigned long p_port, unsigned char p_card) +static void FPT_sxfrp(u32 p_port, unsigned char p_card) { unsigned char curr_phz; @@ -3819,7 +3796,7 @@ static void FPT_sxfrp(unsigned long p_port, unsigned char p_card) * *---------------------------------------------------------------------*/ -static void FPT_schkdd(unsigned long port, unsigned char p_card) +static void FPT_schkdd(u32 port, unsigned char p_card) { unsigned short TimeOutLoop; unsigned char sPhase; @@ -3998,10 +3975,10 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card) * *---------------------------------------------------------------------*/ -static void FPT_phaseDecode(unsigned long p_port, unsigned char p_card) +static void FPT_phaseDecode(u32 p_port, unsigned char p_card) { unsigned char phase_ref; - void (*phase) (unsigned long, unsigned char); + void (*phase) (u32, unsigned char); DISABLE_AUTO(p_port); @@ -4021,7 +3998,7 @@ static void FPT_phaseDecode(unsigned long p_port, unsigned char p_card) * *---------------------------------------------------------------------*/ -static void FPT_phaseDataOut(unsigned long port, unsigned char p_card) +static void FPT_phaseDataOut(u32 port, unsigned char p_card) { struct sccb *currSCCB; @@ -4062,7 +4039,7 @@ static void FPT_phaseDataOut(unsigned long port, unsigned char p_card) * *---------------------------------------------------------------------*/ -static void FPT_phaseDataIn(unsigned long port, unsigned char p_card) +static void FPT_phaseDataIn(u32 port, unsigned char p_card) { struct sccb *currSCCB; @@ -4106,10 +4083,10 @@ static void FPT_phaseDataIn(unsigned long port, unsigned char p_card) * *---------------------------------------------------------------------*/ -static void FPT_phaseCommand(unsigned long p_port, unsigned char p_card) +static void FPT_phaseCommand(u32 p_port, unsigned char p_card) { struct sccb *currSCCB; - unsigned long cdb_reg; + u32 cdb_reg; unsigned char i; currSCCB = FPT_BL_Card[p_card].currentSCCB; @@ -4157,7 +4134,7 @@ static void FPT_phaseCommand(unsigned long p_port, unsigned char p_card) * *---------------------------------------------------------------------*/ -static void FPT_phaseStatus(unsigned long port, unsigned char p_card) +static void FPT_phaseStatus(u32 port, unsigned char p_card) { /* Start-up the automation to finish off this command and let the isr handle the interrupt for command complete when it comes in. 
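Every FPT_phase* handler keeps the same (u32 port, unsigned char card) shape because FPT_phaseDecode dispatches them through the eight-slot FPT_s_PhaseTbl seen earlier, indexed by the three SCSI phase signals (MSG, C/D and I/O); the change here merely re-types the table entries and the local function pointer. A reduced, self-contained sketch of that table-driven dispatch, with placeholder handler bodies and a hard-coded phase value standing in for the real bus read:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* MSG, C/D and I/O encode one of eight SCSI bus phases (values 4 and 5
 * are reserved), so an 8-entry function table covers every case. */
static void phase_data_out(u32 port, unsigned char card) { puts("DATA OUT"); }
static void phase_data_in(u32 port, unsigned char card)  { puts("DATA IN"); }
static void phase_command(u32 port, unsigned char card)  { puts("COMMAND"); }
static void phase_status(u32 port, unsigned char card)   { puts("STATUS"); }
static void phase_illegal(u32 port, unsigned char card)  { puts("illegal"); }
static void phase_msg_out(u32 port, unsigned char card)  { puts("MSG OUT"); }
static void phase_msg_in(u32 port, unsigned char card)   { puts("MSG IN"); }

static void (*phase_tbl[8])(u32, unsigned char) = {
	phase_data_out, phase_data_in, phase_command, phase_status,
	phase_illegal, phase_illegal, phase_msg_out, phase_msg_in,
};

int main(void)
{
	unsigned char phase_ref = 2;	/* pretend the bus bits read COMMAND */

	/* One masked table lookup replaces a switch over the phase bits. */
	phase_tbl[phase_ref & 0x07](0x230, 0);
	return 0;
}
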
@@ -4178,7 +4155,7 @@ static void FPT_phaseStatus(unsigned long port, unsigned char p_card) * *---------------------------------------------------------------------*/ -static void FPT_phaseMsgOut(unsigned long port, unsigned char p_card) +static void FPT_phaseMsgOut(u32 port, unsigned char p_card) { unsigned char message, scsiID; struct sccb *currSCCB; @@ -4317,7 +4294,7 @@ static void FPT_phaseMsgOut(unsigned long port, unsigned char p_card) * *---------------------------------------------------------------------*/ -static void FPT_phaseMsgIn(unsigned long port, unsigned char p_card) +static void FPT_phaseMsgIn(u32 port, unsigned char p_card) { unsigned char message; struct sccb *currSCCB; @@ -4364,7 +4341,7 @@ static void FPT_phaseMsgIn(unsigned long port, unsigned char p_card) * *---------------------------------------------------------------------*/ -static void FPT_phaseIllegal(unsigned long port, unsigned char p_card) +static void FPT_phaseIllegal(u32 port, unsigned char p_card) { struct sccb *currSCCB; @@ -4390,9 +4367,9 @@ static void FPT_phaseIllegal(unsigned long port, unsigned char p_card) * *---------------------------------------------------------------------*/ -static void FPT_phaseChkFifo(unsigned long port, unsigned char p_card) +static void FPT_phaseChkFifo(u32 port, unsigned char p_card) { - unsigned long xfercnt; + u32 xfercnt; struct sccb *currSCCB; currSCCB = FPT_BL_Card[p_card].currentSCCB; @@ -4461,7 +4438,7 @@ static void FPT_phaseChkFifo(unsigned long port, unsigned char p_card) * because of command complete or from a disconnect. * *---------------------------------------------------------------------*/ -static void FPT_phaseBusFree(unsigned long port, unsigned char p_card) +static void FPT_phaseBusFree(u32 port, unsigned char p_card) { struct sccb *currSCCB; @@ -4557,9 +4534,9 @@ static void FPT_phaseBusFree(unsigned long port, unsigned char p_card) * Description: Load the Automation RAM with the default map values.
* *---------------------------------------------------------------------*/ -static void FPT_autoLoadDefaultMap(unsigned long p_port) +static void FPT_autoLoadDefaultMap(u32 p_port) { - unsigned long map_addr; + u32 map_addr; ARAM_ACCESS(p_port); map_addr = p_port + hp_aramBase; @@ -4663,7 +4640,7 @@ static void FPT_autoLoadDefaultMap(unsigned long p_port) * *---------------------------------------------------------------------*/ -static void FPT_autoCmdCmplt(unsigned long p_port, unsigned char p_card) +static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card) { struct sccb *currSCCB; unsigned char status_byte; @@ -4936,8 +4913,7 @@ static void FPT_autoCmdCmplt(unsigned long p_port, unsigned char p_card) * *---------------------------------------------------------------------*/ -static void FPT_dataXferProcessor(unsigned long port, - struct sccb_card *pCurrCard) +static void FPT_dataXferProcessor(u32 port, struct sccb_card *pCurrCard) { struct sccb *currSCCB; @@ -4970,22 +4946,18 @@ static void FPT_dataXferProcessor(unsigned long port, * Description: * *---------------------------------------------------------------------*/ -static void FPT_busMstrSGDataXferStart(unsigned long p_port, - struct sccb *pcurrSCCB) +static void FPT_busMstrSGDataXferStart(u32 p_port, struct sccb *pcurrSCCB) { - unsigned long count, addr, tmpSGCnt; + u32 count, addr, tmpSGCnt; unsigned int sg_index; unsigned char sg_count, i; - unsigned long reg_offset; - - if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) { + u32 reg_offset; + struct blogic_sg_seg *segp; - count = ((unsigned long)HOST_RD_CMD) << 24; - } - - else { - count = ((unsigned long)HOST_WRT_CMD) << 24; - } + if (pcurrSCCB->Sccb_XferState & F_HOST_XFER_DIR) + count = ((u32)HOST_RD_CMD) << 24; + else + count = ((u32)HOST_WRT_CMD) << 24; sg_count = 0; tmpSGCnt = 0; @@ -4998,25 +4970,20 @@ static void FPT_busMstrSGDataXferStart(unsigned long p_port, WR_HARPOON(p_port + hp_page_ctrl, i); while ((sg_count < (unsigned char)SG_BUF_CNT) && - ((unsigned long)(sg_index * (unsigned int)SG_ELEMENT_SIZE) < - pcurrSCCB->DataLength)) { - - tmpSGCnt += *(((unsigned long *)pcurrSCCB->DataPointer) + - (sg_index * 2)); - - count |= *(((unsigned long *)pcurrSCCB->DataPointer) + - (sg_index * 2)); + ((sg_index * (unsigned int)SG_ELEMENT_SIZE) < + pcurrSCCB->DataLength)) { - addr = *(((unsigned long *)pcurrSCCB->DataPointer) + - ((sg_index * 2) + 1)); + segp = (struct blogic_sg_seg *)(pcurrSCCB->DataPointer) + + sg_index; + tmpSGCnt += segp->segbytes; + count |= segp->segbytes; + addr = segp->segdata; if ((!sg_count) && (pcurrSCCB->Sccb_SGoffset)) { - addr += ((count & 0x00FFFFFFL) - pcurrSCCB->Sccb_SGoffset); count = (count & 0xFF000000L) | pcurrSCCB->Sccb_SGoffset; - tmpSGCnt = count & 0x00FFFFFFL; } @@ -5072,17 +5039,15 @@ static void FPT_busMstrSGDataXferStart(unsigned long p_port, * Description: * *---------------------------------------------------------------------*/ -static void FPT_busMstrDataXferStart(unsigned long p_port, - struct sccb *pcurrSCCB) +static void FPT_busMstrDataXferStart(u32 p_port, struct sccb *pcurrSCCB) { - unsigned long addr, count; + u32 addr, count; if (!(pcurrSCCB->Sccb_XferState & F_AUTO_SENSE)) { count = pcurrSCCB->Sccb_XferCnt; - addr = - (unsigned long)pcurrSCCB->DataPointer + pcurrSCCB->Sccb_ATC; + addr = (u32)(unsigned long)pcurrSCCB->DataPointer + pcurrSCCB->Sccb_ATC; } else { @@ -5127,7 +5092,7 @@ static void FPT_busMstrDataXferStart(unsigned long p_port, * command busy is also time out, it'll just give up. 
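FPT_busMstrSGDataXferStart() above drops the raw (unsigned long *) walk over the scatter-gather list in favour of a typed struct blogic_sg_seg. A hedged sketch of the pattern: the two-u32 layout is an assumption read off the segbytes/segdata accesses, and the masks mirror the 0xFF000000/0x00FFFFFF split in the hunk, which suggests an 8-bit DMA command in the top byte over a 24-bit byte count.

struct blogic_sg_seg {		/* assumed layout */
	u32 segbytes;		/* length of this segment */
	u32 segdata;		/* 32-bit bus address of this segment */
};

static u32 pack_dma_count(u8 cmd, u32 bytes)
{
	return ((u32)cmd << 24) | (bytes & 0x00FFFFFF);
}

static u32 sg_bytes_total(const struct blogic_sg_seg *seg, unsigned int nsegs)
{
	u32 total = 0;

	while (nsegs--)
		total += seg[nsegs].segbytes;	/* typed access, no word-offset math */
	return total;
}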
* *---------------------------------------------------------------------*/ -static unsigned char FPT_busMstrTimeOut(unsigned long p_port) +static unsigned char FPT_busMstrTimeOut(u32 p_port) { unsigned long timeout; @@ -5166,13 +5131,14 @@ static unsigned char FPT_busMstrTimeOut(unsigned long p_port) * Description: Abort any in progress transfer. * *---------------------------------------------------------------------*/ -static void FPT_hostDataXferAbort(unsigned long port, unsigned char p_card, +static void FPT_hostDataXferAbort(u32 port, unsigned char p_card, struct sccb *pCurrSCCB) { unsigned long timeout; unsigned long remain_cnt; - unsigned int sg_ptr; + u32 sg_ptr; + struct blogic_sg_seg *segp; FPT_BL_Card[p_card].globalFlags &= ~F_HOST_XFER_ACT; @@ -5236,9 +5202,8 @@ static void FPT_hostDataXferAbort(unsigned long port, unsigned char p_card, (unsigned int)(pCurrSCCB->DataLength / SG_ELEMENT_SIZE)) { - sg_ptr = - (unsigned int)(pCurrSCCB->DataLength / - SG_ELEMENT_SIZE); + sg_ptr = (u32)(pCurrSCCB->DataLength / + SG_ELEMENT_SIZE); } remain_cnt = pCurrSCCB->Sccb_XferCnt; @@ -5246,23 +5211,13 @@ static void FPT_hostDataXferAbort(unsigned long port, unsigned char p_card, while (remain_cnt < 0x01000000L) { sg_ptr--; - - if (remain_cnt > - (unsigned - long)(*(((unsigned long *)pCurrSCCB-> - DataPointer) + (sg_ptr * 2)))) { - + segp = (struct blogic_sg_seg *)(pCurrSCCB-> + DataPointer) + (sg_ptr * 2); + if (remain_cnt > (unsigned long)segp->segbytes) remain_cnt -= - (unsigned - long)(*(((unsigned long *) - pCurrSCCB->DataPointer) + - (sg_ptr * 2))); - } - - else { - + (unsigned long)segp->segbytes; + else break; - } } if (remain_cnt < 0x01000000L) { @@ -5418,23 +5373,18 @@ static void FPT_hostDataXferAbort(unsigned long port, unsigned char p_card, pCurrSCCB->Sccb_SGoffset = 0x00; - if ((unsigned long)(pCurrSCCB->Sccb_sgseg * - SG_ELEMENT_SIZE) >= - pCurrSCCB->DataLength) { + if ((u32)(pCurrSCCB->Sccb_sgseg * SG_ELEMENT_SIZE) >= + pCurrSCCB->DataLength) { pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED; - pCurrSCCB->Sccb_sgseg = (unsigned short)(pCurrSCCB->DataLength / SG_ELEMENT_SIZE); - } } else { - if (!(pCurrSCCB->Sccb_XferState & F_AUTO_SENSE)) - pCurrSCCB->Sccb_XferState |= F_ALL_XFERRED; } } @@ -5454,21 +5404,22 @@ static void FPT_hostDataXferRestart(struct sccb *currSCCB) { unsigned long data_count; unsigned int sg_index; - unsigned long *sg_ptr; + struct blogic_sg_seg *segp; if (currSCCB->Sccb_XferState & F_SG_XFER) { currSCCB->Sccb_XferCnt = 0; sg_index = 0xffff; /*Index by long words into sg list. */ - data_count = 0; /*Running count of SG xfer counts. */ + data_count = 0; /*Running count of SG xfer counts. 
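The FPT_hostDataXferAbort() hunk above now walks back through the typed segment list until the outstanding byte count fits inside the segment that was in flight. A simplified sketch of that residual bookkeeping, reusing the blogic_sg_seg layout assumed earlier (the real code additionally keeps a sentinel in the count's top byte):

static u32 residual_after_abort(const struct blogic_sg_seg *seg,
				u32 nsegs, u32 remain)
{
	/* peel off segments that completed before the abort point */
	while (nsegs) {
		const struct blogic_sg_seg *s = &seg[nsegs - 1];

		if (remain <= s->segbytes)
			break;
		remain -= s->segbytes;
		nsegs--;
	}
	return remain;
}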
*/ - sg_ptr = (unsigned long *)currSCCB->DataPointer; while (data_count < currSCCB->Sccb_ATC) { sg_index++; - data_count += *(sg_ptr + (sg_index * 2)); + segp = (struct blogic_sg_seg *)(currSCCB->DataPointer) + + (sg_index * 2); + data_count += segp->segbytes; } if (data_count == currSCCB->Sccb_ATC) { @@ -5504,7 +5455,7 @@ static void FPT_scini(unsigned char p_card, unsigned char p_our_id, { unsigned char loser, assigned_id; - unsigned long p_port; + u32 p_port; unsigned char i, k, ScamFlg; struct sccb_card *currCard; @@ -5709,7 +5660,7 @@ static void FPT_scini(unsigned char p_card, unsigned char p_our_id, * *---------------------------------------------------------------------*/ -static int FPT_scarb(unsigned long p_port, unsigned char p_sel_type) +static int FPT_scarb(u32 p_port, unsigned char p_sel_type) { if (p_sel_type == INIT_SELTD) { @@ -5771,7 +5722,7 @@ static int FPT_scarb(unsigned long p_port, unsigned char p_sel_type) * *---------------------------------------------------------------------*/ -static void FPT_scbusf(unsigned long p_port) +static void FPT_scbusf(u32 p_port) { WR_HARPOON(p_port + hp_page_ctrl, (RD_HARPOON(p_port + hp_page_ctrl) | G_INT_DISABLE)); @@ -5803,7 +5754,7 @@ static void FPT_scbusf(unsigned long p_port) * *---------------------------------------------------------------------*/ -static void FPT_scasid(unsigned char p_card, unsigned long p_port) +static void FPT_scasid(unsigned char p_card, u32 p_port) { unsigned char temp_id_string[ID_STRING_LENGTH]; @@ -5880,7 +5831,7 @@ static void FPT_scasid(unsigned char p_card, unsigned long p_port) * *---------------------------------------------------------------------*/ -static void FPT_scsel(unsigned long p_port) +static void FPT_scsel(u32 p_port) { WR_HARPOON(p_port + hp_scsisig, SCSI_SEL); @@ -5914,7 +5865,7 @@ static void FPT_scsel(unsigned long p_port) * *---------------------------------------------------------------------*/ -static unsigned char FPT_scxferc(unsigned long p_port, unsigned char p_data) +static unsigned char FPT_scxferc(u32 p_port, unsigned char p_data) { unsigned char curr_data, ret_data; @@ -5964,8 +5915,7 @@ static unsigned char FPT_scxferc(unsigned long p_port, unsigned char p_data) * *---------------------------------------------------------------------*/ -static unsigned char FPT_scsendi(unsigned long p_port, - unsigned char p_id_string[]) +static unsigned char FPT_scsendi(u32 p_port, unsigned char p_id_string[]) { unsigned char ret_data, byte_cnt, bit_cnt, defer; @@ -6016,8 +5966,7 @@ static unsigned char FPT_scsendi(unsigned long p_port, * *---------------------------------------------------------------------*/ -static unsigned char FPT_sciso(unsigned long p_port, - unsigned char p_id_string[]) +static unsigned char FPT_sciso(u32 p_port, unsigned char p_id_string[]) { unsigned char ret_data, the_data, byte_cnt, bit_cnt; @@ -6075,7 +6024,7 @@ static unsigned char FPT_sciso(unsigned long p_port, * *---------------------------------------------------------------------*/ -static void FPT_scwirod(unsigned long p_port, unsigned char p_data_bit) +static void FPT_scwirod(u32 p_port, unsigned char p_data_bit) { unsigned char i; @@ -6102,7 +6051,7 @@ static void FPT_scwirod(unsigned long p_port, unsigned char p_data_bit) * *---------------------------------------------------------------------*/ -static void FPT_scwiros(unsigned long p_port, unsigned char p_data_bit) +static void FPT_scwiros(u32 p_port, unsigned char p_data_bit) { unsigned char i; @@ -6154,7 +6103,7 @@ static unsigned char 
FPT_scvalq(unsigned char p_quintet) * *---------------------------------------------------------------------*/ -static unsigned char FPT_scsell(unsigned long p_port, unsigned char targ_id) +static unsigned char FPT_scsell(u32 p_port, unsigned char targ_id) { unsigned long i; @@ -6236,7 +6185,7 @@ static unsigned char FPT_scsell(unsigned long p_port, unsigned char targ_id) * *---------------------------------------------------------------------*/ -static void FPT_scwtsel(unsigned long p_port) +static void FPT_scwtsel(u32 p_port) { while (!(RDW_HARPOON((p_port + hp_intstat)) & SCAM_SEL)) { } @@ -6250,8 +6199,7 @@ static void FPT_scwtsel(unsigned long p_port) * *---------------------------------------------------------------------*/ -static void FPT_inisci(unsigned char p_card, unsigned long p_port, - unsigned char p_our_id) +static void FPT_inisci(unsigned char p_card, u32 p_port, unsigned char p_our_id) { unsigned char i, k, max_id; unsigned short ee_data; @@ -6437,7 +6385,7 @@ static unsigned char FPT_scmachid(unsigned char p_card, * *---------------------------------------------------------------------*/ -static void FPT_scsavdi(unsigned char p_card, unsigned long p_port) +static void FPT_scsavdi(unsigned char p_card, u32 p_port) { unsigned char i, k, max_id; unsigned short ee_data, sum_data; @@ -6482,7 +6430,7 @@ static void FPT_scsavdi(unsigned char p_card, unsigned long p_port) * *---------------------------------------------------------------------*/ -static void FPT_XbowInit(unsigned long port, unsigned char ScamFlg) +static void FPT_XbowInit(u32 port, unsigned char ScamFlg) { unsigned char i; @@ -6531,7 +6479,7 @@ static void FPT_XbowInit(unsigned long port, unsigned char ScamFlg) * *---------------------------------------------------------------------*/ -static void FPT_BusMasterInit(unsigned long p_port) +static void FPT_BusMasterInit(u32 p_port) { WR_HARPOON(p_port + hp_sys_ctrl, DRVR_RST); @@ -6558,7 +6506,7 @@ static void FPT_BusMasterInit(unsigned long p_port) * *---------------------------------------------------------------------*/ -static void FPT_DiagEEPROM(unsigned long p_port) +static void FPT_DiagEEPROM(u32 p_port) { unsigned short index, temp, max_wd_cnt; @@ -7206,7 +7154,7 @@ static void FPT_utilUpdateResidual(struct sccb *p_SCCB) { unsigned long partial_cnt; unsigned int sg_index; - unsigned long *sg_ptr; + struct blogic_sg_seg *segp; if (p_SCCB->Sccb_XferState & F_ALL_XFERRED) { @@ -7219,7 +7167,6 @@ static void FPT_utilUpdateResidual(struct sccb *p_SCCB) sg_index = p_SCCB->Sccb_sgseg; - sg_ptr = (unsigned long *)p_SCCB->DataPointer; if (p_SCCB->Sccb_SGoffset) { @@ -7229,8 +7176,9 @@ static void FPT_utilUpdateResidual(struct sccb *p_SCCB) while (((unsigned long)sg_index * (unsigned long)SG_ELEMENT_SIZE) < p_SCCB->DataLength) { - - partial_cnt += *(sg_ptr + (sg_index * 2)); + segp = (struct blogic_sg_seg *)(p_SCCB->DataPointer) + + (sg_index * 2); + partial_cnt += segp->segbytes; sg_index++; } @@ -7251,7 +7199,7 @@ static void FPT_utilUpdateResidual(struct sccb *p_SCCB) * *---------------------------------------------------------------------*/ -static void FPT_Wait1Second(unsigned long p_port) +static void FPT_Wait1Second(u32 p_port) { unsigned char i; @@ -7275,7 +7223,7 @@ static void FPT_Wait1Second(unsigned long p_port) * *---------------------------------------------------------------------*/ -static void FPT_Wait(unsigned long p_port, unsigned char p_delay) +static void FPT_Wait(u32 p_port, unsigned char p_delay) { unsigned char old_timer; unsigned char 
green_flag; @@ -7321,7 +7269,7 @@ static void FPT_Wait(unsigned long p_port, unsigned char p_delay) * *---------------------------------------------------------------------*/ -static void FPT_utilEEWriteOnOff(unsigned long p_port, unsigned char p_mode) +static void FPT_utilEEWriteOnOff(u32 p_port, unsigned char p_mode) { unsigned char ee_value; @@ -7350,7 +7298,7 @@ static void FPT_utilEEWriteOnOff(unsigned long p_port, unsigned char p_mode) * *---------------------------------------------------------------------*/ -static void FPT_utilEEWrite(unsigned long p_port, unsigned short ee_data, +static void FPT_utilEEWrite(u32 p_port, unsigned short ee_data, unsigned short ee_addr) { @@ -7401,7 +7349,7 @@ static void FPT_utilEEWrite(unsigned long p_port, unsigned short ee_data, * *---------------------------------------------------------------------*/ -static unsigned short FPT_utilEERead(unsigned long p_port, +static unsigned short FPT_utilEERead(u32 p_port, unsigned short ee_addr) { unsigned short i, ee_data1, ee_data2; @@ -7431,8 +7379,7 @@ static unsigned short FPT_utilEERead(unsigned long p_port, * *---------------------------------------------------------------------*/ -static unsigned short FPT_utilEEReadOrg(unsigned long p_port, - unsigned short ee_addr) +static unsigned short FPT_utilEEReadOrg(u32 p_port, unsigned short ee_addr) { unsigned char ee_value; @@ -7479,7 +7426,7 @@ static unsigned short FPT_utilEEReadOrg(unsigned long p_port, * *---------------------------------------------------------------------*/ -static void FPT_utilEESendCmdAddr(unsigned long p_port, unsigned char ee_cmd, +static void FPT_utilEESendCmdAddr(u32 p_port, unsigned char ee_cmd, unsigned short ee_addr) { unsigned char ee_value; @@ -7573,47 +7520,45 @@ static unsigned char FPT_CalcLrc(unsigned char buffer[]) */ static inline unsigned char -FlashPoint__ProbeHostAdapter(struct FlashPoint_Info *FlashPointInfo) +FlashPoint__ProbeHostAdapter(struct fpoint_info *FlashPointInfo) { return FlashPoint_ProbeHostAdapter((struct sccb_mgr_info *) FlashPointInfo); } -static inline FlashPoint_CardHandle_T -FlashPoint__HardwareResetHostAdapter(struct FlashPoint_Info *FlashPointInfo) +static inline void * +FlashPoint__HardwareResetHostAdapter(struct fpoint_info *FlashPointInfo) { return FlashPoint_HardwareResetHostAdapter((struct sccb_mgr_info *) FlashPointInfo); } static inline void -FlashPoint__ReleaseHostAdapter(FlashPoint_CardHandle_T CardHandle) +FlashPoint__ReleaseHostAdapter(void *CardHandle) { FlashPoint_ReleaseHostAdapter(CardHandle); } static inline void -FlashPoint__StartCCB(FlashPoint_CardHandle_T CardHandle, - struct BusLogic_CCB *CCB) +FlashPoint__StartCCB(void *CardHandle, struct blogic_ccb *CCB) { FlashPoint_StartCCB(CardHandle, (struct sccb *)CCB); } static inline void -FlashPoint__AbortCCB(FlashPoint_CardHandle_T CardHandle, - struct BusLogic_CCB *CCB) +FlashPoint__AbortCCB(void *CardHandle, struct blogic_ccb *CCB) { FlashPoint_AbortCCB(CardHandle, (struct sccb *)CCB); } static inline bool -FlashPoint__InterruptPending(FlashPoint_CardHandle_T CardHandle) +FlashPoint__InterruptPending(void *CardHandle) { return FlashPoint_InterruptPending(CardHandle); } static inline int -FlashPoint__HandleInterrupt(FlashPoint_CardHandle_T CardHandle) +FlashPoint__HandleInterrupt(void *CardHandle) { return FlashPoint_HandleInterrupt(CardHandle); } @@ -7632,13 +7577,12 @@ FlashPoint__HandleInterrupt(FlashPoint_CardHandle_T CardHandle) Define prototypes for the FlashPoint SCCB Manager Functions. 
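These wrappers, and the prototypes that follow, replace the FlashPoint_CardHandle_T typedef with a bare void *. The handle stays opaque: BusLogic.c never dereferences it, it only threads the pointer back into the SCCB manager entry points. A small usage sketch (the caller is hypothetical; the FlashPoint_* calls are the real prototypes):

static void demo_service_card(struct fpoint_info *info)
{
	void *card = FlashPoint_HardwareResetHostAdapter(info);

	if (FlashPoint_InterruptPending(card))
		FlashPoint_HandleInterrupt(card);	/* handle passed back opaquely */

	FlashPoint_ReleaseHostAdapter(card);
}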
*/ -extern unsigned char FlashPoint_ProbeHostAdapter(struct FlashPoint_Info *); -extern FlashPoint_CardHandle_T -FlashPoint_HardwareResetHostAdapter(struct FlashPoint_Info *); -extern void FlashPoint_StartCCB(FlashPoint_CardHandle_T, struct BusLogic_CCB *); -extern int FlashPoint_AbortCCB(FlashPoint_CardHandle_T, struct BusLogic_CCB *); -extern bool FlashPoint_InterruptPending(FlashPoint_CardHandle_T); -extern int FlashPoint_HandleInterrupt(FlashPoint_CardHandle_T); -extern void FlashPoint_ReleaseHostAdapter(FlashPoint_CardHandle_T); +extern unsigned char FlashPoint_ProbeHostAdapter(struct fpoint_info *); +extern void *FlashPoint_HardwareResetHostAdapter(struct fpoint_info *); +extern void FlashPoint_StartCCB(void *, struct blogic_ccb *); +extern int FlashPoint_AbortCCB(void *, struct blogic_ccb *); +extern bool FlashPoint_InterruptPending(void *); +extern int FlashPoint_HandleInterrupt(void *); +extern void FlashPoint_ReleaseHostAdapter(void *); #endif /* CONFIG_SCSI_FLASHPOINT */ diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index db95c547c09..baca5897039 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -204,9 +204,9 @@ config SCSI_MULTI_LUN Some devices support more than one LUN (Logical Unit Number) in order to allow access to several media, e.g. CD jukebox, USB card reader, mobile phone in mass storage mode. This option forces the kernel to - probe for all LUNs by default. This setting can be overriden by + probe for all LUNs by default. This setting can be overridden by max_luns boot/module parameter. Note that this option does not affect - devices conforming to SCSI-3 or higher as they can explicitely report + devices conforming to SCSI-3 or higher as they can explicitly report their number of LUNs. It is safe to say Y here unless you have one of those rare devices which reacts in an unexpected way when probed for multiple LUNs. @@ -263,6 +263,9 @@ config SCSI_SCAN_ASYNC You can override this choice by specifying "scsi_mod.scan=sync" or async on the kernel's command line. + Note that this setting also affects whether resuming from + system suspend will be performed asynchronously. + menu "SCSI Transports" depends on SCSI @@ -499,47 +502,6 @@ config SCSI_AACRAID source "drivers/scsi/aic7xxx/Kconfig.aic7xxx" - -config SCSI_AIC7XXX_OLD - tristate "Adaptec AIC7xxx support (old driver)" - depends on (ISA || EISA || PCI ) && SCSI - help - WARNING This driver is an older aic7xxx driver and is no longer - under active development. Adaptec, Inc. is writing a new driver to - take the place of this one, and it is recommended that whenever - possible, people should use the new Adaptec written driver instead - of this one. This driver will eventually be phased out entirely. - - This is support for the various aic7xxx based Adaptec SCSI - controllers. These include the 274x EISA cards; 284x VLB cards; - 2902, 2910, 293x, 294x, 394x, 3985 and several other PCI and - motherboard based SCSI controllers from Adaptec. It does not support - the AAA-13x RAID controllers from Adaptec, nor will it likely ever - support them. It does not support the 2920 cards from Adaptec that - use the Future Domain SCSI controller chip. For those cards, you - need the "Future Domain 16xx SCSI support" driver. - - In general, if the controller is based on an Adaptec SCSI controller - chip from the aic777x series or the aic78xx series, this driver - should work. The only exception is the 7810 which is specifically - not supported (that's the RAID controller chip on the AAA-13x - cards). 
- - Note that the AHA2920 SCSI host adapter is *not* supported by this - driver; choose "Future Domain 16xx SCSI support" instead if you have - one of those. - - Information on the configuration options for this controller can be - found by checking the help file for each of the available - configuration options. You should read - <file:Documentation/scsi/aic7xxx_old.txt> at a minimum before - contacting the maintainer with any questions. The SCSI-HOWTO, - available from <http://www.tldp.org/docs.html#howto>, can also - be of great help. - - To compile this driver as a module, choose M here: the - module will be called aic7xxx_old. - source "drivers/scsi/aic7xxx/Kconfig.aic79xx" source "drivers/scsi/aic94xx/Kconfig" source "drivers/scsi/mvsas/Kconfig" @@ -601,6 +563,7 @@ config SCSI_ARCMSR To compile this driver as a module, choose M here: the module will be called arcmsr (modprobe arcmsr). +source "drivers/scsi/esas2r/Kconfig" source "drivers/scsi/megaraid/Kconfig.megaraid" source "drivers/scsi/mpt2sas/Kconfig" source "drivers/scsi/mpt3sas/Kconfig" @@ -633,7 +596,7 @@ config SCSI_BUSLOGIC config SCSI_FLASHPOINT bool "FlashPoint support" - depends on SCSI_BUSLOGIC && PCI && X86_32 + depends on SCSI_BUSLOGIC && PCI help This option allows you to add FlashPoint support to the BusLogic SCSI driver. The FlashPoint SCCB Manager code is @@ -1353,6 +1316,7 @@ config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI select SCSI_FC_ATTRS + select CRC_T10DIF help This lpfc driver supports the Emulex LightPulse Family of Fibre Channel PCI host adapters. @@ -1809,6 +1773,7 @@ config SCSI_BFA_FC config SCSI_VIRTIO tristate "virtio-scsi support" depends on VIRTIO + select BLK_DEV_INTEGRITY help This is the virtual HBA driver for virtio. If the kernel will be used in a virtual machine, say Y or M. diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index b607ba4f563..e172d4f8e02 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -70,7 +70,6 @@ obj-$(CONFIG_SCSI_AHA1740) += aha1740.o obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx/ obj-$(CONFIG_SCSI_AIC79XX) += aic7xxx/ obj-$(CONFIG_SCSI_AACRAID) += aacraid/ -obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/ obj-$(CONFIG_SCSI_PM8001) += pm8001/ obj-$(CONFIG_SCSI_ISCI) += isci/ @@ -141,6 +140,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/ +obj-$(CONFIG_SCSI_ESAS2R) += esas2r/ obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 450353e04dd..93d13fc9a29 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -27,8 +27,6 @@ */ /* - * $Log: NCR5380.c,v $ - * Revision 1.10 1998/9/2 Alan Cox * (alan@lxorguk.ukuu.org.uk) * Fixed up the timer lockups reported so far. Things still suck. 
Looking @@ -89,13 +87,6 @@ #include <scsi/scsi_dbg.h> #include <scsi/scsi_transport_spi.h> -#ifndef NDEBUG -#define NDEBUG 0 -#endif -#ifndef NDEBUG_ABORT -#define NDEBUG_ABORT 0 -#endif - #if (NDEBUG & NDEBUG_LISTS) #define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); } #define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); } @@ -584,7 +575,7 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance, NCR5380_setup(instance); for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1) - if ((mask & possible) && (request_irq(i, &probe_intr, IRQF_DISABLED, "NCR-probe", NULL) == 0)) + if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0)) trying_irqs |= mask; timeout = jiffies + (250 * HZ / 1000); @@ -695,33 +686,35 @@ static void NCR5380_print_status(struct Scsi_Host *instance) * Return the number of bytes read from or written */ +static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance, + char *buffer, int length) +{ +#ifdef DTC_PUBLIC_RELEASE + dtc_wmaxi = dtc_maxi = 0; +#endif +#ifdef PAS16_PUBLIC_RELEASE + pas_wmaxi = pas_maxi = 0; +#endif + return (-ENOSYS); /* Currently this is a no-op */ +} + #undef SPRINTF -#define SPRINTF(args...) do { if(pos < buffer + length-80) pos += sprintf(pos, ## args); } while(0) +#define SPRINTF(args...) seq_printf(m, ## args) static -char *lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, char *pos, char *buffer, int length); +void lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, struct seq_file *m); static -char *lprint_command(unsigned char *cmd, char *pos, char *buffer, int len); +void lprint_command(unsigned char *cmd, struct seq_file *m); static -char *lprint_opcode(int opcode, char *pos, char *buffer, int length); +void lprint_opcode(int opcode, struct seq_file *m); -static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance, - char *buffer, char **start, off_t offset, int length, int inout) +static int __maybe_unused NCR5380_show_info(struct seq_file *m, + struct Scsi_Host *instance) { - char *pos = buffer; struct NCR5380_hostdata *hostdata; Scsi_Cmnd *ptr; hostdata = (struct NCR5380_hostdata *) instance->hostdata; - if (inout) { /* Has data been written to the file ? */ -#ifdef DTC_PUBLIC_RELEASE - dtc_wmaxi = dtc_maxi = 0; -#endif -#ifdef PAS16_PUBLIC_RELEASE - pas_wmaxi = pas_maxi = 0; -#endif - return (-ENOSYS); /* Currently this is a no-op */ - } SPRINTF("NCR5380 core release=%d. ", NCR5380_PUBLIC_RELEASE); if (((struct NCR5380_hostdata *) instance->hostdata)->flags & FLAG_NCR53C400) SPRINTF("ncr53c400 release=%d. 
", NCR53C400_PUBLIC_RELEASE); @@ -755,46 +748,37 @@ static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance, if (!hostdata->connected) SPRINTF("scsi%d: no currently connected command\n", instance->host_no); else - pos = lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, pos, buffer, length); + lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m); SPRINTF("scsi%d: issue_queue\n", instance->host_no); for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble) - pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length); + lprint_Scsi_Cmnd(ptr, m); SPRINTF("scsi%d: disconnected_queue\n", instance->host_no); for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble) - pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length); + lprint_Scsi_Cmnd(ptr, m); spin_unlock_irq(instance->host_lock); - - *start = buffer; - if (pos - buffer < offset) - return 0; - else if (pos - buffer - offset < length) - return pos - buffer - offset; - return length; + return 0; } -static char *lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, char *pos, char *buffer, int length) +static void lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, struct seq_file *m) { SPRINTF("scsi%d : destination target %d, lun %d\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun); SPRINTF(" command = "); - pos = lprint_command(cmd->cmnd, pos, buffer, length); - return (pos); + lprint_command(cmd->cmnd, m); } -static char *lprint_command(unsigned char *command, char *pos, char *buffer, int length) +static void lprint_command(unsigned char *command, struct seq_file *m) { int i, s; - pos = lprint_opcode(command[0], pos, buffer, length); + lprint_opcode(command[0], m); for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) SPRINTF("%02x ", command[i]); SPRINTF("\n"); - return (pos); } -static char *lprint_opcode(int opcode, char *pos, char *buffer, int length) +static void lprint_opcode(int opcode, struct seq_file *m) { SPRINTF("%2d (0x%02x)", opcode, opcode); - return (pos); } @@ -1012,7 +996,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *) LIST(cmd, tmp); tmp->host_scribble = (unsigned char *) cmd; } - dprintk(NDEBUG_QUEUES, ("scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail")); + dprintk(NDEBUG_QUEUES, "scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); /* Run the coroutine if it isn't already running. */ /* Kick off command processing */ @@ -1047,7 +1031,7 @@ static void NCR5380_main(struct work_struct *work) /* Lock held here */ done = 1; if (!hostdata->connected && !hostdata->selecting) { - dprintk(NDEBUG_MAIN, ("scsi%d : not connected\n", instance->host_no)); + dprintk(NDEBUG_MAIN, "scsi%d : not connected\n", instance->host_no); /* * Search through the issue_queue for a command destined * for a target that's not busy. @@ -1055,7 +1039,7 @@ static void NCR5380_main(struct work_struct *work) for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble) { if (prev != tmp) - dprintk(NDEBUG_LISTS, ("MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->target, hostdata->busy[tmp->target], tmp->lun)); + dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); /* When we find one, remove it from the issue queue. 
*/ if (!(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))) { if (prev) { @@ -1073,7 +1057,7 @@ static void NCR5380_main(struct work_struct *work) * On failure, we must add the command back to the * issue queue so we can keep trying. */ - dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->target, tmp->lun)); + dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun); /* * A successful selection is defined as one that @@ -1102,7 +1086,7 @@ static void NCR5380_main(struct work_struct *work) tmp->host_scribble = (unsigned char *) hostdata->issue_queue; hostdata->issue_queue = tmp; done = 0; - dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no)); + dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no); } /* lock held here still */ } /* if target/lun is not busy */ @@ -1132,9 +1116,9 @@ static void NCR5380_main(struct work_struct *work) #endif && (!hostdata->time_expires || time_before_eq(hostdata->time_expires, jiffies)) ) { - dprintk(NDEBUG_MAIN, ("scsi%d : main() : performing information transfer\n", instance->host_no)); + dprintk(NDEBUG_MAIN, "scsi%d : main() : performing information transfer\n", instance->host_no); NCR5380_information_transfer(instance); - dprintk(NDEBUG_MAIN, ("scsi%d : main() : done set false\n", instance->host_no)); + dprintk(NDEBUG_MAIN, "scsi%d : main() : done set false\n", instance->host_no); done = 0; } else break; @@ -1166,8 +1150,8 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id) unsigned char basr; unsigned long flags; - dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n", - instance->irq)); + dprintk(NDEBUG_INTR, "scsi : NCR5380 irq %d triggered\n", + instance->irq); do { done = 1; @@ -1180,14 +1164,14 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id) NCR5380_dprint(NDEBUG_INTR, instance); if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { done = 0; - dprintk(NDEBUG_INTR, ("scsi%d : SEL interrupt\n", instance->host_no)); + dprintk(NDEBUG_INTR, "scsi%d : SEL interrupt\n", instance->host_no); NCR5380_reselect(instance); (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else if (basr & BASR_PARITY_ERROR) { - dprintk(NDEBUG_INTR, ("scsi%d : PARITY interrupt\n", instance->host_no)); + dprintk(NDEBUG_INTR, "scsi%d : PARITY interrupt\n", instance->host_no); (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { - dprintk(NDEBUG_INTR, ("scsi%d : RESET interrupt\n", instance->host_no)); + dprintk(NDEBUG_INTR, "scsi%d : RESET interrupt\n", instance->host_no); (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else { #if defined(REAL_DMA) @@ -1217,7 +1201,7 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id) NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); } #else - dprintk(NDEBUG_INTR, ("scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG))); + dprintk(NDEBUG_INTR, "scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); #endif } @@ -1311,7 +1295,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) hostdata->restart_select = 0; 
NCR5380_dprint(NDEBUG_ARBITRATION, instance); - dprintk(NDEBUG_ARBITRATION, ("scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id)); + dprintk(NDEBUG_ARBITRATION, "scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id); /* * Set the phase bits to 0, otherwise the NCR5380 won't drive the @@ -1340,7 +1324,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) goto failed; } - dprintk(NDEBUG_ARBITRATION, ("scsi%d : arbitration complete\n", instance->host_no)); + dprintk(NDEBUG_ARBITRATION, "scsi%d : arbitration complete\n", instance->host_no); /* * The arbitration delay is 2.2us, but this is a minimum and there is @@ -1354,7 +1338,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) /* Check for lost arbitration */ if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { NCR5380_write(MODE_REG, MR_BASE); - dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no)); + dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no); goto failed; } NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL); @@ -1367,7 +1351,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no)); + dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no); goto failed; } /* @@ -1377,7 +1361,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) udelay(2); - dprintk(NDEBUG_ARBITRATION, ("scsi%d : won arbitration\n", instance->host_no)); + dprintk(NDEBUG_ARBITRATION, "scsi%d : won arbitration\n", instance->host_no); /* * Now that we have won arbitration, start Selection process, asserting @@ -1429,7 +1413,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) udelay(1); - dprintk(NDEBUG_SELECTION, ("scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd))); + dprintk(NDEBUG_SELECTION, "scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd)); /* * The SCSI specification calls for a 250 ms timeout for the actual @@ -1494,7 +1478,7 @@ part2: collect_stats(hostdata, cmd); cmd->scsi_done(cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - dprintk(NDEBUG_SELECTION, ("scsi%d : target did not respond within 250ms\n", instance->host_no)); + dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return 0; } @@ -1527,7 +1511,7 @@ part2: goto failed; } - dprintk(NDEBUG_SELECTION, ("scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id)); + dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id); tmp[0] = IDENTIFY(((instance->irq == SCSI_IRQ_NONE) ? 
0 : 1), cmd->device->lun); len = 1; @@ -1537,7 +1521,7 @@ part2: data = tmp; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_SELECTION, ("scsi%d : nexus established.\n", instance->host_no)); + dprintk(NDEBUG_SELECTION, "scsi%d : nexus established.\n", instance->host_no); /* XXX need to handle errors here */ hostdata->connected = cmd; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); @@ -1590,9 +1574,9 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase NCR5380_setup(instance); if (!(p & SR_IO)) - dprintk(NDEBUG_PIO, ("scsi%d : pio write %d bytes\n", instance->host_no, c)); + dprintk(NDEBUG_PIO, "scsi%d : pio write %d bytes\n", instance->host_no, c); else - dprintk(NDEBUG_PIO, ("scsi%d : pio read %d bytes\n", instance->host_no, c)); + dprintk(NDEBUG_PIO, "scsi%d : pio read %d bytes\n", instance->host_no, c); /* * The NCR5380 chip will only drive the SCSI bus when the @@ -1627,11 +1611,11 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase break; } - dprintk(NDEBUG_HANDSHAKE, ("scsi%d : REQ detected\n", instance->host_no)); + dprintk(NDEBUG_HANDSHAKE, "scsi%d : REQ detected\n", instance->host_no); /* Check for phase mismatch */ if ((tmp & PHASE_MASK) != p) { - dprintk(NDEBUG_HANDSHAKE, ("scsi%d : phase mismatch\n", instance->host_no)); + dprintk(NDEBUG_HANDSHAKE, "scsi%d : phase mismatch\n", instance->host_no); NCR5380_dprint_phase(NDEBUG_HANDSHAKE, instance); break; } @@ -1667,7 +1651,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase /* FIXME - if this fails bus reset ?? */ NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5*HZ); - dprintk(NDEBUG_HANDSHAKE, ("scsi%d : req false, handshake complete\n", instance->host_no)); + dprintk(NDEBUG_HANDSHAKE, "scsi%d : req false, handshake complete\n", instance->host_no); /* * We have several special cases to consider during REQ/ACK handshaking : @@ -1688,7 +1672,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase } } while (--c); - dprintk(NDEBUG_PIO, ("scsi%d : residual %d\n", instance->host_no, c)); + dprintk(NDEBUG_PIO, "scsi%d : residual %d\n", instance->host_no, c); *count = c; *data = d; @@ -1835,7 +1819,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase c -= 2; } #endif - dprintk(NDEBUG_DMA, ("scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d)); + dprintk(NDEBUG_DMA, "scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d); hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c); #endif @@ -1864,7 +1848,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE); #endif /* def REAL_DMA */ - dprintk(NDEBUG_DMA, ("scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG))); + dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG)); /* * On the PAS16 at least I/O recovery delays are not needed here. 
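The PIO handshake above waits with NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5*HZ) instead of spinning blindly. Only that call signature comes from the diff; the body below is an assumption about what such a helper amounts to:

static int demo_poll_politely(struct Scsi_Host *instance, int reg,
			      int bit, int val, int wait)
{
	unsigned long deadline = jiffies + wait;

	do {
		/* NCR5380_read() is the board-specific register accessor */
		if ((NCR5380_read(reg) & bit) == val)
			return 0;
		cpu_relax();	/* a real helper would also schedule() */
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}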
@@ -1941,7 +1925,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase } } - dprintk(NDEBUG_DMA, ("scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG))); + dprintk(NDEBUG_DMA, "scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG)); NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); @@ -1955,7 +1939,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase #ifdef READ_OVERRUNS if (*phase == p && (p & SR_IO) && residue == 0) { if (overrun) { - dprintk(NDEBUG_DMA, ("Got an input overrun, using saved byte\n")); + dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); **data = saved_data; *data += 1; *count -= 1; @@ -1964,13 +1948,13 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase printk("No overrun??\n"); cnt = toPIO = 2; } - dprintk(NDEBUG_DMA, ("Doing %d-byte PIO to 0x%X\n", cnt, *data)); + dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data); NCR5380_transfer_pio(instance, phase, &cnt, data); *count -= toPIO - cnt; } #endif - dprintk(NDEBUG_DMA, ("Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count))); + dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count)); return 0; #elif defined(REAL_DMA) @@ -2020,7 +2004,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase foo = NCR5380_pwrite(instance, d, c); #else int timeout; - dprintk(NDEBUG_C400_PWRITE, ("About to pwrite %d bytes\n", c)); + dprintk(NDEBUG_C400_PWRITE, "About to pwrite %d bytes\n", c); if (!(foo = NCR5380_pwrite(instance, d, c))) { /* * Wait for the last byte to be sent. 
If REQ is being asserted for @@ -2031,19 +2015,19 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)); if (!timeout) - dprintk(NDEBUG_LAST_BYTE_SENT, ("scsi%d : timed out on last byte\n", instance->host_no)); + dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : timed out on last byte\n", instance->host_no); if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) { hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT; if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) { hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT; - dprintk(NDEBUG_LAST_WRITE_SENT, ("scsi%d : last bit sent works\n", instance->host_no)); + dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : last byte sent works\n", instance->host_no); } } } else { - dprintk(NDEBUG_C400_PWRITE, ("Waiting for LASTBYTE\n")); + dprintk(NDEBUG_C400_PWRITE, "Waiting for LASTBYTE\n"); while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT)); - dprintk(NDEBUG_C400_PWRITE, ("Got LASTBYTE\n")); + dprintk(NDEBUG_C400_PWRITE, "Got LASTBYTE\n"); } } #endif @@ -2052,9 +2036,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) { - dprintk(NDEBUG_C400_PWRITE, ("53C400w: Checking for IRQ\n")); + dprintk(NDEBUG_C400_PWRITE, "53C400w: Checking for IRQ\n"); if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) { - dprintk(NDEBUG_C400_PWRITE, ("53C400w: got it, reading reset interrupt reg\n")); + dprintk(NDEBUG_C400_PWRITE, "53C400w: got it, reading reset interrupt reg\n"); NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else { printk("53C400w: IRQ NOT THERE!\n"); @@ -2146,7 +2130,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { --cmd->SCp.buffers_residual; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); - dprintk(NDEBUG_INFORMATION, ("scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual)); + dprintk(NDEBUG_INFORMATION, "scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual); } /* * The preferred transfer method is going to be @@ -2226,7 +2210,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { case LINKED_FLG_CMD_COMPLETE: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun)); + dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun); /* * Sanity check : A linked command should only terminate with * one of these messages if there are more linked commands @@ -2242,7 +2226,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { /* The next command is still part of this process */ cmd->next_link->tag = cmd->tag; cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); - dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun)); + dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun); collect_stats(hostdata, cmd); cmd->scsi_done(cmd); cmd = hostdata->connected; @@ 
-2254,7 +2238,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { sink = 1; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); hostdata->connected = NULL; - dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun)); + dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun); hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); /* @@ -2288,13 +2272,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); - dprintk(NDEBUG_AUTOSENSE, ("scsi%d : performing request sense\n", instance->host_no)); + dprintk(NDEBUG_AUTOSENSE, "scsi%d : performing request sense\n", instance->host_no); LIST(cmd, hostdata->issue_queue); cmd->host_scribble = (unsigned char *) hostdata->issue_queue; hostdata->issue_queue = (Scsi_Cmnd *) cmd; - dprintk(NDEBUG_QUEUES, ("scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no)); + dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no); } else #endif /* def AUTOSENSE */ { @@ -2334,7 +2318,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { hostdata->disconnected_queue; hostdata->connected = NULL; hostdata->disconnected_queue = cmd; - dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun)); + dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun); /* * Restore phase bits to 0 so an interrupted selection, * arbitration can resume. 
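All of these call sites shed one layer of parentheses because dprintk() changes from a macro taking a single parenthesised argument list into a printf-style variadic macro routed through pr_debug() (the header hunk appears further below). A side-by-side sketch, with the old/new names invented for contrast:

/* old shape: compiled out entirely, double parens at every call site */
#define dprintk_old(flg, args)	do {} while (0)
/* usage: dprintk_old(NDEBUG_QUEUES, ("scsi%d : ...\n", host_no)); */

/* new shape: masked at compile time, normal printf argument list */
#define dprintk_new(flg, fmt, ...) \
	do { if ((NDEBUG) & (flg)) pr_debug(fmt, ##__VA_ARGS__); } while (0)
/* usage: dprintk_new(NDEBUG_QUEUES, "scsi%d : ...\n", host_no); */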
@@ -2380,14 +2364,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { extended_msg[0] = EXTENDED_MESSAGE; /* Accept first byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_EXTENDED, ("scsi%d : receiving extended message\n", instance->host_no)); + dprintk(NDEBUG_EXTENDED, "scsi%d : receiving extended message\n", instance->host_no); len = 2; data = extended_msg + 1; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_EXTENDED, ("scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2])); + dprintk(NDEBUG_EXTENDED, "scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]); if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) { /* Accept third byte by clearing ACK */ @@ -2397,7 +2381,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_EXTENDED, ("scsi%d : message received, residual %d\n", instance->host_no, len)); + dprintk(NDEBUG_EXTENDED, "scsi%d : message received, residual %d\n", instance->host_no, len); switch (extended_msg[2]) { case EXTENDED_SDTR: @@ -2463,7 +2447,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { NCR5380_transfer_pio(instance, &phase, &len, &data); if (!cmd->device->disconnect && should_disconnect(cmd->cmnd[0])) { NCR5380_set_timer(hostdata, USLEEP_SLEEP); - dprintk(NDEBUG_USLEEP, ("scsi%d : issued command, sleeping until %ul\n", instance->host_no, hostdata->time_expires)); + dprintk(NDEBUG_USLEEP, "scsi%d : issued command, sleeping until %lu\n", instance->host_no, hostdata->time_expires); return; } break; @@ -2475,7 +2459,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { break; default: printk("scsi%d : unknown phase\n", instance->host_no); - NCR5380_dprint(NDEBUG_ALL, instance); + NCR5380_dprint(NDEBUG_ANY, instance); } /* switch(phase) */ } /* if (tmp * SR_REQ) */ else { @@ -2483,7 +2467,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { */ if (!cmd->device->disconnect && time_after_eq(jiffies, poll_time)) { NCR5380_set_timer(hostdata, USLEEP_SLEEP); - dprintk(NDEBUG_USLEEP, ("scsi%d : poll timed out, sleeping until %ul\n", instance->host_no, hostdata->time_expires)); + dprintk(NDEBUG_USLEEP, "scsi%d : poll timed out, sleeping until %lu\n", instance->host_no, hostdata->time_expires); return; } } @@ -2524,7 +2508,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { hostdata->restart_select = 1; target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); - dprintk(NDEBUG_SELECTION, ("scsi%d : reselect\n", instance->host_no)); + dprintk(NDEBUG_SELECTION, "scsi%d : reselect\n", instance->host_no); /* * At this point, we have detected that our SCSI ID is on the bus, @@ -2604,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { do_abort(instance); } else { hostdata->connected = tmp; - dprintk(NDEBUG_RESELECTION, ("scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->target, tmp->lun, tmp->tag)); + dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag); } } @@ -2689,8 +2673,8 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { NCR5380_setup(instance); - dprintk(NDEBUG_ABORT, ("scsi%d : abort called\n", 
instance->host_no)); - dprintk(NDEBUG_ABORT, (" basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG))); + dprintk(NDEBUG_ABORT, "scsi%d : abort called\n", instance->host_no); + dprintk(NDEBUG_ABORT, " basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); #if 0 /* @@ -2700,7 +2684,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { */ if (hostdata->connected == cmd) { - dprintk(NDEBUG_ABORT, ("scsi%d : aborting connected command\n", instance->host_no)); + dprintk(NDEBUG_ABORT, "scsi%d : aborting connected command\n", instance->host_no); hostdata->aborted = 1; /* * We should perform BSY checking, and make sure we haven't slipped @@ -2728,14 +2712,14 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { * from the issue queue. */ - dprintk(NDEBUG_ABORT, ("scsi%d : abort going into loop.\n", instance->host_no)); + dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no); for (prev = (Scsi_Cmnd **) & (hostdata->issue_queue), tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble) if (cmd == tmp) { REMOVE(5, *prev, tmp, tmp->host_scribble); (*prev) = (Scsi_Cmnd *) tmp->host_scribble; tmp->host_scribble = NULL; tmp->result = DID_ABORT << 16; - dprintk(NDEBUG_ABORT, ("scsi%d : abort removed command from issue queue.\n", instance->host_no)); + dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no); tmp->scsi_done(tmp); return SUCCESS; } @@ -2757,7 +2741,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { */ if (hostdata->connected) { - dprintk(NDEBUG_ABORT, ("scsi%d : abort failed, command connected.\n", instance->host_no)); + dprintk(NDEBUG_ABORT, "scsi%d : abort failed, command connected.\n", instance->host_no); return FAILED; } /* @@ -2787,11 +2771,11 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble) if (cmd == tmp) { - dprintk(NDEBUG_ABORT, ("scsi%d : aborting disconnected command.\n", instance->host_no)); + dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no); if (NCR5380_select(instance, cmd, (int) cmd->tag)) return FAILED; - dprintk(NDEBUG_ABORT, ("scsi%d : nexus reestablished.\n", instance->host_no)); + dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no); do_abort(instance); diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index fd40a32b1f6..c79ddfa6f53 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h @@ -21,10 +21,6 @@ * 1+ (800) 334-5454 */ -/* - * $Log: NCR5380.h,v $ - */ - #ifndef NCR5380_H #define NCR5380_H @@ -60,6 +56,9 @@ #define NDEBUG_C400_PREAD 0x100000 #define NDEBUG_C400_PWRITE 0x200000 #define NDEBUG_LISTS 0x400000 +#define NDEBUG_ABORT 0x800000 +#define NDEBUG_TAGS 0x1000000 +#define NDEBUG_MERGING 0x2000000 #define NDEBUG_ANY 0xFFFFFFFFUL @@ -292,9 +291,24 @@ struct NCR5380_hostdata { #ifdef __KERNEL__ -#define dprintk(a,b) do {} while(0) -#define NCR5380_dprint(a,b) do {} while(0) -#define NCR5380_dprint_phase(a,b) do {} while(0) +#ifndef NDEBUG +#define NDEBUG (0) +#endif + +#define dprintk(flg, fmt, ...) 
\ + do { if ((NDEBUG) & (flg)) pr_debug(fmt, ## __VA_ARGS__); } while (0) + +#if NDEBUG +#define NCR5380_dprint(flg, arg) \ + do { if ((NDEBUG) & (flg)) NCR5380_print(arg); } while (0) +#define NCR5380_dprint_phase(flg, arg) \ + do { if ((NDEBUG) & (flg)) NCR5380_print_phase(arg); } while (0) +static void NCR5380_print_phase(struct Scsi_Host *instance); +static void NCR5380_print(struct Scsi_Host *instance); +#else +#define NCR5380_dprint(flg, arg) do {} while (0) +#define NCR5380_dprint_phase(flg, arg) do {} while (0) +#endif #if defined(AUTOPROBE_IRQ) static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible); @@ -307,15 +321,13 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id); #endif static void NCR5380_main(struct work_struct *work); static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance); -#ifdef NDEBUG -static void NCR5380_print_phase(struct Scsi_Host *instance); -static void NCR5380_print(struct Scsi_Host *instance); -#endif static int NCR5380_abort(Scsi_Cmnd * cmd); static int NCR5380_bus_reset(Scsi_Cmnd * cmd); static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *); -static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance, - char *buffer, char **start, off_t offset, int length, int inout); +static int __maybe_unused NCR5380_show_info(struct seq_file *, + struct Scsi_Host *); +static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance, + char *buffer, int length); static void NCR5380_reselect(struct Scsi_Host *instance); static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag); diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c index 3e09aa21c1c..9176bfbd574 100644 --- a/drivers/scsi/a2091.c +++ b/drivers/scsi/a2091.c @@ -166,7 +166,8 @@ static int a2091_bus_reset(struct scsi_cmnd *cmd) static struct scsi_host_template a2091_scsi_template = { .module = THIS_MODULE, .name = "Commodore A2091/A590 SCSI", - .proc_info = wd33c93_proc_info, + .show_info = wd33c93_show_info, + .write_info = wd33c93_write_info, .proc_name = "A2901", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, @@ -200,7 +201,7 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent) instance->irq = IRQ_AMIGA_PORTS; instance->unique_id = z->slotaddr; - regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start); + regs = ZTWO_VADDR(z->resource.start); regs->DAWR = DAWR_A2091; wdregs.SASR = ®s->SASR; diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c index e29fe0e708f..dd5b64726dd 100644 --- a/drivers/scsi/a3000.c +++ b/drivers/scsi/a3000.c @@ -181,7 +181,8 @@ static int a3000_bus_reset(struct scsi_cmnd *cmd) static struct scsi_host_template amiga_a3000_scsi_template = { .module = THIS_MODULE, .name = "Amiga 3000 built-in SCSI", - .proc_info = wd33c93_proc_info, + .show_info = wd33c93_show_info, + .write_info = wd33c93_write_info, .proc_name = "A3000", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, @@ -219,7 +220,7 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev) instance->irq = IRQ_AMIGA_PORTS; - regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start); + regs = ZTWO_VADDR(res->start); regs->DAWR = DAWR_A3000; wdregs.SASR = ®s->SASR; @@ -279,18 +280,7 @@ static struct platform_driver amiga_a3000_scsi_driver = { }, }; -static int __init amiga_a3000_scsi_init(void) -{ - return platform_driver_probe(&amiga_a3000_scsi_driver, - amiga_a3000_scsi_probe); -} -module_init(amiga_a3000_scsi_init); - -static 
void __exit amiga_a3000_scsi_exit(void) -{ - platform_driver_unregister(&amiga_a3000_scsi_driver); -} -module_exit(amiga_a3000_scsi_exit); +module_platform_driver_probe(amiga_a3000_scsi_driver, amiga_a3000_scsi_probe); MODULE_DESCRIPTION("Amiga 3000 built-in SCSI"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c index 23c76f41883..f5a2ab41543 100644 --- a/drivers/scsi/a4000t.c +++ b/drivers/scsi/a4000t.c @@ -56,7 +56,7 @@ static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev) scsi_addr = res->start + A4000T_SCSI_OFFSET; /* Fill in the required pieces of hostdata */ - hostdata->base = (void __iomem *)ZTWO_VADDR(scsi_addr); + hostdata->base = ZTWO_VADDR(scsi_addr); hostdata->clock = 50; hostdata->chip710 = 1; hostdata->dmode_extra = DMODE_FC2; @@ -116,20 +116,7 @@ static struct platform_driver amiga_a4000t_scsi_driver = { }, }; -static int __init amiga_a4000t_scsi_init(void) -{ - return platform_driver_probe(&amiga_a4000t_scsi_driver, - amiga_a4000t_scsi_probe); -} - -module_init(amiga_a4000t_scsi_init); - -static void __exit amiga_a4000t_scsi_exit(void) -{ - platform_driver_unregister(&amiga_a4000t_scsi_driver); -} - -module_exit(amiga_a4000t_scsi_exit); +module_platform_driver_probe(amiga_a4000t_scsi_driver, amiga_a4000t_scsi_probe); MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / " "Kars de Jong <jongk@linux-m68k.org>"); diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index a6f7190c09a..eaaf8705a5f 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -12,7 +12,7 @@ *----------------------------------------------------------------------------*/ #ifndef AAC_DRIVER_BUILD -# define AAC_DRIVER_BUILD 30000 +# define AAC_DRIVER_BUILD 30300 # define AAC_DRIVER_BRANCH "-ms" #endif #define MAXIMUM_NUM_CONTAINERS 32 @@ -1918,6 +1918,10 @@ extern struct aac_common aac_config; #define MONITOR_PANIC 0x00000020 #define KERNEL_UP_AND_RUNNING 0x00000080 #define KERNEL_PANIC 0x00000100 +#define FLASH_UPD_PENDING 0x00002000 +#define FLASH_UPD_SUCCESS 0x00004000 +#define FLASH_UPD_FAILED 0x00008000 +#define FWUPD_TIMEOUT (5 * 60) /* * Doorbell bit defines diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 1ef041bc60c..fbcd48d0bfc 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -318,7 +318,8 @@ return_fib: kthread_stop(dev->thread); ssleep(1); dev->aif_thread = 0; - dev->thread = kthread_run(aac_command_thread, dev, dev->name); + dev->thread = kthread_run(aac_command_thread, dev, + "%s", dev->name); ssleep(1); } if (f.wait) { @@ -510,7 +511,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) goto cleanup; } - if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) { + if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) || + (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) { rcode = -EINVAL; goto cleanup; } diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 3f759957f4b..177b094c779 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -214,7 +214,7 @@ int aac_send_shutdown(struct aac_dev * dev) cmd = (struct aac_close *) fib_data(fibctx); cmd->command = cpu_to_le32(VM_CloseAll); - cmd->cid = cpu_to_le32(0xffffffff); + cmd->cid = cpu_to_le32(0xfffffffe); status = aac_fib_send(ContainerCommand, fibctx, diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 
1be0776a80c..cab190af634 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1336,7 +1336,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) goto out; if (jafo) { - aac->thread = kthread_run(aac_command_thread, aac, aac->name); + aac->thread = kthread_run(aac_command_thread, aac, "%s", + aac->name); if (IS_ERR(aac->thread)) { retval = PTR_ERR(aac->thread); goto out; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 408a42ef787..4921ed19a02 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) { struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg); } @@ -1079,6 +1081,7 @@ static struct scsi_host_template aac_driver_template = { #endif .use_clustering = ENABLE_CLUSTERING, .emulated = 1, + .no_write_same = 1, }; static void __aac_shutdown(struct aac_dev * aac) diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index dada38aeacc..5c6a8703f53 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -480,7 +480,7 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size) static int aac_rx_restart_adapter(struct aac_dev *dev, int bled) { - u32 var; + u32 var = 0; if (!(dev->supplement_adapter_info.SupportedOptions2 & AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) { @@ -500,13 +500,14 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled) if (bled && (bled != -ETIMEDOUT)) return -EINVAL; } - if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */ + if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */ rx_writel(dev, MUnit.reserved2, 3); msleep(5000); /* Delay 5 seconds */ var = 0x00000001; } - if (var != 0x00000001) + if (bled && (var != 0x00000001)) return -EINVAL; + ssleep(5); if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) return -ENODEV; if (startup_timeout < 300) @@ -646,7 +647,7 @@ int _aac_rx_init(struct aac_dev *dev) dev->sync_mode = 0; /* sync. mode not supported */ dev->msi = aac_msi && !pci_enable_msi(dev->pdev); if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, - IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { + IRQF_SHARED, "aacraid", dev) < 0) { if (dev->msi) pci_disable_msi(dev->pdev); printk(KERN_ERR "%s%d: Interrupt unavailable.\n", diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c index 2244f315f33..e66477c9824 100644 --- a/drivers/scsi/aacraid/sa.c +++ b/drivers/scsi/aacraid/sa.c @@ -387,8 +387,7 @@ int aac_sa_init(struct aac_dev *dev) goto error_irq; dev->sync_mode = 0; /* sync. 
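
The two kthread_run() hunks above route dev->name and aac->name through an explicit "%s" instead of using them as the format string itself; a name containing '%' would otherwise be parsed for conversions. A hedged userspace illustration of the same hardening, where set_task_name() is a stand-in for kthread_run()'s printf-style name argument:

#include <stdarg.h>
#include <stdio.h>

static void set_task_name(const char *namefmt, ...)
{
        char name[32];
        va_list ap;

        va_start(ap, namefmt);
        vsnprintf(name, sizeof(name), namefmt, ap);
        va_end(ap);
        printf("task name: %s\n", name);
}

int main(void)
{
        const char *untrusted = "aac%x%n";      /* hostile adapter name */

        /* set_task_name(untrusted); unsafe: the string becomes the format */
        set_task_name("%s", untrusted);         /* safe: the string is only data */
        return 0;
}

The aac_compat_ioctl() hunk is in the same defensive spirit: raw adapter ioctls now require CAP_SYS_RAWIO on the compat path, matching the check on the native ioctl path.
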
mode not supported */ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, - IRQF_SHARED|IRQF_DISABLED, - "aacraid", (void *)dev ) < 0) { + IRQF_SHARED, "aacraid", (void *)dev) < 0) { printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance); goto error_iounmap; diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index e2e349204e7..9c65aed2621 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -93,6 +93,9 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id) int send_it = 0; extern int aac_sync_mode; + src_writel(dev, MUnit.ODR_C, bellbits); + src_readl(dev, MUnit.ODR_C); + if (!aac_sync_mode) { src_writel(dev, MUnit.ODR_C, bellbits); src_readl(dev, MUnit.ODR_C); @@ -644,7 +647,7 @@ int aac_src_init(struct aac_dev *dev) dev->msi = aac_msi && !pci_enable_msi(dev->pdev); if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, - IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { + IRQF_SHARED, "aacraid", dev) < 0) { if (dev->msi) pci_disable_msi(dev->pdev); @@ -703,6 +706,28 @@ int aac_srcv_init(struct aac_dev *dev) !aac_src_restart_adapter(dev, 0)) ++restart; /* + * Check to see if flash update is running. + * Wait for the adapter to be up and running. Wait up to 5 minutes + */ + status = src_readl(dev, MUnit.OMR); + if (status & FLASH_UPD_PENDING) { + start = jiffies; + do { + status = src_readl(dev, MUnit.OMR); + if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) { + printk(KERN_ERR "%s%d: adapter flash update failed.\n", + dev->name, instance); + goto error_iounmap; + } + } while (!(status & FLASH_UPD_SUCCESS) && + !(status & FLASH_UPD_FAILED)); + /* Delay 10 seconds. + * Because right now FW is doing a soft reset, + * do not read scratch pad register at this time + */ + ssleep(10); + } + /* * Check to see if the board panic'd while booting. */ status = src_readl(dev, MUnit.OMR); @@ -730,7 +755,9 @@ int aac_srcv_init(struct aac_dev *dev) /* * Wait for the adapter to be up and running. Wait up to 3 minutes */ - while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)) { + while (!((status = src_readl(dev, MUnit.OMR)) & + KERNEL_UP_AND_RUNNING) || + status == 0xffffffff) { if ((restart && (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) || time_after(jiffies, start+HZ*startup_timeout)) { @@ -777,7 +804,7 @@ int aac_srcv_init(struct aac_dev *dev) goto error_iounmap; dev->msi = aac_msi && !pci_enable_msi(dev->pdev); if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, - IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { + IRQF_SHARED, "aacraid", dev) < 0) { if (dev->msi) pci_disable_msi(dev->pdev); printk(KERN_ERR "%s%d: Interrupt unavailable.\n", diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index dcfaee66a8b..d8145888e66 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -2178,22 +2178,6 @@ do { \ #define ASC_INFO_SIZE 128 /* advansys_info() line size */ -#ifdef CONFIG_PROC_FS -/* /proc/scsi/advansys/[0...] related definitions */ -#define ASC_PRTBUF_SIZE 2048 -#define ASC_PRTLINE_SIZE 160 - -#define ASC_PRT_NEXT() \ - if (cp) { \ - totlen += len; \ - leftlen -= len; \ - if (leftlen == 0) { \ - return totlen; \ - } \ - cp += len; \ - } -#endif /* CONFIG_PROC_FS */ - /* Asc Library return codes */ #define ASC_TRUE 1 #define ASC_FALSE 0 @@ -2384,7 +2368,6 @@ struct asc_board { } eep_config; ulong last_reset; /* Saved last reset time */ /* /proc/scsi/advansys/[0...] 
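
The aac_srcv_init() hunk above adds a bounded poll: if the adapter reports FLASH_UPD_PENDING, initialization waits up to FWUPD_TIMEOUT (five minutes) for one of the completion bits before touching the hardware further. A self-contained sketch of that loop shape, with read_omr() as a stub for src_readl(dev, MUnit.OMR):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define FLASH_UPD_SUCCESS 0x00004000
#define FLASH_UPD_FAILED  0x00008000
#define FWUPD_TIMEOUT     (5 * 60)      /* seconds, as in aacraid.h above */

/* stub register read; reports success on the third poll */
static unsigned int read_omr(void)
{
        static int polls;
        return ++polls < 3 ? 0 : FLASH_UPD_SUCCESS;
}

/* bounded poll: give up if neither completion bit appears in time */
static bool wait_for_flash_update(void)
{
        time_t start = time(NULL);
        unsigned int status;

        do {
                status = read_omr();
                if (time(NULL) - start > FWUPD_TIMEOUT)
                        return false;   /* adapter flash update failed */
        } while (!(status & (FLASH_UPD_SUCCESS | FLASH_UPD_FAILED)));

        return !!(status & FLASH_UPD_SUCCESS);
}

int main(void)
{
        printf("flash update %s\n", wait_for_flash_update() ? "ok" : "failed");
        return 0;
}

The kernel version uses time_after(jiffies, start + HZ*FWUPD_TIMEOUT) rather than wall-clock time; time_after() is wrap-safe, which matters for jiffies arithmetic.
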
*/ - char *prtbuf; /* /proc print buffer */ #ifdef ADVANSYS_STATS struct asc_stats asc_stats; /* Board statistics */ #endif /* ADVANSYS_STATS */ @@ -2528,8 +2511,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s) struct asc_board *boardp = shost_priv(s); printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev)); - printk(" host_busy %u, host_no %d, last_reset %d,\n", - s->host_busy, s->host_no, (unsigned)s->last_reset); + printk(" host_busy %u, host_no %d,\n", + s->host_busy, s->host_no); printk(" base 0x%lx, io_port 0x%lx, irq %d,\n", (ulong)s->base, (ulong)s->io_port, boardp->irq); @@ -2875,64 +2858,21 @@ static const char *advansys_info(struct Scsi_Host *shost) } #ifdef CONFIG_PROC_FS -/* - * asc_prt_line() - * - * If 'cp' is NULL print to the console, otherwise print to a buffer. - * - * Return 0 if printing to the console, otherwise return the number of - * bytes written to the buffer. - * - * Note: If any single line is greater than ASC_PRTLINE_SIZE bytes the stack - * will be corrupted. 's[]' is defined to be ASC_PRTLINE_SIZE bytes. - */ -static int asc_prt_line(char *buf, int buflen, char *fmt, ...) -{ - va_list args; - int ret; - char s[ASC_PRTLINE_SIZE]; - - va_start(args, fmt); - ret = vsprintf(s, fmt, args); - BUG_ON(ret >= ASC_PRTLINE_SIZE); - if (buf == NULL) { - (void)printk(s); - ret = 0; - } else { - ret = min(buflen, ret); - memcpy(buf, s, ret); - } - va_end(args); - return ret; -} /* * asc_prt_board_devices() * * Print driver information for devices attached to the board. - * - * Note: no single line should be greater than ASC_PRTLINE_SIZE, - * cf. asc_prt_line(). - * - * Return the number of characters copied into 'cp'. No more than - * 'cplen' characters will be copied to 'cp'. */ -static int asc_prt_board_devices(struct Scsi_Host *shost, char *cp, int cplen) +static void asc_prt_board_devices(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); - int leftlen; - int totlen; - int len; int chip_scsi_id; int i; - leftlen = cplen; - totlen = len = 0; - - len = asc_prt_line(cp, leftlen, - "\nDevice Information for AdvanSys SCSI Host %d:\n", - shost->host_no); - ASC_PRT_NEXT(); + seq_printf(m, + "\nDevice Information for AdvanSys SCSI Host %d:\n", + shost->host_no); if (ASC_NARROW_BOARD(boardp)) { chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id; @@ -2940,60 +2880,42 @@ static int asc_prt_board_devices(struct Scsi_Host *shost, char *cp, int cplen) chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id; } - len = asc_prt_line(cp, leftlen, "Target IDs Detected:"); - ASC_PRT_NEXT(); + seq_printf(m, "Target IDs Detected:"); for (i = 0; i <= ADV_MAX_TID; i++) { - if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) { - len = asc_prt_line(cp, leftlen, " %X,", i); - ASC_PRT_NEXT(); - } + if (boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) + seq_printf(m, " %X,", i); } - len = asc_prt_line(cp, leftlen, " (%X=Host Adapter)\n", chip_scsi_id); - ASC_PRT_NEXT(); - - return totlen; + seq_printf(m, " (%X=Host Adapter)\n", chip_scsi_id); } /* * Display Wide Board BIOS Information. 
*/ -static int asc_prt_adv_bios(struct Scsi_Host *shost, char *cp, int cplen) +static void asc_prt_adv_bios(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); - int leftlen; - int totlen; - int len; ushort major, minor, letter; - leftlen = cplen; - totlen = len = 0; - - len = asc_prt_line(cp, leftlen, "\nROM BIOS Version: "); - ASC_PRT_NEXT(); + seq_printf(m, "\nROM BIOS Version: "); /* * If the BIOS saved a valid signature, then fill in * the BIOS code segment base address. */ if (boardp->bios_signature != 0x55AA) { - len = asc_prt_line(cp, leftlen, "Disabled or Pre-3.1\n"); - ASC_PRT_NEXT(); - len = asc_prt_line(cp, leftlen, - "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n"); - ASC_PRT_NEXT(); - len = asc_prt_line(cp, leftlen, - "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n"); - ASC_PRT_NEXT(); + seq_printf(m, "Disabled or Pre-3.1\n"); + seq_printf(m, + "BIOS either disabled or Pre-3.1. If it is pre-3.1, then a newer version\n"); + seq_printf(m, + "can be found at the ConnectCom FTP site: ftp://ftp.connectcom.net/pub\n"); } else { major = (boardp->bios_version >> 12) & 0xF; minor = (boardp->bios_version >> 8) & 0xF; letter = (boardp->bios_version & 0xFF); - len = asc_prt_line(cp, leftlen, "%d.%d%c\n", + seq_printf(m, "%d.%d%c\n", major, minor, letter >= 26 ? '?' : letter + 'A'); - ASC_PRT_NEXT(); - /* * Current available ROM BIOS release is 3.1I for UW * and 3.2I for U2W. This code doesn't differentiate @@ -3001,16 +2923,12 @@ static int asc_prt_adv_bios(struct Scsi_Host *shost, char *cp, int cplen) */ if (major < 3 || (major <= 3 && minor < 1) || (major <= 3 && minor <= 1 && letter < ('I' - 'A'))) { - len = asc_prt_line(cp, leftlen, - "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n"); - ASC_PRT_NEXT(); - len = asc_prt_line(cp, leftlen, - "ftp://ftp.connectcom.net/pub\n"); - ASC_PRT_NEXT(); + seq_printf(m, + "Newer version of ROM BIOS is available at the ConnectCom FTP site:\n"); + seq_printf(m, + "ftp://ftp.connectcom.net/pub\n"); } } - - return totlen; } /* @@ -3115,20 +3033,11 @@ static int asc_get_eeprom_string(ushort *serialnum, uchar *cp) * asc_prt_asc_board_eeprom() * * Print board EEPROM configuration. - * - * Note: no single line should be greater than ASC_PRTLINE_SIZE, - * cf. asc_prt_line(). - * - * Return the number of characters copied into 'cp'. No more than - * 'cplen' characters will be copied to 'cp'. 
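
Every advansys helper in this series converts the same way: the cp/leftlen/totlen/len bookkeeping and the ASC_PRT_NEXT() cursor macro collapse into bare seq_printf() calls, because the seq_file core, not the driver, owns the output buffer and the read offset. A rough userspace analogue of what the core does on the driver's behalf (seq_buf_printf() is illustrative, not the kernel API):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* toy stand-in for struct seq_file: the core, not the caller,
   tracks the buffer and the write position */
struct seq_buf {
        char *buf;
        size_t size, count;
};

static void seq_buf_printf(struct seq_buf *m, const char *fmt, ...)
{
        va_list ap;
        int n;

        va_start(ap, fmt);
        n = vsnprintf(m->buf + m->count, m->size - m->count, fmt, ap);
        va_end(ap);
        if (n > 0 && (size_t)n < m->size - m->count)
                m->count += n;  /* else: overflow handled by the core, caller unaffected */
}

int main(void)
{
        struct seq_buf m = { .size = 256, .count = 0 };
        int i;

        m.buf = calloc(1, m.size);
        if (!m.buf)
                return 1;
        seq_buf_printf(&m, "Target IDs Detected:");
        for (i = 0; i < 4; i++)
                seq_buf_printf(&m, " %X,", i);
        seq_buf_printf(&m, "\n");
        fputs(m.buf, stdout);
        free(m.buf);
        return 0;
}
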
*/ -static int asc_prt_asc_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen) +static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); ASC_DVC_VAR *asc_dvc_varp; - int leftlen; - int totlen; - int len; ASCEEP_CONFIG *ep; int i; #ifdef CONFIG_ISA @@ -3139,129 +3048,75 @@ static int asc_prt_asc_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen asc_dvc_varp = &boardp->dvc_var.asc_dvc_var; ep = &boardp->eep_config.asc_eep; - leftlen = cplen; - totlen = len = 0; - - len = asc_prt_line(cp, leftlen, - "\nEEPROM Settings for AdvanSys SCSI Host %d:\n", - shost->host_no); - ASC_PRT_NEXT(); + seq_printf(m, + "\nEEPROM Settings for AdvanSys SCSI Host %d:\n", + shost->host_no); if (asc_get_eeprom_string((ushort *)&ep->adapter_info[0], serialstr) - == ASC_TRUE) { - len = - asc_prt_line(cp, leftlen, " Serial Number: %s\n", - serialstr); - ASC_PRT_NEXT(); - } else { - if (ep->adapter_info[5] == 0xBB) { - len = asc_prt_line(cp, leftlen, - " Default Settings Used for EEPROM-less Adapter.\n"); - ASC_PRT_NEXT(); - } else { - len = asc_prt_line(cp, leftlen, - " Serial Number Signature Not Present.\n"); - ASC_PRT_NEXT(); - } - } - - len = asc_prt_line(cp, leftlen, - " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", - ASC_EEP_GET_CHIP_ID(ep), ep->max_total_qng, - ep->max_tag_qng); - ASC_PRT_NEXT(); - - len = asc_prt_line(cp, leftlen, - " cntl 0x%x, no_scam 0x%x\n", ep->cntl, ep->no_scam); - ASC_PRT_NEXT(); - - len = asc_prt_line(cp, leftlen, " Target ID: "); - ASC_PRT_NEXT(); - for (i = 0; i <= ASC_MAX_TID; i++) { - len = asc_prt_line(cp, leftlen, " %d", i); - ASC_PRT_NEXT(); - } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); - - len = asc_prt_line(cp, leftlen, " Disconnects: "); - ASC_PRT_NEXT(); - for (i = 0; i <= ASC_MAX_TID; i++) { - len = asc_prt_line(cp, leftlen, " %c", - (ep-> - disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : - 'N'); - ASC_PRT_NEXT(); - } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); - - len = asc_prt_line(cp, leftlen, " Command Queuing: "); - ASC_PRT_NEXT(); - for (i = 0; i <= ASC_MAX_TID; i++) { - len = asc_prt_line(cp, leftlen, " %c", - (ep-> - use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : - 'N'); - ASC_PRT_NEXT(); - } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); - - len = asc_prt_line(cp, leftlen, " Start Motor: "); - ASC_PRT_NEXT(); - for (i = 0; i <= ASC_MAX_TID; i++) { - len = asc_prt_line(cp, leftlen, " %c", - (ep-> - start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : - 'N'); - ASC_PRT_NEXT(); - } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); - - len = asc_prt_line(cp, leftlen, " Synchronous Transfer:"); - ASC_PRT_NEXT(); - for (i = 0; i <= ASC_MAX_TID; i++) { - len = asc_prt_line(cp, leftlen, " %c", - (ep-> - init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 
'Y' : - 'N'); - ASC_PRT_NEXT(); - } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + == ASC_TRUE) + seq_printf(m, " Serial Number: %s\n", serialstr); + else if (ep->adapter_info[5] == 0xBB) + seq_printf(m, + " Default Settings Used for EEPROM-less Adapter.\n"); + else + seq_printf(m, + " Serial Number Signature Not Present.\n"); + + seq_printf(m, + " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", + ASC_EEP_GET_CHIP_ID(ep), ep->max_total_qng, + ep->max_tag_qng); + + seq_printf(m, + " cntl 0x%x, no_scam 0x%x\n", ep->cntl, ep->no_scam); + + seq_printf(m, " Target ID: "); + for (i = 0; i <= ASC_MAX_TID; i++) + seq_printf(m, " %d", i); + seq_printf(m, "\n"); + + seq_printf(m, " Disconnects: "); + for (i = 0; i <= ASC_MAX_TID; i++) + seq_printf(m, " %c", + (ep->disc_enable & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + seq_printf(m, "\n"); + + seq_printf(m, " Command Queuing: "); + for (i = 0; i <= ASC_MAX_TID; i++) + seq_printf(m, " %c", + (ep->use_cmd_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + seq_printf(m, "\n"); + + seq_printf(m, " Start Motor: "); + for (i = 0; i <= ASC_MAX_TID; i++) + seq_printf(m, " %c", + (ep->start_motor & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + seq_printf(m, "\n"); + + seq_printf(m, " Synchronous Transfer:"); + for (i = 0; i <= ASC_MAX_TID; i++) + seq_printf(m, " %c", + (ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + seq_printf(m, "\n"); #ifdef CONFIG_ISA if (asc_dvc_varp->bus_type & ASC_IS_ISA) { - len = asc_prt_line(cp, leftlen, - " Host ISA DMA speed: %d MB/S\n", - isa_dma_speed[ASC_EEP_GET_DMA_SPD(ep)]); - ASC_PRT_NEXT(); + seq_printf(m, + " Host ISA DMA speed: %d MB/S\n", + isa_dma_speed[ASC_EEP_GET_DMA_SPD(ep)]); } #endif /* CONFIG_ISA */ - - return totlen; } /* * asc_prt_adv_board_eeprom() * * Print board EEPROM configuration. - * - * Note: no single line should be greater than ASC_PRTLINE_SIZE, - * cf. asc_prt_line(). - * - * Return the number of characters copied into 'cp'. No more than - * 'cplen' characters will be copied to 'cp'. 
*/ -static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen) +static void asc_prt_adv_board_eeprom(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); ADV_DVC_VAR *adv_dvc_varp; - int leftlen; - int totlen; - int len; int i; char *termstr; uchar serialstr[13]; @@ -3281,13 +3136,9 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen ep_38C1600 = &boardp->eep_config.adv_38C1600_eep; } - leftlen = cplen; - totlen = len = 0; - - len = asc_prt_line(cp, leftlen, - "\nEEPROM Settings for AdvanSys SCSI Host %d:\n", - shost->host_no); - ASC_PRT_NEXT(); + seq_printf(m, + "\nEEPROM Settings for AdvanSys SCSI Host %d:\n", + shost->host_no); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { wordp = &ep_3550->serial_number_word1; @@ -3297,38 +3148,28 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen wordp = &ep_38C1600->serial_number_word1; } - if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE) { - len = - asc_prt_line(cp, leftlen, " Serial Number: %s\n", - serialstr); - ASC_PRT_NEXT(); - } else { - len = asc_prt_line(cp, leftlen, - " Serial Number Signature Not Present.\n"); - ASC_PRT_NEXT(); - } + if (asc_get_eeprom_string(wordp, serialstr) == ASC_TRUE) + seq_printf(m, " Serial Number: %s\n", serialstr); + else + seq_printf(m, " Serial Number Signature Not Present.\n"); - if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { - len = asc_prt_line(cp, leftlen, - " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", - ep_3550->adapter_scsi_id, - ep_3550->max_host_qng, ep_3550->max_dvc_qng); - ASC_PRT_NEXT(); - } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { - len = asc_prt_line(cp, leftlen, - " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", - ep_38C0800->adapter_scsi_id, - ep_38C0800->max_host_qng, - ep_38C0800->max_dvc_qng); - ASC_PRT_NEXT(); - } else { - len = asc_prt_line(cp, leftlen, - " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", - ep_38C1600->adapter_scsi_id, - ep_38C1600->max_host_qng, - ep_38C1600->max_dvc_qng); - ASC_PRT_NEXT(); - } + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) + seq_printf(m, + " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", + ep_3550->adapter_scsi_id, + ep_3550->max_host_qng, ep_3550->max_dvc_qng); + else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) + seq_printf(m, + " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", + ep_38C0800->adapter_scsi_id, + ep_38C0800->max_host_qng, + ep_38C0800->max_dvc_qng); + else + seq_printf(m, + " Host SCSI ID: %u, Host Queue Size: %u, Device Queue Size: %u\n", + ep_38C1600->adapter_scsi_id, + ep_38C1600->max_host_qng, + ep_38C1600->max_dvc_qng); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { word = ep_3550->termination; } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { @@ -3352,34 +3193,26 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen break; } - if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { - len = asc_prt_line(cp, leftlen, - " termination: %u (%s), bios_ctrl: 0x%x\n", - ep_3550->termination, termstr, - ep_3550->bios_ctrl); - ASC_PRT_NEXT(); - } else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) { - len = asc_prt_line(cp, leftlen, - " termination: %u (%s), bios_ctrl: 0x%x\n", - ep_38C0800->termination_lvd, termstr, - ep_38C0800->bios_ctrl); - ASC_PRT_NEXT(); - } else { - len = asc_prt_line(cp, leftlen, - " termination: %u (%s), bios_ctrl: 
0x%x\n", - ep_38C1600->termination_lvd, termstr, - ep_38C1600->bios_ctrl); - ASC_PRT_NEXT(); - } + if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) + seq_printf(m, + " termination: %u (%s), bios_ctrl: 0x%x\n", + ep_3550->termination, termstr, + ep_3550->bios_ctrl); + else if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800) + seq_printf(m, + " termination: %u (%s), bios_ctrl: 0x%x\n", + ep_38C0800->termination_lvd, termstr, + ep_38C0800->bios_ctrl); + else + seq_printf(m, + " termination: %u (%s), bios_ctrl: 0x%x\n", + ep_38C1600->termination_lvd, termstr, + ep_38C1600->bios_ctrl); - len = asc_prt_line(cp, leftlen, " Target ID: "); - ASC_PRT_NEXT(); - for (i = 0; i <= ADV_MAX_TID; i++) { - len = asc_prt_line(cp, leftlen, " %X", i); - ASC_PRT_NEXT(); - } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, " Target ID: "); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %X", i); + seq_printf(m, "\n"); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { word = ep_3550->disc_enable; @@ -3388,15 +3221,11 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen } else { word = ep_38C1600->disc_enable; } - len = asc_prt_line(cp, leftlen, " Disconnects: "); - ASC_PRT_NEXT(); - for (i = 0; i <= ADV_MAX_TID; i++) { - len = asc_prt_line(cp, leftlen, " %c", - (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); - ASC_PRT_NEXT(); - } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, " Disconnects: "); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %c", + (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + seq_printf(m, "\n"); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { word = ep_3550->tagqng_able; @@ -3405,15 +3234,11 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen } else { word = ep_38C1600->tagqng_able; } - len = asc_prt_line(cp, leftlen, " Command Queuing: "); - ASC_PRT_NEXT(); - for (i = 0; i <= ADV_MAX_TID; i++) { - len = asc_prt_line(cp, leftlen, " %c", - (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); - ASC_PRT_NEXT(); - } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, " Command Queuing: "); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %c", + (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + seq_printf(m, "\n"); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { word = ep_3550->start_motor; @@ -3422,42 +3247,28 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen } else { word = ep_38C1600->start_motor; } - len = asc_prt_line(cp, leftlen, " Start Motor: "); - ASC_PRT_NEXT(); - for (i = 0; i <= ADV_MAX_TID; i++) { - len = asc_prt_line(cp, leftlen, " %c", - (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); - ASC_PRT_NEXT(); - } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, " Start Motor: "); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %c", + (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + seq_printf(m, "\n"); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { - len = asc_prt_line(cp, leftlen, " Synchronous Transfer:"); - ASC_PRT_NEXT(); - for (i = 0; i <= ADV_MAX_TID; i++) { - len = asc_prt_line(cp, leftlen, " %c", - (ep_3550-> - sdtr_able & ADV_TID_TO_TIDMASK(i)) ? - 'Y' : 'N'); - ASC_PRT_NEXT(); - } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, " Synchronous Transfer:"); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %c", + (ep_3550->sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 
+ 'Y' : 'N'); + seq_printf(m, "\n"); } if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { - len = asc_prt_line(cp, leftlen, " Ultra Transfer: "); - ASC_PRT_NEXT(); - for (i = 0; i <= ADV_MAX_TID; i++) { - len = asc_prt_line(cp, leftlen, " %c", - (ep_3550-> - ultra_able & ADV_TID_TO_TIDMASK(i)) - ? 'Y' : 'N'); - ASC_PRT_NEXT(); - } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, " Ultra Transfer: "); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %c", + (ep_3550->ultra_able & ADV_TID_TO_TIDMASK(i)) + ? 'Y' : 'N'); + seq_printf(m, "\n"); } if (adv_dvc_varp->chip_type == ADV_CHIP_ASC3550) { @@ -3467,21 +3278,16 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen } else { word = ep_38C1600->wdtr_able; } - len = asc_prt_line(cp, leftlen, " Wide Transfer: "); - ASC_PRT_NEXT(); - for (i = 0; i <= ADV_MAX_TID; i++) { - len = asc_prt_line(cp, leftlen, " %c", - (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); - ASC_PRT_NEXT(); - } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, " Wide Transfer: "); + for (i = 0; i <= ADV_MAX_TID; i++) + seq_printf(m, " %c", + (word & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); + seq_printf(m, "\n"); if (adv_dvc_varp->chip_type == ADV_CHIP_ASC38C0800 || adv_dvc_varp->chip_type == ADV_CHIP_ASC38C1600) { - len = asc_prt_line(cp, leftlen, - " Synchronous Transfer Speed (Mhz):\n "); - ASC_PRT_NEXT(); + seq_printf(m, + " Synchronous Transfer Speed (Mhz):\n "); for (i = 0; i <= ADV_MAX_TID; i++) { char *speed_str; @@ -3517,99 +3323,64 @@ static int asc_prt_adv_board_eeprom(struct Scsi_Host *shost, char *cp, int cplen speed_str = "Unk"; break; } - len = asc_prt_line(cp, leftlen, "%X:%s ", i, speed_str); - ASC_PRT_NEXT(); - if (i == 7) { - len = asc_prt_line(cp, leftlen, "\n "); - ASC_PRT_NEXT(); - } + seq_printf(m, "%X:%s ", i, speed_str); + if (i == 7) + seq_printf(m, "\n "); sdtr_speed >>= 4; } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, "\n"); } - - return totlen; } /* * asc_prt_driver_conf() - * - * Note: no single line should be greater than ASC_PRTLINE_SIZE, - * cf. asc_prt_line(). - * - * Return the number of characters copied into 'cp'. No more than - * 'cplen' characters will be copied to 'cp'. 
*/ -static int asc_prt_driver_conf(struct Scsi_Host *shost, char *cp, int cplen) +static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); - int leftlen; - int totlen; - int len; int chip_scsi_id; - leftlen = cplen; - totlen = len = 0; + seq_printf(m, + "\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n", + shost->host_no); - len = asc_prt_line(cp, leftlen, - "\nLinux Driver Configuration and Information for AdvanSys SCSI Host %d:\n", - shost->host_no); - ASC_PRT_NEXT(); + seq_printf(m, + " host_busy %u, max_id %u, max_lun %u, max_channel %u\n", + shost->host_busy, shost->max_id, + shost->max_lun, shost->max_channel); - len = asc_prt_line(cp, leftlen, - " host_busy %u, last_reset %u, max_id %u, max_lun %u, max_channel %u\n", - shost->host_busy, shost->last_reset, shost->max_id, - shost->max_lun, shost->max_channel); - ASC_PRT_NEXT(); + seq_printf(m, + " unique_id %d, can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n", + shost->unique_id, shost->can_queue, shost->this_id, + shost->sg_tablesize, shost->cmd_per_lun); - len = asc_prt_line(cp, leftlen, - " unique_id %d, can_queue %d, this_id %d, sg_tablesize %u, cmd_per_lun %u\n", - shost->unique_id, shost->can_queue, shost->this_id, - shost->sg_tablesize, shost->cmd_per_lun); - ASC_PRT_NEXT(); + seq_printf(m, + " unchecked_isa_dma %d, use_clustering %d\n", + shost->unchecked_isa_dma, shost->use_clustering); - len = asc_prt_line(cp, leftlen, - " unchecked_isa_dma %d, use_clustering %d\n", - shost->unchecked_isa_dma, shost->use_clustering); - ASC_PRT_NEXT(); + seq_printf(m, + " flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n", + boardp->flags, boardp->last_reset, jiffies, + boardp->asc_n_io_port); - len = asc_prt_line(cp, leftlen, - " flags 0x%x, last_reset 0x%x, jiffies 0x%x, asc_n_io_port 0x%x\n", - boardp->flags, boardp->last_reset, jiffies, - boardp->asc_n_io_port); - ASC_PRT_NEXT(); - - len = asc_prt_line(cp, leftlen, " io_port 0x%x\n", shost->io_port); - ASC_PRT_NEXT(); + seq_printf(m, " io_port 0x%lx\n", shost->io_port); if (ASC_NARROW_BOARD(boardp)) { chip_scsi_id = boardp->dvc_cfg.asc_dvc_cfg.chip_scsi_id; } else { chip_scsi_id = boardp->dvc_var.adv_dvc_var.chip_scsi_id; } - - return totlen; } /* * asc_prt_asc_board_info() * * Print dynamic board configuration information. - * - * Note: no single line should be greater than ASC_PRTLINE_SIZE, - * cf. asc_prt_line(). - * - * Return the number of characters copied into 'cp'. No more than - * 'cplen' characters will be copied to 'cp'. 
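
The asc_prt_driver_conf() hunk above also repairs format mismatches while converting: last_reset and jiffies are unsigned long and now print with 0x%lx, and io_port likewise moves from 0x%x to 0x%lx. Passing a long where %x expects int is undefined behaviour and typically truncates on 64-bit builds:

#include <stdio.h>

int main(void)
{
        unsigned long jiffies_like = 0x1234567890abcdefUL;

        /* correct: the length modifier matches the argument type */
        printf("jiffies 0x%lx\n", jiffies_like);
        /* printf("jiffies 0x%x\n", jiffies_like); is undefined and
           would usually show only the low 32 bits on LP64 targets */
        return 0;
}
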
*/ -static int asc_prt_asc_board_info(struct Scsi_Host *shost, char *cp, int cplen) +static void asc_prt_asc_board_info(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); int chip_scsi_id; - int leftlen; - int totlen; - int len; ASC_DVC_VAR *v; ASC_DVC_CFG *c; int i; @@ -3619,105 +3390,79 @@ static int asc_prt_asc_board_info(struct Scsi_Host *shost, char *cp, int cplen) c = &boardp->dvc_cfg.asc_dvc_cfg; chip_scsi_id = c->chip_scsi_id; - leftlen = cplen; - totlen = len = 0; + seq_printf(m, + "\nAsc Library Configuration and Statistics for AdvanSys SCSI Host %d:\n", + shost->host_no); - len = asc_prt_line(cp, leftlen, - "\nAsc Library Configuration and Statistics for AdvanSys SCSI Host %d:\n", - shost->host_no); - ASC_PRT_NEXT(); - - len = asc_prt_line(cp, leftlen, " chip_version %u, mcode_date 0x%x, " - "mcode_version 0x%x, err_code %u\n", - c->chip_version, c->mcode_date, c->mcode_version, - v->err_code); - ASC_PRT_NEXT(); + seq_printf(m, " chip_version %u, mcode_date 0x%x, " + "mcode_version 0x%x, err_code %u\n", + c->chip_version, c->mcode_date, c->mcode_version, + v->err_code); /* Current number of commands waiting for the host. */ - len = asc_prt_line(cp, leftlen, - " Total Command Pending: %d\n", v->cur_total_qng); - ASC_PRT_NEXT(); + seq_printf(m, + " Total Command Pending: %d\n", v->cur_total_qng); - len = asc_prt_line(cp, leftlen, " Command Queuing:"); - ASC_PRT_NEXT(); + seq_printf(m, " Command Queuing:"); for (i = 0; i <= ASC_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } - len = asc_prt_line(cp, leftlen, " %X:%c", - i, - (v-> - use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ? - 'Y' : 'N'); - ASC_PRT_NEXT(); + seq_printf(m, " %X:%c", + i, + (v->use_tagged_qng & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, "\n"); /* Current number of commands waiting for a device. */ - len = asc_prt_line(cp, leftlen, " Command Queue Pending:"); - ASC_PRT_NEXT(); + seq_printf(m, " Command Queue Pending:"); for (i = 0; i <= ASC_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } - len = asc_prt_line(cp, leftlen, " %X:%u", i, v->cur_dvc_qng[i]); - ASC_PRT_NEXT(); + seq_printf(m, " %X:%u", i, v->cur_dvc_qng[i]); } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, "\n"); /* Current limit on number of commands that can be sent to a device. */ - len = asc_prt_line(cp, leftlen, " Command Queue Limit:"); - ASC_PRT_NEXT(); + seq_printf(m, " Command Queue Limit:"); for (i = 0; i <= ASC_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } - len = asc_prt_line(cp, leftlen, " %X:%u", i, v->max_dvc_qng[i]); - ASC_PRT_NEXT(); + seq_printf(m, " %X:%u", i, v->max_dvc_qng[i]); } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, "\n"); /* Indicate whether the device has returned queue full status. 
*/ - len = asc_prt_line(cp, leftlen, " Command Queue Full:"); - ASC_PRT_NEXT(); + seq_printf(m, " Command Queue Full:"); for (i = 0; i <= ASC_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } - if (boardp->queue_full & ADV_TID_TO_TIDMASK(i)) { - len = asc_prt_line(cp, leftlen, " %X:Y-%d", - i, boardp->queue_full_cnt[i]); - } else { - len = asc_prt_line(cp, leftlen, " %X:N", i); - } - ASC_PRT_NEXT(); + if (boardp->queue_full & ADV_TID_TO_TIDMASK(i)) + seq_printf(m, " %X:Y-%d", + i, boardp->queue_full_cnt[i]); + else + seq_printf(m, " %X:N", i); } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, "\n"); - len = asc_prt_line(cp, leftlen, " Synchronous Transfer:"); - ASC_PRT_NEXT(); + seq_printf(m, " Synchronous Transfer:"); for (i = 0; i <= ASC_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } - len = asc_prt_line(cp, leftlen, " %X:%c", - i, - (v-> - sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' : - 'N'); - ASC_PRT_NEXT(); + seq_printf(m, " %X:%c", + i, + (v->sdtr_done & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, "\n"); for (i = 0; i <= ASC_MAX_TID; i++) { uchar syn_period_ix; @@ -3728,69 +3473,48 @@ static int asc_prt_asc_board_info(struct Scsi_Host *shost, char *cp, int cplen) continue; } - len = asc_prt_line(cp, leftlen, " %X:", i); - ASC_PRT_NEXT(); + seq_printf(m, " %X:", i); if ((boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET) == 0) { - len = asc_prt_line(cp, leftlen, " Asynchronous"); - ASC_PRT_NEXT(); + seq_printf(m, " Asynchronous"); } else { syn_period_ix = (boardp->sdtr_data[i] >> 4) & (v->max_sdtr_index - 1); - len = asc_prt_line(cp, leftlen, - " Transfer Period Factor: %d (%d.%d Mhz),", - v->sdtr_period_tbl[syn_period_ix], - 250 / - v->sdtr_period_tbl[syn_period_ix], - ASC_TENTHS(250, - v-> - sdtr_period_tbl - [syn_period_ix])); - ASC_PRT_NEXT(); + seq_printf(m, + " Transfer Period Factor: %d (%d.%d Mhz),", + v->sdtr_period_tbl[syn_period_ix], + 250 / v->sdtr_period_tbl[syn_period_ix], + ASC_TENTHS(250, + v->sdtr_period_tbl[syn_period_ix])); - len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d", - boardp-> - sdtr_data[i] & ASC_SYN_MAX_OFFSET); - ASC_PRT_NEXT(); + seq_printf(m, " REQ/ACK Offset: %d", + boardp->sdtr_data[i] & ASC_SYN_MAX_OFFSET); } if ((v->sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { - len = asc_prt_line(cp, leftlen, "*\n"); + seq_printf(m, "*\n"); renegotiate = 1; } else { - len = asc_prt_line(cp, leftlen, "\n"); + seq_printf(m, "\n"); } - ASC_PRT_NEXT(); } if (renegotiate) { - len = asc_prt_line(cp, leftlen, - " * = Re-negotiation pending before next command.\n"); - ASC_PRT_NEXT(); + seq_printf(m, + " * = Re-negotiation pending before next command.\n"); } - - return totlen; } /* * asc_prt_adv_board_info() * * Print dynamic board configuration information. - * - * Note: no single line should be greater than ASC_PRTLINE_SIZE, - * cf. asc_prt_line(). - * - * Return the number of characters copied into 'cp'. No more than - * 'cplen' characters will be copied to 'cp'. 
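
The Mhz figures in these tables come from integer arithmetic: 250/period gives the whole megahertz and ASC_TENTHS supplies the first decimal digit without floating point. Under the plausible definition below (the macro body itself sits outside the hunks shown), a transfer period factor of 60 prints as 4.1 Mhz:

#include <stdio.h>

/* plausible definition; the real macro body is not in this diff */
#define ASC_TENTHS(num, den) \
        ((10 * (num)) / (den) - 10 * ((num) / (den)))

int main(void)
{
        int period = 60;        /* transfer period factor */

        printf("%d (%d.%d Mhz)\n",
               period, 250 / period, ASC_TENTHS(250, period));
        /* prints: 60 (4.1 Mhz) */
        return 0;
}
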
*/ -static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen) +static void asc_prt_adv_board_info(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); - int leftlen; - int totlen; - int len; int i; ADV_DVC_VAR *v; ADV_DVC_CFG *c; @@ -3809,47 +3533,35 @@ static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen) iop_base = v->iop_base; chip_scsi_id = v->chip_scsi_id; - leftlen = cplen; - totlen = len = 0; - - len = asc_prt_line(cp, leftlen, - "\nAdv Library Configuration and Statistics for AdvanSys SCSI Host %d:\n", - shost->host_no); - ASC_PRT_NEXT(); + seq_printf(m, + "\nAdv Library Configuration and Statistics for AdvanSys SCSI Host %d:\n", + shost->host_no); - len = asc_prt_line(cp, leftlen, - " iop_base 0x%lx, cable_detect: %X, err_code %u\n", - v->iop_base, - AdvReadWordRegister(iop_base, - IOPW_SCSI_CFG1) & CABLE_DETECT, - v->err_code); - ASC_PRT_NEXT(); + seq_printf(m, + " iop_base 0x%lx, cable_detect: %X, err_code %u\n", + (unsigned long)v->iop_base, + AdvReadWordRegister(iop_base,IOPW_SCSI_CFG1) & CABLE_DETECT, + v->err_code); - len = asc_prt_line(cp, leftlen, " chip_version %u, mcode_date 0x%x, " - "mcode_version 0x%x\n", c->chip_version, - c->mcode_date, c->mcode_version); - ASC_PRT_NEXT(); + seq_printf(m, " chip_version %u, mcode_date 0x%x, " + "mcode_version 0x%x\n", c->chip_version, + c->mcode_date, c->mcode_version); AdvReadWordLram(iop_base, ASC_MC_TAGQNG_ABLE, tagqng_able); - len = asc_prt_line(cp, leftlen, " Queuing Enabled:"); - ASC_PRT_NEXT(); + seq_printf(m, " Queuing Enabled:"); for (i = 0; i <= ADV_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } - len = asc_prt_line(cp, leftlen, " %X:%c", - i, - (tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : - 'N'); - ASC_PRT_NEXT(); + seq_printf(m, " %X:%c", + i, + (tagqng_able & ADV_TID_TO_TIDMASK(i)) ? 
'Y' : 'N'); } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, "\n"); - len = asc_prt_line(cp, leftlen, " Queue Limit:"); - ASC_PRT_NEXT(); + seq_printf(m, " Queue Limit:"); for (i = 0; i <= ADV_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { @@ -3859,14 +3571,11 @@ static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen) AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_MAX_CMD + i, lrambyte); - len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte); - ASC_PRT_NEXT(); + seq_printf(m, " %X:%d", i, lrambyte); } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, "\n"); - len = asc_prt_line(cp, leftlen, " Command Pending:"); - ASC_PRT_NEXT(); + seq_printf(m, " Command Pending:"); for (i = 0; i <= ADV_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { @@ -3876,33 +3585,26 @@ static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen) AdvReadByteLram(iop_base, ASC_MC_NUMBER_OF_QUEUED_CMD + i, lrambyte); - len = asc_prt_line(cp, leftlen, " %X:%d", i, lrambyte); - ASC_PRT_NEXT(); + seq_printf(m, " %X:%d", i, lrambyte); } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, "\n"); AdvReadWordLram(iop_base, ASC_MC_WDTR_ABLE, wdtr_able); - len = asc_prt_line(cp, leftlen, " Wide Enabled:"); - ASC_PRT_NEXT(); + seq_printf(m, " Wide Enabled:"); for (i = 0; i <= ADV_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } - len = asc_prt_line(cp, leftlen, " %X:%c", - i, - (wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : - 'N'); - ASC_PRT_NEXT(); + seq_printf(m, " %X:%c", + i, + (wdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N'); } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, "\n"); AdvReadWordLram(iop_base, ASC_MC_WDTR_DONE, wdtr_done); - len = asc_prt_line(cp, leftlen, " Transfer Bit Width:"); - ASC_PRT_NEXT(); + seq_printf(m, " Transfer Bit Width:"); for (i = 0; i <= ADV_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { @@ -3913,37 +3615,30 @@ static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen) ASC_MC_DEVICE_HSHK_CFG_TABLE + (2 * i), lramword); - len = asc_prt_line(cp, leftlen, " %X:%d", - i, (lramword & 0x8000) ? 16 : 8); - ASC_PRT_NEXT(); + seq_printf(m, " %X:%d", + i, (lramword & 0x8000) ? 16 : 8); if ((wdtr_able & ADV_TID_TO_TIDMASK(i)) && (wdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { - len = asc_prt_line(cp, leftlen, "*"); - ASC_PRT_NEXT(); + seq_printf(m, "*"); renegotiate = 1; } } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, "\n"); AdvReadWordLram(iop_base, ASC_MC_SDTR_ABLE, sdtr_able); - len = asc_prt_line(cp, leftlen, " Synchronous Enabled:"); - ASC_PRT_NEXT(); + seq_printf(m, " Synchronous Enabled:"); for (i = 0; i <= ADV_MAX_TID; i++) { if ((chip_scsi_id == i) || ((boardp->init_tidmask & ADV_TID_TO_TIDMASK(i)) == 0)) { continue; } - len = asc_prt_line(cp, leftlen, " %X:%c", - i, - (sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 'Y' : - 'N'); - ASC_PRT_NEXT(); + seq_printf(m, " %X:%c", + i, + (sdtr_able & ADV_TID_TO_TIDMASK(i)) ? 
'Y' : 'N'); } - len = asc_prt_line(cp, leftlen, "\n"); - ASC_PRT_NEXT(); + seq_printf(m, "\n"); AdvReadWordLram(iop_base, ASC_MC_SDTR_DONE, sdtr_done); for (i = 0; i <= ADV_MAX_TID; i++) { @@ -3959,358 +3654,170 @@ static int asc_prt_adv_board_info(struct Scsi_Host *shost, char *cp, int cplen) continue; } - len = asc_prt_line(cp, leftlen, " %X:", i); - ASC_PRT_NEXT(); + seq_printf(m, " %X:", i); if ((lramword & 0x1F) == 0) { /* Check for REQ/ACK Offset 0. */ - len = asc_prt_line(cp, leftlen, " Asynchronous"); - ASC_PRT_NEXT(); + seq_printf(m, " Asynchronous"); } else { - len = - asc_prt_line(cp, leftlen, - " Transfer Period Factor: "); - ASC_PRT_NEXT(); + seq_printf(m, " Transfer Period Factor: "); if ((lramword & 0x1F00) == 0x1100) { /* 80 Mhz */ - len = - asc_prt_line(cp, leftlen, "9 (80.0 Mhz),"); - ASC_PRT_NEXT(); + seq_printf(m, "9 (80.0 Mhz),"); } else if ((lramword & 0x1F00) == 0x1000) { /* 40 Mhz */ - len = - asc_prt_line(cp, leftlen, "10 (40.0 Mhz),"); - ASC_PRT_NEXT(); + seq_printf(m, "10 (40.0 Mhz),"); } else { /* 20 Mhz or below. */ period = (((lramword >> 8) * 25) + 50) / 4; if (period == 0) { /* Should never happen. */ - len = - asc_prt_line(cp, leftlen, - "%d (? Mhz), "); - ASC_PRT_NEXT(); + seq_printf(m, "%d (? Mhz), ", period); } else { - len = asc_prt_line(cp, leftlen, - "%d (%d.%d Mhz),", - period, 250 / period, - ASC_TENTHS(250, - period)); - ASC_PRT_NEXT(); + seq_printf(m, + "%d (%d.%d Mhz),", + period, 250 / period, + ASC_TENTHS(250, period)); } } - len = asc_prt_line(cp, leftlen, " REQ/ACK Offset: %d", - lramword & 0x1F); - ASC_PRT_NEXT(); + seq_printf(m, " REQ/ACK Offset: %d", + lramword & 0x1F); } if ((sdtr_done & ADV_TID_TO_TIDMASK(i)) == 0) { - len = asc_prt_line(cp, leftlen, "*\n"); + seq_printf(m, "*\n"); renegotiate = 1; } else { - len = asc_prt_line(cp, leftlen, "\n"); + seq_printf(m, "\n"); } - ASC_PRT_NEXT(); } if (renegotiate) { - len = asc_prt_line(cp, leftlen, - " * = Re-negotiation pending before next command.\n"); - ASC_PRT_NEXT(); + seq_printf(m, + " * = Re-negotiation pending before next command.\n"); } - - return totlen; -} - -/* - * asc_proc_copy() - * - * Copy proc information to a read buffer taking into account the current - * read offset in the file and the remaining space in the read buffer. - */ -static int -asc_proc_copy(off_t advoffset, off_t offset, char *curbuf, int leftlen, - char *cp, int cplen) -{ - int cnt = 0; - - ASC_DBG(2, "offset %d, advoffset %d, cplen %d\n", - (unsigned)offset, (unsigned)advoffset, cplen); - if (offset <= advoffset) { - /* Read offset below current offset, copy everything. */ - cnt = min(cplen, leftlen); - ASC_DBG(2, "curbuf 0x%lx, cp 0x%lx, cnt %d\n", - (ulong)curbuf, (ulong)cp, cnt); - memcpy(curbuf, cp, cnt); - } else if (offset < advoffset + cplen) { - /* Read offset within current range, partial copy. */ - cnt = (advoffset + cplen) - offset; - cp = (cp + cplen) - cnt; - cnt = min(cnt, leftlen); - ASC_DBG(2, "curbuf 0x%lx, cp 0x%lx, cnt %d\n", - (ulong)curbuf, (ulong)cp, cnt); - memcpy(curbuf, cp, cnt); - } - return cnt; } #ifdef ADVANSYS_STATS /* * asc_prt_board_stats() - * - * Note: no single line should be greater than ASC_PRTLINE_SIZE, - * cf. asc_prt_line(). - * - * Return the number of characters copied into 'cp'. No more than - * 'cplen' characters will be copied to 'cp'. 
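
One hunk above is more than mechanical cleanup: the old asc_prt_line(cp, leftlen, "%d (? Mhz), ") supplied no argument for %d, so the output contained whatever happened to sit in the vararg slot; the seq_printf() replacement passes period explicitly. seq_printf() is annotated for printf-style format checking, so the compiler can now catch this class of bug, as this sketch shows:

#include <stdarg.h>
#include <stdio.h>

/* the format attribute lets the compiler match conversions
   against arguments, as with seq_printf in the kernel */
__attribute__((format(printf, 1, 2)))
static void emit(const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
}

int main(void)
{
        int period = 0;

        /* emit("%d (? Mhz), "); missing argument: now a compile-time warning */
        emit("%d (? Mhz), ", period);   /* fixed form, as in the hunk above */
        return 0;
}
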
*/ -static int asc_prt_board_stats(struct Scsi_Host *shost, char *cp, int cplen) +static void asc_prt_board_stats(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); struct asc_stats *s = &boardp->asc_stats; - int leftlen = cplen; - int len, totlen = 0; + seq_printf(m, + "\nLinux Driver Statistics for AdvanSys SCSI Host %d:\n", + shost->host_no); - len = asc_prt_line(cp, leftlen, - "\nLinux Driver Statistics for AdvanSys SCSI Host %d:\n", - shost->host_no); - ASC_PRT_NEXT(); + seq_printf(m, + " queuecommand %u, reset %u, biosparam %u, interrupt %u\n", + s->queuecommand, s->reset, s->biosparam, + s->interrupt); - len = asc_prt_line(cp, leftlen, - " queuecommand %lu, reset %lu, biosparam %lu, interrupt %lu\n", - s->queuecommand, s->reset, s->biosparam, - s->interrupt); - ASC_PRT_NEXT(); + seq_printf(m, + " callback %u, done %u, build_error %u, build_noreq %u, build_nosg %u\n", + s->callback, s->done, s->build_error, + s->adv_build_noreq, s->adv_build_nosg); - len = asc_prt_line(cp, leftlen, - " callback %lu, done %lu, build_error %lu, build_noreq %lu, build_nosg %lu\n", - s->callback, s->done, s->build_error, - s->adv_build_noreq, s->adv_build_nosg); - ASC_PRT_NEXT(); - - len = asc_prt_line(cp, leftlen, - " exe_noerror %lu, exe_busy %lu, exe_error %lu, exe_unknown %lu\n", - s->exe_noerror, s->exe_busy, s->exe_error, - s->exe_unknown); - ASC_PRT_NEXT(); + seq_printf(m, + " exe_noerror %u, exe_busy %u, exe_error %u, exe_unknown %u\n", + s->exe_noerror, s->exe_busy, s->exe_error, + s->exe_unknown); /* * Display data transfer statistics. */ if (s->xfer_cnt > 0) { - len = asc_prt_line(cp, leftlen, " xfer_cnt %lu, xfer_elem %lu, ", - s->xfer_cnt, s->xfer_elem); - ASC_PRT_NEXT(); + seq_printf(m, " xfer_cnt %u, xfer_elem %u, ", + s->xfer_cnt, s->xfer_elem); - len = asc_prt_line(cp, leftlen, "xfer_bytes %lu.%01lu kb\n", - s->xfer_sect / 2, ASC_TENTHS(s->xfer_sect, 2)); - ASC_PRT_NEXT(); + seq_printf(m, "xfer_bytes %u.%01u kb\n", + s->xfer_sect / 2, ASC_TENTHS(s->xfer_sect, 2)); /* Scatter gather transfer statistics */ - len = asc_prt_line(cp, leftlen, " avg_num_elem %lu.%01lu, ", - s->xfer_elem / s->xfer_cnt, - ASC_TENTHS(s->xfer_elem, s->xfer_cnt)); - ASC_PRT_NEXT(); + seq_printf(m, " avg_num_elem %u.%01u, ", + s->xfer_elem / s->xfer_cnt, + ASC_TENTHS(s->xfer_elem, s->xfer_cnt)); - len = asc_prt_line(cp, leftlen, "avg_elem_size %lu.%01lu kb, ", - (s->xfer_sect / 2) / s->xfer_elem, - ASC_TENTHS((s->xfer_sect / 2), s->xfer_elem)); - ASC_PRT_NEXT(); + seq_printf(m, "avg_elem_size %u.%01u kb, ", + (s->xfer_sect / 2) / s->xfer_elem, + ASC_TENTHS((s->xfer_sect / 2), s->xfer_elem)); - len = asc_prt_line(cp, leftlen, "avg_xfer_size %lu.%01lu kb\n", - (s->xfer_sect / 2) / s->xfer_cnt, - ASC_TENTHS((s->xfer_sect / 2), s->xfer_cnt)); - ASC_PRT_NEXT(); + seq_printf(m, "avg_xfer_size %u.%01u kb\n", + (s->xfer_sect / 2) / s->xfer_cnt, + ASC_TENTHS((s->xfer_sect / 2), s->xfer_cnt)); } - - return totlen; } #endif /* ADVANSYS_STATS */ /* - * advansys_proc_info() - /proc/scsi/advansys/{0,1,2,3,...} + * advansys_show_info() - /proc/scsi/advansys/{0,1,2,3,...} * - * *buffer: I/O buffer - * **start: if inout == FALSE pointer into buffer where user read should start - * offset: current offset into a /proc/scsi/advansys/[0...] file - * length: length of buffer - * hostno: Scsi_Host host_no - * inout: TRUE - user is writing; FALSE - user is reading + * m: seq_file to print into + * shost: Scsi_Host * * Return the number of bytes read from or written to a * /proc/scsi/advansys/[0...] 
file. - * - * Note: This function uses the per board buffer 'prtbuf' which is - * allocated when the board is initialized in advansys_detect(). The - * buffer is ASC_PRTBUF_SIZE bytes. The function asc_proc_copy() is - * used to write to the buffer. The way asc_proc_copy() is written - * if 'prtbuf' is too small it will not be overwritten. Instead the - * user just won't get all the available statistics. */ static int -advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start, - off_t offset, int length, int inout) +advansys_show_info(struct seq_file *m, struct Scsi_Host *shost) { struct asc_board *boardp = shost_priv(shost); - char *cp; - int cplen; - int cnt; - int totcnt; - int leftlen; - char *curbuf; - off_t advoffset; ASC_DBG(1, "begin\n"); /* - * User write not supported. - */ - if (inout == TRUE) - return -ENOSYS; - - /* * User read of /proc/scsi/advansys/[0...] file. */ - /* Copy read data starting at the beginning of the buffer. */ - *start = buffer; - curbuf = buffer; - advoffset = 0; - totcnt = 0; - leftlen = length; - /* * Get board configuration information. * * advansys_info() returns the board string from its own static buffer. */ - cp = (char *)advansys_info(shost); - strcat(cp, "\n"); - cplen = strlen(cp); /* Copy board information. */ - cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen); - totcnt += cnt; - leftlen -= cnt; - if (leftlen == 0) { - ASC_DBG(1, "totcnt %d\n", totcnt); - return totcnt; - } - advoffset += cplen; - curbuf += cnt; - + seq_printf(m, "%s\n", (char *)advansys_info(shost)); /* * Display Wide Board BIOS Information. */ - if (!ASC_NARROW_BOARD(boardp)) { - cp = boardp->prtbuf; - cplen = asc_prt_adv_bios(shost, cp, ASC_PRTBUF_SIZE); - BUG_ON(cplen >= ASC_PRTBUF_SIZE); - cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, - cplen); - totcnt += cnt; - leftlen -= cnt; - if (leftlen == 0) { - ASC_DBG(1, "totcnt %d\n", totcnt); - return totcnt; - } - advoffset += cplen; - curbuf += cnt; - } + if (!ASC_NARROW_BOARD(boardp)) + asc_prt_adv_bios(m, shost); /* * Display driver information for each device attached to the board. */ - cp = boardp->prtbuf; - cplen = asc_prt_board_devices(shost, cp, ASC_PRTBUF_SIZE); - BUG_ON(cplen >= ASC_PRTBUF_SIZE); - cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen); - totcnt += cnt; - leftlen -= cnt; - if (leftlen == 0) { - ASC_DBG(1, "totcnt %d\n", totcnt); - return totcnt; - } - advoffset += cplen; - curbuf += cnt; + asc_prt_board_devices(m, shost); /* * Display EEPROM configuration for the board. */ - cp = boardp->prtbuf; - if (ASC_NARROW_BOARD(boardp)) { - cplen = asc_prt_asc_board_eeprom(shost, cp, ASC_PRTBUF_SIZE); - } else { - cplen = asc_prt_adv_board_eeprom(shost, cp, ASC_PRTBUF_SIZE); - } - BUG_ON(cplen >= ASC_PRTBUF_SIZE); - cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen); - totcnt += cnt; - leftlen -= cnt; - if (leftlen == 0) { - ASC_DBG(1, "totcnt %d\n", totcnt); - return totcnt; - } - advoffset += cplen; - curbuf += cnt; + if (ASC_NARROW_BOARD(boardp)) + asc_prt_asc_board_eeprom(m, shost); + else + asc_prt_adv_board_eeprom(m, shost); /* * Display driver configuration and information for the board. 
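
With the helpers converted, advansys_show_info() itself (above and continuing below) reduces to a straight sequence of calls: no *start/offset/length arithmetic, no asc_proc_copy() pagination, and no per-board prtbuf. The resulting shape, common to every driver touched in this series, is roughly the following compile-ready sketch; the struct definitions are minimal stand-ins, not kernel types:

#include <stdio.h>

struct seq_file { FILE *out; };
struct Scsi_Host { int host_no; };

static void seq_show_devices(struct seq_file *m, struct Scsi_Host *shost)
{
        fprintf(m->out, "\nDevice Information for SCSI Host %d:\n",
                shost->host_no);
}

/* returns 0: the seq_file core handles offsets, sizing and retries */
static int example_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
        seq_show_devices(m, shost);
        /* ...further helpers, each taking (m, shost)... */
        return 0;
}

int main(void)
{
        struct seq_file m = { .out = stdout };
        struct Scsi_Host h = { .host_no = 0 };

        return example_show_info(&m, &h);
}
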
*/ - cp = boardp->prtbuf; - cplen = asc_prt_driver_conf(shost, cp, ASC_PRTBUF_SIZE); - BUG_ON(cplen >= ASC_PRTBUF_SIZE); - cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen); - totcnt += cnt; - leftlen -= cnt; - if (leftlen == 0) { - ASC_DBG(1, "totcnt %d\n", totcnt); - return totcnt; - } - advoffset += cplen; - curbuf += cnt; + asc_prt_driver_conf(m, shost); #ifdef ADVANSYS_STATS /* * Display driver statistics for the board. */ - cp = boardp->prtbuf; - cplen = asc_prt_board_stats(shost, cp, ASC_PRTBUF_SIZE); - BUG_ON(cplen >= ASC_PRTBUF_SIZE); - cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen); - totcnt += cnt; - leftlen -= cnt; - if (leftlen == 0) { - ASC_DBG(1, "totcnt %d\n", totcnt); - return totcnt; - } - advoffset += cplen; - curbuf += cnt; + asc_prt_board_stats(m, shost); #endif /* ADVANSYS_STATS */ /* * Display Asc Library dynamic configuration information * for the board. */ - cp = boardp->prtbuf; - if (ASC_NARROW_BOARD(boardp)) { - cplen = asc_prt_asc_board_info(shost, cp, ASC_PRTBUF_SIZE); - } else { - cplen = asc_prt_adv_board_info(shost, cp, ASC_PRTBUF_SIZE); - } - BUG_ON(cplen >= ASC_PRTBUF_SIZE); - cnt = asc_proc_copy(advoffset, offset, curbuf, leftlen, cp, cplen); - totcnt += cnt; - leftlen -= cnt; - if (leftlen == 0) { - ASC_DBG(1, "totcnt %d\n", totcnt); - return totcnt; - } - advoffset += cplen; - curbuf += cnt; - - ASC_DBG(1, "totcnt %d\n", totcnt); - - return totcnt; + if (ASC_NARROW_BOARD(boardp)) + asc_prt_asc_board_info(m, shost); + else + asc_prt_adv_board_info(m, shost); + return 0; } #endif /* CONFIG_PROC_FS */ @@ -11743,7 +11250,7 @@ static int AdvInitGetConfig(struct pci_dev *pdev, struct Scsi_Host *shost) static struct scsi_host_template advansys_template = { .proc_name = DRV_NAME, #ifdef CONFIG_PROC_FS - .proc_info = advansys_proc_info, + .show_info = advansys_show_info, #endif .name = DRV_NAME, .info = advansys_info, @@ -11939,20 +11446,6 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop, #endif /* CONFIG_PCI */ } -#ifdef CONFIG_PROC_FS - /* - * Allocate buffer for printing information from - * /proc/scsi/advansys/[0...]. - */ - boardp->prtbuf = kmalloc(ASC_PRTBUF_SIZE, GFP_KERNEL); - if (!boardp->prtbuf) { - shost_printk(KERN_ERR, shost, "kmalloc(%d) returned NULL\n", - ASC_PRTBUF_SIZE); - ret = -ENOMEM; - goto err_unmap; - } -#endif /* CONFIG_PROC_FS */ - if (ASC_NARROW_BOARD(boardp)) { /* * Set the board bus type and PCI IRQ before @@ -12010,7 +11503,7 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop, } if (ret) - goto err_free_proc; + goto err_unmap; /* * Save the EEPROM configuration so that it can be displayed @@ -12055,7 +11548,7 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop, ASC_DBG(2, "AscInitSetConfig()\n"); ret = AscInitSetConfig(pdev, shost) ? 
-ENODEV : 0; if (ret) - goto err_free_proc; + goto err_unmap; } else { ADVEEP_3550_CONFIG *ep_3550; ADVEEP_38C0800_CONFIG *ep_38C0800; @@ -12290,7 +11783,7 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop, shost_printk(KERN_ERR, shost, "request_dma() " "%d failed %d\n", shost->dma_channel, ret); - goto err_free_proc; + goto err_unmap; } AscEnableIsaDma(shost->dma_channel); } @@ -12371,8 +11864,6 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop, if (shost->dma_channel != NO_ISA_DMA) free_dma(shost->dma_channel); #endif - err_free_proc: - kfree(boardp->prtbuf); err_unmap: if (boardp->ioremap_addr) iounmap(boardp->ioremap_addr); @@ -12406,7 +11897,6 @@ static int advansys_release(struct Scsi_Host *shost) iounmap(board->ioremap_addr); advansys_wide_free_mem(board); } - kfree(board->prtbuf); scsi_host_put(shost); ASC_DBG(1, "end\n"); return 0; diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index a284be17699..e86eb6a921f 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -857,7 +857,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup) SETPORT(SIMODE0, 0); SETPORT(SIMODE1, 0); - if( request_irq(shpnt->irq, swintr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) { + if (request_irq(shpnt->irq, swintr, IRQF_SHARED, "aha152x", shpnt)) { printk(KERN_ERR "aha152x%d: irq %d busy.\n", shpnt->host_no, shpnt->irq); goto out_host_put; } @@ -891,7 +891,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup) SETPORT(SSTAT0, 0x7f); SETPORT(SSTAT1, 0xef); - if ( request_irq(shpnt->irq, intr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) { + if (request_irq(shpnt->irq, intr, IRQF_SHARED, "aha152x", shpnt)) { printk(KERN_ERR "aha152x%d: failed to reassign irq %d.\n", shpnt->host_no, shpnt->irq); goto out_host_put; } @@ -2977,11 +2977,10 @@ static void show_queues(struct Scsi_Host *shpnt) } #undef SPRINTF -#define SPRINTF(args...) pos += sprintf(pos, ## args) +#define SPRINTF(args...) seq_printf(m, ##args) -static int get_command(char *pos, Scsi_Cmnd * ptr) +static void get_command(struct seq_file *m, Scsi_Cmnd * ptr) { - char *start = pos; int i; SPRINTF("%p: target=%d; lun=%d; cmnd=( ", @@ -3011,13 +3010,10 @@ static int get_command(char *pos, Scsi_Cmnd * ptr) if (ptr->SCp.phase & syncneg) SPRINTF("syncneg|"); SPRINTF("; next=0x%p\n", SCNEXT(ptr)); - - return (pos - start); } -static int get_ports(struct Scsi_Host *shpnt, char *pos) +static void get_ports(struct seq_file *m, struct Scsi_Host *shpnt) { - char *start = pos; int s; SPRINTF("\n%s: %s(%s) ", CURRENT_SC ? "on bus" : "waiting", states[STATE].name, states[PREVSTATE].name); @@ -3273,11 +3269,9 @@ static int get_ports(struct Scsi_Host *shpnt, char *pos) if (s & ENREQINIT) SPRINTF("ENREQINIT "); SPRINTF(")\n"); - - return (pos - start); } -static int aha152x_set_info(char *buffer, int length, struct Scsi_Host *shpnt) +static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length) { if(!shpnt || !buffer || length<8 || strncmp("aha152x ", buffer, 8)!=0) return -EINVAL; @@ -3320,26 +3314,11 @@ static int aha152x_set_info(char *buffer, int length, struct Scsi_Host *shpnt) return length; } -#undef SPRINTF -#define SPRINTF(args...) 
\ - do { if(pos < buffer + length) pos += sprintf(pos, ## args); } while(0) - -static int aha152x_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, - off_t offset, int length, int inout) +static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt) { int i; - char *pos = buffer; Scsi_Cmnd *ptr; unsigned long flags; - int thislength; - - DPRINTK(debug_procinfo, - KERN_DEBUG "aha152x_proc_info: buffer=%p offset=%ld length=%d hostno=%d inout=%d\n", - buffer, offset, length, shpnt->host_no, inout); - - - if (inout) - return aha152x_set_info(buffer, length, shpnt); SPRINTF(AHA152X_REVID "\n"); @@ -3392,25 +3371,25 @@ static int aha152x_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start if (ISSUE_SC) { SPRINTF("not yet issued commands:\n"); for (ptr = ISSUE_SC; ptr; ptr = SCNEXT(ptr)) - pos += get_command(pos, ptr); + get_command(m, ptr); } else SPRINTF("no not yet issued commands\n"); DO_UNLOCK(flags); if (CURRENT_SC) { SPRINTF("current command:\n"); - pos += get_command(pos, CURRENT_SC); + get_command(m, CURRENT_SC); } else SPRINTF("no current command\n"); if (DISCONNECTED_SC) { SPRINTF("disconnected commands:\n"); for (ptr = DISCONNECTED_SC; ptr; ptr = SCNEXT(ptr)) - pos += get_command(pos, ptr); + get_command(m, ptr); } else SPRINTF("no disconnected commands\n"); - pos += get_ports(shpnt, pos); + get_ports(m, shpnt); #if defined(AHA152X_STAT) SPRINTF("statistics:\n" @@ -3440,24 +3419,7 @@ static int aha152x_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start HOSTDATA(shpnt)->time[i]); } #endif - - DPRINTK(debug_procinfo, KERN_DEBUG "aha152x_proc_info: pos=%p\n", pos); - - thislength = pos - (buffer + offset); - DPRINTK(debug_procinfo, KERN_DEBUG "aha152x_proc_info: length=%d thislength=%d\n", length, thislength); - - if(thislength<0) { - DPRINTK(debug_procinfo, KERN_DEBUG "aha152x_proc_info: output too short\n"); - *start = NULL; - return 0; - } - - thislength = thislength<length ? thislength : length; - - DPRINTK(debug_procinfo, KERN_DEBUG "aha152x_proc_info: return %d\n", thislength); - - *start = buffer + offset; - return thislength < length ? thislength : length; + return 0; } static int aha152x_adjust_queue(struct scsi_device *device) @@ -3470,7 +3432,8 @@ static struct scsi_host_template aha152x_driver_template = { .module = THIS_MODULE, .name = AHA152X_REVID, .proc_name = "aha152x", - .proc_info = aha152x_proc_info, + .show_info = aha152x_show_info, + .write_info = aha152x_set_info, .queuecommand = aha152x_queue, .eh_abort_handler = aha152x_abort, .eh_device_reset_handler = aha152x_device_reset, diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c index df775e6ba57..5f3101797c9 100644 --- a/drivers/scsi/aha1740.c +++ b/drivers/scsi/aha1740.c @@ -106,33 +106,14 @@ static inline dma_addr_t ecb_cpu_to_dma (struct Scsi_Host *host, void *cpu) return hdata->ecb_dma_addr + offset; } -static int aha1740_proc_info(struct Scsi_Host *shpnt, char *buffer, - char **start, off_t offset, - int length, int inout) +static int aha1740_show_info(struct seq_file *m, struct Scsi_Host *shpnt) { - int len; - struct aha1740_hostdata *host; - - if (inout) - return-ENOSYS; - - host = HOSTDATA(shpnt); - - len = sprintf(buffer, "aha174x at IO:%lx, IRQ %d, SLOT %d.\n" + struct aha1740_hostdata *host = HOSTDATA(shpnt); + seq_printf(m, "aha174x at IO:%lx, IRQ %d, SLOT %d.\n" "Extended translation %sabled.\n", shpnt->io_port, shpnt->irq, host->edev->slot, host->translation ? 
"en" : "dis"); - - if (offset > len) { - *start = buffer; - return 0; - } - - *start = buffer + offset; - len -= offset; - if (len > length) - len = length; - return len; + return 0; } static int aha1740_makecode(unchar *sense, unchar *status) @@ -556,7 +537,7 @@ static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy) static struct scsi_host_template aha1740_template = { .module = THIS_MODULE, .proc_name = "aha1740", - .proc_info = aha1740_proc_info, + .show_info = aha1740_show_info, .name = "Adaptec 174x (EISA)", .queuecommand = aha1740_queuecommand, .bios_param = aha1740_biosparam, diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h index 9b059422aac..113874c1284 100644 --- a/drivers/scsi/aic7xxx/aic79xx.h +++ b/drivers/scsi/aic7xxx/aic79xx.h @@ -911,7 +911,7 @@ struct vpd_config { uint8_t length; uint8_t revision; uint8_t device_flags; - uint8_t termnation_menus[2]; + uint8_t termination_menus[2]; uint8_t fifo_threshold; uint8_t end_tag; uint8_t vpd_checksum; diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 9328121804b..69d5c43a65e 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -906,7 +906,8 @@ struct scsi_host_template aic79xx_driver_template = { .module = THIS_MODULE, .name = "aic79xx", .proc_name = "aic79xx", - .proc_info = ahd_linux_proc_info, + .show_info = ahd_linux_show_info, + .write_info = ahd_proc_write_seeprom, .info = ahd_linux_info, .queuecommand = ahd_linux_queue, .eh_abort_handler = ahd_linux_abort, @@ -1702,19 +1703,13 @@ ahd_send_async(struct ahd_softc *ahd, char channel, switch (code) { case AC_TRANSFER_NEG: { - char buf[80]; struct scsi_target *starget; - struct info_str info; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; unsigned int target_ppr_options; BUG_ON(target == CAM_TARGET_WILDCARD); - info.buffer = buf; - info.length = sizeof(buf); - info.offset = 0; - info.pos = 0; tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id, target, &tstate); diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h index 28e43498cdf..c58fa33c659 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.h +++ b/drivers/scsi/aic7xxx/aic79xx_osm.h @@ -379,14 +379,6 @@ void ahd_insb(struct ahd_softc * ahd, long port, int ahd_linux_register_host(struct ahd_softc *, struct scsi_host_template *); -/*************************** Pretty Printing **********************************/ -struct info_str { - char *buffer; - int length; - off_t offset; - int pos; -}; - /******************************** Locking *************************************/ static inline void ahd_lockinit(struct ahd_softc *ahd) @@ -513,8 +505,8 @@ ahd_flush_device_writes(struct ahd_softc *ahd) } /**************************** Proc FS Support *********************************/ -int ahd_linux_proc_info(struct Scsi_Host *, char *, char **, - off_t, int, int); +int ahd_proc_write_seeprom(struct Scsi_Host *, char *, int); +int ahd_linux_show_info(struct seq_file *,struct Scsi_Host *); /*********************** Transaction Access Wrappers **************************/ static inline void ahd_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t); diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c index 14b5f8d0e7f..cc9bd26f5d1 100644 --- a/drivers/scsi/aic7xxx/aic79xx_pci.c +++ b/drivers/scsi/aic7xxx/aic79xx_pci.c @@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd) for (bit = 0; bit < 8; bit++) { if ((pci_status[i] & (0x1 << bit)) != 0) { - static 
const char *s; + const char *s; s = pci_status_strings[bit]; if (i == 7/*TARG*/ && bit == 3) @@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat) for (bit = 0; bit < 8; bit++) { - if ((split_status[i] & (0x1 << bit)) != 0) { - static const char *s; - - s = split_status_strings[bit]; - printk(s, ahd_name(ahd), + if ((split_status[i] & (0x1 << bit)) != 0) + printk(split_status_strings[bit], ahd_name(ahd), split_status_source[i]); - } if (i > 1) continue; - if ((sg_split_status[i] & (0x1 << bit)) != 0) { - static const char *s; - - s = split_status_strings[bit]; - printk(s, ahd_name(ahd), "SG"); - } + if ((sg_split_status[i] & (0x1 << bit)) != 0) + printk(split_status_strings[bit], ahd_name(ahd), "SG"); } } /* diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c index 59c85d5a153..e9778b4f7e3 100644 --- a/drivers/scsi/aic7xxx/aic79xx_proc.c +++ b/drivers/scsi/aic7xxx/aic79xx_proc.c @@ -42,16 +42,12 @@ #include "aic79xx_osm.h" #include "aic79xx_inline.h" -static void copy_mem_info(struct info_str *info, char *data, int len); -static int copy_info(struct info_str *info, char *fmt, ...); static void ahd_dump_target_state(struct ahd_softc *ahd, - struct info_str *info, + struct seq_file *m, u_int our_id, char channel, u_int target_id); -static void ahd_dump_device_state(struct info_str *info, +static void ahd_dump_device_state(struct seq_file *m, struct scsi_device *sdev); -static int ahd_proc_write_seeprom(struct ahd_softc *ahd, - char *buffer, int length); /* * Table of syncrates that don't follow the "divisible by 4" @@ -93,58 +89,15 @@ ahd_calc_syncsrate(u_int period_factor) return (10000000 / (period_factor * 4 * 10)); } - -static void -copy_mem_info(struct info_str *info, char *data, int len) -{ - if (info->pos + len > info->offset + info->length) - len = info->offset + info->length - info->pos; - - if (info->pos + len < info->offset) { - info->pos += len; - return; - } - - if (info->pos < info->offset) { - off_t partial; - - partial = info->offset - info->pos; - data += partial; - info->pos += partial; - len -= partial; - } - - if (len > 0) { - memcpy(info->buffer, data, len); - info->pos += len; - info->buffer += len; - } -} - -static int -copy_info(struct info_str *info, char *fmt, ...) 
-{ - va_list args; - char buf[256]; - int len; - - va_start(args, fmt); - len = vsprintf(buf, fmt, args); - va_end(args); - - copy_mem_info(info, buf, len); - return (len); -} - static void -ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo) +ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo) { u_int speed; u_int freq; u_int mb; if (tinfo->period == AHD_PERIOD_UNKNOWN) { - copy_info(info, "Renegotiation Pending\n"); + seq_printf(m, "Renegotiation Pending\n"); return; } speed = 3300; @@ -156,34 +109,34 @@ ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo) speed *= (0x01 << tinfo->width); mb = speed / 1000; if (mb > 0) - copy_info(info, "%d.%03dMB/s transfers", mb, speed % 1000); + seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000); else - copy_info(info, "%dKB/s transfers", speed); + seq_printf(m, "%dKB/s transfers", speed); if (freq != 0) { int printed_options; printed_options = 0; - copy_info(info, " (%d.%03dMHz", freq / 1000, freq % 1000); + seq_printf(m, " (%d.%03dMHz", freq / 1000, freq % 1000); if ((tinfo->ppr_options & MSG_EXT_PPR_RD_STRM) != 0) { - copy_info(info, " RDSTRM"); + seq_printf(m, " RDSTRM"); printed_options++; } if ((tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0) { - copy_info(info, "%s", printed_options ? "|DT" : " DT"); + seq_printf(m, "%s", printed_options ? "|DT" : " DT"); printed_options++; } if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { - copy_info(info, "%s", printed_options ? "|IU" : " IU"); + seq_printf(m, "%s", printed_options ? "|IU" : " IU"); printed_options++; } if ((tinfo->ppr_options & MSG_EXT_PPR_RTI) != 0) { - copy_info(info, "%s", + seq_printf(m, "%s", printed_options ? "|RTI" : " RTI"); printed_options++; } if ((tinfo->ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) { - copy_info(info, "%s", + seq_printf(m, "%s", printed_options ? 
"|QAS" : " QAS"); printed_options++; } @@ -191,19 +144,19 @@ ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo) if (tinfo->width > 0) { if (freq != 0) { - copy_info(info, ", "); + seq_printf(m, ", "); } else { - copy_info(info, " ("); + seq_printf(m, " ("); } - copy_info(info, "%dbit)", 8 * (0x01 << tinfo->width)); + seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); } else if (freq != 0) { - copy_info(info, ")"); + seq_printf(m, ")"); } - copy_info(info, "\n"); + seq_printf(m, "\n"); } static void -ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info, +ahd_dump_target_state(struct ahd_softc *ahd, struct seq_file *m, u_int our_id, char channel, u_int target_id) { struct scsi_target *starget; @@ -213,17 +166,17 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info, tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id, &tstate); - copy_info(info, "Target %d Negotiation Settings\n", target_id); - copy_info(info, "\tUser: "); - ahd_format_transinfo(info, &tinfo->user); + seq_printf(m, "Target %d Negotiation Settings\n", target_id); + seq_printf(m, "\tUser: "); + ahd_format_transinfo(m, &tinfo->user); starget = ahd->platform_data->starget[target_id]; if (starget == NULL) return; - copy_info(info, "\tGoal: "); - ahd_format_transinfo(info, &tinfo->goal); - copy_info(info, "\tCurr: "); - ahd_format_transinfo(info, &tinfo->curr); + seq_printf(m, "\tGoal: "); + ahd_format_transinfo(m, &tinfo->goal); + seq_printf(m, "\tCurr: "); + ahd_format_transinfo(m, &tinfo->curr); for (lun = 0; lun < AHD_NUM_LUNS; lun++) { struct scsi_device *dev; @@ -233,29 +186,30 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info, if (dev == NULL) continue; - ahd_dump_device_state(info, dev); + ahd_dump_device_state(m, dev); } } static void -ahd_dump_device_state(struct info_str *info, struct scsi_device *sdev) +ahd_dump_device_state(struct seq_file *m, struct scsi_device *sdev) { struct ahd_linux_device *dev = scsi_transport_device_data(sdev); - copy_info(info, "\tChannel %c Target %d Lun %d Settings\n", + seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n", sdev->sdev_target->channel + 'A', sdev->sdev_target->id, sdev->lun); - copy_info(info, "\t\tCommands Queued %ld\n", dev->commands_issued); - copy_info(info, "\t\tCommands Active %d\n", dev->active); - copy_info(info, "\t\tCommand Openings %d\n", dev->openings); - copy_info(info, "\t\tMax Tagged Openings %d\n", dev->maxtags); - copy_info(info, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen); + seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued); + seq_printf(m, "\t\tCommands Active %d\n", dev->active); + seq_printf(m, "\t\tCommand Openings %d\n", dev->openings); + seq_printf(m, "\t\tMax Tagged Openings %d\n", dev->maxtags); + seq_printf(m, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen); } -static int -ahd_proc_write_seeprom(struct ahd_softc *ahd, char *buffer, int length) +int +ahd_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length) { + struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata; ahd_mode_state saved_modes; int have_seeprom; u_long s; @@ -319,64 +273,45 @@ done: * Return information to handle /proc support for the driver. 
*/ int -ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start, - off_t offset, int length, int inout) +ahd_linux_show_info(struct seq_file *m, struct Scsi_Host *shost) { struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata; - struct info_str info; char ahd_info[256]; u_int max_targ; u_int i; - int retval; - /* Has data been written to the file? */ - if (inout == TRUE) { - retval = ahd_proc_write_seeprom(ahd, buffer, length); - goto done; - } - - if (start) - *start = buffer; - - info.buffer = buffer; - info.length = length; - info.offset = offset; - info.pos = 0; - - copy_info(&info, "Adaptec AIC79xx driver version: %s\n", + seq_printf(m, "Adaptec AIC79xx driver version: %s\n", AIC79XX_DRIVER_VERSION); - copy_info(&info, "%s\n", ahd->description); + seq_printf(m, "%s\n", ahd->description); ahd_controller_info(ahd, ahd_info); - copy_info(&info, "%s\n", ahd_info); - copy_info(&info, "Allocated SCBs: %d, SG List Length: %d\n\n", + seq_printf(m, "%s\n", ahd_info); + seq_printf(m, "Allocated SCBs: %d, SG List Length: %d\n\n", ahd->scb_data.numscbs, AHD_NSEG); max_targ = 16; if (ahd->seep_config == NULL) - copy_info(&info, "No Serial EEPROM\n"); + seq_printf(m, "No Serial EEPROM\n"); else { - copy_info(&info, "Serial EEPROM:\n"); + seq_printf(m, "Serial EEPROM:\n"); for (i = 0; i < sizeof(*ahd->seep_config)/2; i++) { if (((i % 8) == 0) && (i != 0)) { - copy_info(&info, "\n"); + seq_printf(m, "\n"); } - copy_info(&info, "0x%.4x ", + seq_printf(m, "0x%.4x ", ((uint16_t*)ahd->seep_config)[i]); } - copy_info(&info, "\n"); + seq_printf(m, "\n"); } - copy_info(&info, "\n"); + seq_printf(m, "\n"); if ((ahd->features & AHD_WIDE) == 0) max_targ = 8; for (i = 0; i < max_targ; i++) { - ahd_dump_target_state(ahd, &info, ahd->our_id, 'A', + ahd_dump_target_state(ahd, m, ahd->our_id, 'A', /*target_id*/i); } - retval = info.pos > info.offset ? info.pos - info.offset : 0; -done: - return (retval); + return 0; } diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index 5a477cdc780..114ff0c6e31 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -145,16 +145,6 @@ static struct scsi_transport_template *ahc_linux_transport_template = NULL; #endif /* - * Control collection of SCSI transfer statistics for the /proc filesystem. - * - * NOTE: Do NOT enable this when running on kernels version 1.2.x and below. - * NOTE: This does affect performance since it has to maintain statistics. 
- */ -#ifdef CONFIG_AIC7XXX_PROC_STATS -#define AIC7XXX_PROC_STATS -#endif - -/* * To change the default number of tagged transactions allowed per-device, * add a line to the lilo.conf file like: * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}" @@ -803,7 +793,8 @@ struct scsi_host_template aic7xxx_driver_template = { .module = THIS_MODULE, .name = "aic7xxx", .proc_name = "aic7xxx", - .proc_info = ahc_linux_proc_info, + .show_info = ahc_linux_show_info, + .write_info = ahc_proc_write_seeprom, .info = ahc_linux_info, .queuecommand = ahc_linux_queue, .eh_abort_handler = ahc_linux_abort, @@ -1631,10 +1622,8 @@ ahc_send_async(struct ahc_softc *ahc, char channel, switch (code) { case AC_TRANSFER_NEG: { - char buf[80]; struct scsi_target *starget; struct ahc_linux_target *targ; - struct info_str info; struct ahc_initiator_tinfo *tinfo; struct ahc_tmode_tstate *tstate; int target_offset; @@ -1642,10 +1631,6 @@ ahc_send_async(struct ahc_softc *ahc, char channel, BUG_ON(target == CAM_TARGET_WILDCARD); - info.buffer = buf; - info.length = sizeof(buf); - info.offset = 0; - info.pos = 0; tinfo = ahc_fetch_transinfo(ahc, channel, channel == 'A' ? ahc->our_id : ahc->our_id_b, diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h index bca0fb83f55..bc4cca92ff0 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.h +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h @@ -383,14 +383,6 @@ void ahc_insb(struct ahc_softc * ahc, long port, int ahc_linux_register_host(struct ahc_softc *, struct scsi_host_template *); -/*************************** Pretty Printing **********************************/ -struct info_str { - char *buffer; - int length; - off_t offset; - int pos; -}; - /******************************** Locking *************************************/ /* Lock protecting internal data structures */ @@ -523,8 +515,8 @@ ahc_flush_device_writes(struct ahc_softc *ahc) } /**************************** Proc FS Support *********************************/ -int ahc_linux_proc_info(struct Scsi_Host *, char *, char **, - off_t, int, int); +int ahc_proc_write_seeprom(struct Scsi_Host *, char *, int); +int ahc_linux_show_info(struct seq_file *, struct Scsi_Host *); /*************************** Domain Validation ********************************/ /*********************** Transaction Access Wrappers *************************/ diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c index 6917b4f5ac9..22d5a949ec8 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c @@ -692,7 +692,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci) * ID as valid. 
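(Editorial note: the hunk just below corrects a call that passed its four uint16_t arguments in transposed order — a mistake the compiler cannot flag when every parameter has the same type. One defensive pattern, sketched here purely for illustration and not used by this driver, is to wrap each parameter in its own single-member struct so a transposition becomes a type error:

	#include <stdint.h>

	/* Distinct wrapper types: swapping two arguments no longer compiles. */
	struct vend_id { uint16_t v; };
	struct dev_id  { uint16_t v; };

	static int subdev_valid(struct dev_id device, struct vend_id vendor)
	{
		/* Illustrative check only. */
		return vendor.v == 0x9005 && device.v != 0xffff;
	}

	/*
	 * Correct:    subdev_valid((struct dev_id){ .v = dev },
	 *                          (struct vend_id){ .v = ven });
	 * Transposed: subdev_valid((struct vend_id){ .v = ven }, ...)
	 *             -> compile error instead of a silent logic bug.
	 */

)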
*/ if (ahc_get_pci_function(pci) > 0 - && ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice) + && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor) && SUBID_9005_MFUNCENB(subdevice) == 0) return (NULL); diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c index f2525f8ed1c..383a3d11652 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_proc.c +++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c @@ -43,16 +43,12 @@ #include "aic7xxx_inline.h" #include "aic7xxx_93cx6.h" -static void copy_mem_info(struct info_str *info, char *data, int len); -static int copy_info(struct info_str *info, char *fmt, ...); static void ahc_dump_target_state(struct ahc_softc *ahc, - struct info_str *info, + struct seq_file *m, u_int our_id, char channel, u_int target_id, u_int target_offset); -static void ahc_dump_device_state(struct info_str *info, +static void ahc_dump_device_state(struct seq_file *m, struct scsi_device *dev); -static int ahc_proc_write_seeprom(struct ahc_softc *ahc, - char *buffer, int length); /* * Table of syncrates that don't follow the "divisible by 4" @@ -94,51 +90,8 @@ ahc_calc_syncsrate(u_int period_factor) return (10000000 / (period_factor * 4 * 10)); } - -static void -copy_mem_info(struct info_str *info, char *data, int len) -{ - if (info->pos + len > info->offset + info->length) - len = info->offset + info->length - info->pos; - - if (info->pos + len < info->offset) { - info->pos += len; - return; - } - - if (info->pos < info->offset) { - off_t partial; - - partial = info->offset - info->pos; - data += partial; - info->pos += partial; - len -= partial; - } - - if (len > 0) { - memcpy(info->buffer, data, len); - info->pos += len; - info->buffer += len; - } -} - -static int -copy_info(struct info_str *info, char *fmt, ...) -{ - va_list args; - char buf[256]; - int len; - - va_start(args, fmt); - len = vsprintf(buf, fmt, args); - va_end(args); - - copy_mem_info(info, buf, len); - return (len); -} - static void -ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo) +ahc_format_transinfo(struct seq_file *m, struct ahc_transinfo *tinfo) { u_int speed; u_int freq; @@ -153,12 +106,12 @@ ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo) speed *= (0x01 << tinfo->width); mb = speed / 1000; if (mb > 0) - copy_info(info, "%d.%03dMB/s transfers", mb, speed % 1000); + seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000); else - copy_info(info, "%dKB/s transfers", speed); + seq_printf(m, "%dKB/s transfers", speed); if (freq != 0) { - copy_info(info, " (%d.%03dMHz%s, offset %d", + seq_printf(m, " (%d.%03dMHz%s, offset %d", freq / 1000, freq % 1000, (tinfo->ppr_options & MSG_EXT_PPR_DT_REQ) != 0 ? 
" DT" : "", tinfo->offset); @@ -166,19 +119,19 @@ ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo) if (tinfo->width > 0) { if (freq != 0) { - copy_info(info, ", "); + seq_printf(m, ", "); } else { - copy_info(info, " ("); + seq_printf(m, " ("); } - copy_info(info, "%dbit)", 8 * (0x01 << tinfo->width)); + seq_printf(m, "%dbit)", 8 * (0x01 << tinfo->width)); } else if (freq != 0) { - copy_info(info, ")"); + seq_printf(m, ")"); } - copy_info(info, "\n"); + seq_printf(m, "\n"); } static void -ahc_dump_target_state(struct ahc_softc *ahc, struct info_str *info, +ahc_dump_target_state(struct ahc_softc *ahc, struct seq_file *m, u_int our_id, char channel, u_int target_id, u_int target_offset) { @@ -190,18 +143,18 @@ ahc_dump_target_state(struct ahc_softc *ahc, struct info_str *info, tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id, &tstate); if ((ahc->features & AHC_TWIN) != 0) - copy_info(info, "Channel %c ", channel); - copy_info(info, "Target %d Negotiation Settings\n", target_id); - copy_info(info, "\tUser: "); - ahc_format_transinfo(info, &tinfo->user); + seq_printf(m, "Channel %c ", channel); + seq_printf(m, "Target %d Negotiation Settings\n", target_id); + seq_printf(m, "\tUser: "); + ahc_format_transinfo(m, &tinfo->user); starget = ahc->platform_data->starget[target_offset]; if (!starget) return; - copy_info(info, "\tGoal: "); - ahc_format_transinfo(info, &tinfo->goal); - copy_info(info, "\tCurr: "); - ahc_format_transinfo(info, &tinfo->curr); + seq_printf(m, "\tGoal: "); + ahc_format_transinfo(m, &tinfo->goal); + seq_printf(m, "\tCurr: "); + ahc_format_transinfo(m, &tinfo->curr); for (lun = 0; lun < AHC_NUM_LUNS; lun++) { struct scsi_device *sdev; @@ -211,29 +164,30 @@ ahc_dump_target_state(struct ahc_softc *ahc, struct info_str *info, if (sdev == NULL) continue; - ahc_dump_device_state(info, sdev); + ahc_dump_device_state(m, sdev); } } static void -ahc_dump_device_state(struct info_str *info, struct scsi_device *sdev) +ahc_dump_device_state(struct seq_file *m, struct scsi_device *sdev) { struct ahc_linux_device *dev = scsi_transport_device_data(sdev); - copy_info(info, "\tChannel %c Target %d Lun %d Settings\n", + seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n", sdev->sdev_target->channel + 'A', sdev->sdev_target->id, sdev->lun); - copy_info(info, "\t\tCommands Queued %ld\n", dev->commands_issued); - copy_info(info, "\t\tCommands Active %d\n", dev->active); - copy_info(info, "\t\tCommand Openings %d\n", dev->openings); - copy_info(info, "\t\tMax Tagged Openings %d\n", dev->maxtags); - copy_info(info, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen); + seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued); + seq_printf(m, "\t\tCommands Active %d\n", dev->active); + seq_printf(m, "\t\tCommand Openings %d\n", dev->openings); + seq_printf(m, "\t\tMax Tagged Openings %d\n", dev->maxtags); + seq_printf(m, "\t\tDevice Queue Frozen Count %d\n", dev->qfrozen); } -static int -ahc_proc_write_seeprom(struct ahc_softc *ahc, char *buffer, int length) +int +ahc_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length) { + struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata; struct seeprom_descriptor sd; int have_seeprom; u_long s; @@ -332,53 +286,36 @@ done: * Return information to handle /proc support for the driver. 
*/ int -ahc_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start, - off_t offset, int length, int inout) +ahc_linux_show_info(struct seq_file *m, struct Scsi_Host *shost) { struct ahc_softc *ahc = *(struct ahc_softc **)shost->hostdata; - struct info_str info; char ahc_info[256]; u_int max_targ; u_int i; - int retval; - /* Has data been written to the file? */ - if (inout == TRUE) { - retval = ahc_proc_write_seeprom(ahc, buffer, length); - goto done; - } - - if (start) - *start = buffer; - - info.buffer = buffer; - info.length = length; - info.offset = offset; - info.pos = 0; - - copy_info(&info, "Adaptec AIC7xxx driver version: %s\n", + seq_printf(m, "Adaptec AIC7xxx driver version: %s\n", AIC7XXX_DRIVER_VERSION); - copy_info(&info, "%s\n", ahc->description); + seq_printf(m, "%s\n", ahc->description); ahc_controller_info(ahc, ahc_info); - copy_info(&info, "%s\n", ahc_info); - copy_info(&info, "Allocated SCBs: %d, SG List Length: %d\n\n", + seq_printf(m, "%s\n", ahc_info); + seq_printf(m, "Allocated SCBs: %d, SG List Length: %d\n\n", ahc->scb_data->numscbs, AHC_NSEG); if (ahc->seep_config == NULL) - copy_info(&info, "No Serial EEPROM\n"); + seq_printf(m, "No Serial EEPROM\n"); else { - copy_info(&info, "Serial EEPROM:\n"); + seq_printf(m, "Serial EEPROM:\n"); for (i = 0; i < sizeof(*ahc->seep_config)/2; i++) { if (((i % 8) == 0) && (i != 0)) { - copy_info(&info, "\n"); + seq_printf(m, "\n"); } - copy_info(&info, "0x%.4x ", + seq_printf(m, "0x%.4x ", ((uint16_t*)ahc->seep_config)[i]); } - copy_info(&info, "\n"); + seq_printf(m, "\n"); } - copy_info(&info, "\n"); + seq_printf(m, "\n"); max_targ = 16; if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) @@ -398,10 +335,8 @@ ahc_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start, target_id = i % 8; } - ahc_dump_target_state(ahc, &info, our_id, + ahc_dump_target_state(ahc, m, our_id, channel, target_id, i); } - retval = info.pos > info.offset ? info.pos - info.offset : 0; -done: - return (retval); + return 0; } diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h index 9df9e2ce353..8373447bd7d 100644 --- a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h @@ -209,7 +209,6 @@ struct instruction { #define AIC_OP_JC16 0x9105 #define AIC_OP_JNC16 0x9205 #define AIC_OP_CALL16 0x9305 -#define AIC_OP_CALL16 0x9305 /* Page extension is low three bits of second opcode byte. */ #define AIC_OP_JMPF 0xA005 diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c deleted file mode 100644 index 5b212f0df89..00000000000 --- a/drivers/scsi/aic7xxx_old.c +++ /dev/null @@ -1,11149 +0,0 @@ -/*+M************************************************************************* - * Adaptec AIC7xxx device driver for Linux. - * - * Copyright (c) 1994 John Aycock - * The University of Calgary Department of Computer Science. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; see the file COPYING. 
If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F - * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA - * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide, - * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux, - * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file - * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual, - * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the - * ANSI SCSI-2 specification (draft 10c), ... - * - * -------------------------------------------------------------------------- - * - * Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org): - * - * Substantially modified to include support for wide and twin bus - * adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes, - * SCB paging, and other rework of the code. - * - * Parts of this driver were also based on the FreeBSD driver by - * Justin T. Gibbs. His copyright follows: - * - * -------------------------------------------------------------------------- - * Copyright (c) 1994-1997 Justin Gibbs. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions, and the following disclaimer, - * without modification, immediately at the beginning of the file. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * Where this Software is combined with software released under the terms of - * the GNU General Public License ("GPL") and the terms of the GPL would require the - * combined work to also be released under the terms of the GPL, the terms - * and conditions of this License will apply in addition to those of the - * GPL with the exception of any terms or conditions of this License that - * conflict with, or are expressly prohibited by, the GPL. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * $Id: aic7xxx.c,v 1.119 1997/06/27 19:39:18 gibbs Exp $ - *--------------------------------------------------------------------------- - * - * Thanks also go to (in alphabetical order) the following: - * - * Rory Bolt - Sequencer bug fixes - * Jay Estabrook - Initial DEC Alpha support - * Doug Ledford - Much needed abort/reset bug fixes - * Kai Makisara - DMAing of SCBs - * - * A Boot time option was also added for not resetting the scsi bus. - * - * Form: aic7xxx=extended - * aic7xxx=no_reset - * aic7xxx=ultra - * aic7xxx=irq_trigger:[0,1] # 0 edge, 1 level - * aic7xxx=verbose - * - * Daniel M. Eischen, deischen@iworks.InterWorks.org, 1/23/97 - * - * $Id: aic7xxx.c,v 4.1 1997/06/12 08:23:42 deang Exp $ - *-M*************************************************************************/ - -/*+M************************************************************************** - * - * Further driver modifications made by Doug Ledford <dledford@redhat.com> - * - * Copyright (c) 1997-1999 Doug Ledford - * - * These changes are released under the same licensing terms as the FreeBSD - * driver written by Justin Gibbs. Please see his Copyright notice above - * for the exact terms and conditions covering my changes as well as the - * warranty statement. - * - * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include - * but are not limited to: - * - * 1: Import of the latest FreeBSD sequencer code for this driver - * 2: Modification of kernel code to accommodate different sequencer semantics - * 3: Extensive changes throughout kernel portion of driver to improve - * abort/reset processing and error hanndling - * 4: Other work contributed by various people on the Internet - * 5: Changes to printk information and verbosity selection code - * 6: General reliability related changes, especially in IRQ management - * 7: Modifications to the default probe/attach order for supported cards - * 8: SMP friendliness has been improved - * - * Overall, this driver represents a significant departure from the official - * aic7xxx driver released by Dan Eischen in two ways. First, in the code - * itself. A diff between the two version of the driver is now a several - * thousand line diff. Second, in approach to solving the same problem. The - * problem is importing the FreeBSD aic7xxx driver code to linux can be a - * difficult and time consuming process, that also can be error prone. Dan - * Eischen's official driver uses the approach that the linux and FreeBSD - * drivers should be as identical as possible. To that end, his next version - * of this driver will be using a mid-layer code library that he is developing - * to moderate communications between the linux mid-level SCSI code and the - * low level FreeBSD driver. He intends to be able to essentially drop the - * FreeBSD driver into the linux kernel with only a few minor tweaks to some - * include files and the like and get things working, making for fast easy - * imports of the FreeBSD code into linux. - * - * I disagree with Dan's approach. Not that I don't think his way of doing - * things would be nice, easy to maintain, and create a more uniform driver - * between FreeBSD and Linux. I have no objection to those issues. My - * disagreement is on the needed functionality. There simply are certain - * things that are done differently in FreeBSD than linux that will cause - * problems for this driver regardless of any middle ware Dan implements. - * The biggest example of this at the moment is interrupt semantics. 
Linux - * doesn't provide the same protection techniques as FreeBSD does, nor can - * they be easily implemented in any middle ware code since they would truly - * belong in the kernel proper and would effect all drivers. For the time - * being, I see issues such as these as major stumbling blocks to the - * reliability of code based upon such middle ware. Therefore, I choose to - * use a different approach to importing the FreeBSD code that doesn't - * involve any middle ware type code. My approach is to import the sequencer - * code from FreeBSD wholesale. Then, to only make changes in the kernel - * portion of the driver as they are needed for the new sequencer semantics. - * In this way, the portion of the driver that speaks to the rest of the - * linux kernel is fairly static and can be changed/modified to solve - * any problems one might encounter without concern for the FreeBSD driver. - * - * Note: If time and experience should prove me wrong that the middle ware - * code Dan writes is reliable in its operation, then I'll retract my above - * statements. But, for those that don't know, I'm from Missouri (in the US) - * and our state motto is "The Show-Me State". Well, before I will put - * faith into it, you'll have to show me that it works :) - * - *_M*************************************************************************/ - -/* - * The next three defines are user configurable. These should be the only - * defines a user might need to get in here and change. There are other - * defines buried deeper in the code, but those really shouldn't need touched - * under normal conditions. - */ - -/* - * AIC7XXX_STRICT_PCI_SETUP - * Should we assume the PCI config options on our controllers are set with - * sane and proper values, or should we be anal about our PCI config - * registers and force them to what we want? The main advantage to - * defining this option is on non-Intel hardware where the BIOS may not - * have been run to set things up, or if you have one of the BIOSless - * Adaptec controllers, such as a 2910, that don't get set up by the - * BIOS. However, keep in mind that we really do set the most important - * items in the driver regardless of this setting, this only controls some - * of the more esoteric PCI options on these cards. In that sense, I - * would default to leaving this off. However, if people wish to try - * things both ways, that would also help me to know if there are some - * machines where it works one way but not another. - * - * -- July 7, 17:09 - * OK...I need this on my machine for testing, so the default is to - * leave it defined. - * - * -- July 7, 18:49 - * I needed it for testing, but it didn't make any difference, so back - * off she goes. - * - * -- July 16, 23:04 - * I turned it back on to try and compensate for the 2.1.x PCI code - * which no longer relies solely on the BIOS and now tries to set - * things itself. - */ - -#define AIC7XXX_STRICT_PCI_SETUP - -/* - * AIC7XXX_VERBOSE_DEBUGGING - * This option enables a lot of extra printk();s in the code, surrounded - * by if (aic7xxx_verbose ...) statements. Executing all of those if - * statements and the extra checks can get to where it actually does have - * an impact on CPU usage and such, as well as code size. Disabling this - * define will keep some of those from becoming part of the code. - * - * NOTE: Currently, this option has no real effect, I will be adding the - * various #ifdef's in the code later when I've decided a section is - * complete and no longer needs debugging. 
OK...a lot of things are now - * surrounded by this define, so turning this off does have an impact. - */ - -/* - * #define AIC7XXX_VERBOSE_DEBUGGING - */ - -#include <linux/module.h> -#include <stdarg.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/byteorder.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/ioport.h> -#include <linux/delay.h> -#include <linux/pci.h> -#include <linux/proc_fs.h> -#include <linux/blkdev.h> -#include <linux/init.h> -#include <linux/spinlock.h> -#include <linux/smp.h> -#include <linux/interrupt.h> -#include "scsi.h" -#include <scsi/scsi_host.h> -#include "aic7xxx_old/aic7xxx.h" - -#include "aic7xxx_old/sequencer.h" -#include "aic7xxx_old/scsi_message.h" -#include "aic7xxx_old/aic7xxx_reg.h" -#include <scsi/scsicam.h> - -#include <linux/stat.h> -#include <linux/slab.h> /* for kmalloc() */ - -#define AIC7XXX_C_VERSION "5.2.6" - -#define ALL_TARGETS -1 -#define ALL_CHANNELS -1 -#define ALL_LUNS -1 -#define MAX_TARGETS 16 -#define MAX_LUNS 8 -#ifndef TRUE -# define TRUE 1 -#endif -#ifndef FALSE -# define FALSE 0 -#endif - -#if defined(__powerpc__) || defined(__i386__) || defined(__x86_64__) -# define MMAPIO -#endif - -/* - * You can try raising me for better performance or lowering me if you have - * flaky devices that go off the scsi bus when hit with too many tagged - * commands (like some IBM SCSI-3 LVD drives). - */ -#define AIC7XXX_CMDS_PER_DEVICE 32 - -typedef struct -{ - unsigned char tag_commands[16]; /* Allow for wide/twin adapters. */ -} adapter_tag_info_t; - -/* - * Make a define that will tell the driver not to the default tag depth - * everywhere. - */ -#define DEFAULT_TAG_COMMANDS {0, 0, 0, 0, 0, 0, 0, 0,\ - 0, 0, 0, 0, 0, 0, 0, 0} - -/* - * Modify this as you see fit for your system. By setting tag_commands - * to 0, the driver will use it's own algorithm for determining the - * number of commands to use (see above). When 255, the driver will - * not enable tagged queueing for that particular device. When positive - * (> 0) and (< 255) the values in the array are used for the queue_depth. - * Note that the maximum value for an entry is 254, but you're insane if - * you try to use that many commands on one device. - * - * In this example, the first line will disable tagged queueing for all - * the devices on the first probed aic7xxx adapter. - * - * The second line enables tagged queueing with 4 commands/LUN for IDs - * (1, 2-11, 13-15), disables tagged queueing for ID 12, and tells the - * driver to use its own algorithm for ID 1. - * - * The third line is the same as the first line. - * - * The fourth line disables tagged queueing for devices 0 and 3. It - * enables tagged queueing for the other IDs, with 16 commands/LUN - * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for - * IDs 2, 5-7, and 9-15. - */ - -/* - * NOTE: The below structure is for reference only, the actual structure - * to modify in order to change things is found after this fake one. 
- * -adapter_tag_info_t aic7xxx_tag_info[] = -{ - {DEFAULT_TAG_COMMANDS}, - {{4, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 255, 4, 4, 4}}, - {DEFAULT_TAG_COMMANDS}, - {{255, 16, 4, 255, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}} -}; -*/ - -static adapter_tag_info_t aic7xxx_tag_info[] = -{ - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS}, - {DEFAULT_TAG_COMMANDS} -}; - - -/* - * Define an array of board names that can be indexed by aha_type. - * Don't forget to change this when changing the types! - */ -static const char *board_names[] = { - "AIC-7xxx Unknown", /* AIC_NONE */ - "Adaptec AIC-7810 Hardware RAID Controller", /* AIC_7810 */ - "Adaptec AIC-7770 SCSI host adapter", /* AIC_7770 */ - "Adaptec AHA-274X SCSI host adapter", /* AIC_7771 */ - "Adaptec AHA-284X SCSI host adapter", /* AIC_284x */ - "Adaptec AIC-7850 SCSI host adapter", /* AIC_7850 */ - "Adaptec AIC-7855 SCSI host adapter", /* AIC_7855 */ - "Adaptec AIC-7860 Ultra SCSI host adapter", /* AIC_7860 */ - "Adaptec AHA-2940A Ultra SCSI host adapter", /* AIC_7861 */ - "Adaptec AIC-7870 SCSI host adapter", /* AIC_7870 */ - "Adaptec AHA-294X SCSI host adapter", /* AIC_7871 */ - "Adaptec AHA-394X SCSI host adapter", /* AIC_7872 */ - "Adaptec AHA-398X SCSI host adapter", /* AIC_7873 */ - "Adaptec AHA-2944 SCSI host adapter", /* AIC_7874 */ - "Adaptec AIC-7880 Ultra SCSI host adapter", /* AIC_7880 */ - "Adaptec AHA-294X Ultra SCSI host adapter", /* AIC_7881 */ - "Adaptec AHA-394X Ultra SCSI host adapter", /* AIC_7882 */ - "Adaptec AHA-398X Ultra SCSI host adapter", /* AIC_7883 */ - "Adaptec AHA-2944 Ultra SCSI host adapter", /* AIC_7884 */ - "Adaptec AHA-2940UW Pro Ultra SCSI host adapter", /* AIC_7887 */ - "Adaptec AIC-7895 Ultra SCSI host adapter", /* AIC_7895 */ - "Adaptec AIC-7890/1 Ultra2 SCSI host adapter", /* AIC_7890 */ - "Adaptec AHA-293X Ultra2 SCSI host adapter", /* AIC_7890 */ - "Adaptec AHA-294X Ultra2 SCSI host adapter", /* AIC_7890 */ - "Adaptec AIC-7896/7 Ultra2 SCSI host adapter", /* AIC_7896 */ - "Adaptec AHA-394X Ultra2 SCSI host adapter", /* AIC_7897 */ - "Adaptec AHA-395X Ultra2 SCSI host adapter", /* AIC_7897 */ - "Adaptec PCMCIA SCSI controller", /* card bus stuff */ - "Adaptec AIC-7892 Ultra 160/m SCSI host adapter", /* AIC_7892 */ - "Adaptec AIC-7899 Ultra 160/m SCSI host adapter", /* AIC_7899 */ -}; - -/* - * There should be a specific return value for this in scsi.h, but - * it seems that most drivers ignore it. - */ -#define DID_UNDERFLOW DID_ERROR - -/* - * What we want to do is have the higher level scsi driver requeue - * the command to us. There is no specific driver status for this - * condition, but the higher level scsi driver will requeue the - * command on a DID_BUS_BUSY error. - * - * Upon further inspection and testing, it seems that DID_BUS_BUSY - * will *always* retry the command. We can get into an infinite loop - * if this happens when we really want some sort of counter that - * will automatically abort/reset the command after so many retries. - * Using DID_ERROR will do just that. 
(Made by a suggestion by - * Doug Ledford 8/1/96) - */ -#define DID_RETRY_COMMAND DID_ERROR - -#define HSCSIID 0x07 -#define SCSI_RESET 0x040 - -/* - * EISA/VL-bus stuff - */ -#define MINSLOT 1 -#define MAXSLOT 15 -#define SLOTBASE(x) ((x) << 12) -#define BASE_TO_SLOT(x) ((x) >> 12) - -/* - * Standard EISA Host ID regs (Offset from slot base) - */ -#define AHC_HID0 0x80 /* 0,1: msb of ID2, 2-7: ID1 */ -#define AHC_HID1 0x81 /* 0-4: ID3, 5-7: LSB ID2 */ -#define AHC_HID2 0x82 /* product */ -#define AHC_HID3 0x83 /* firmware revision */ - -/* - * AIC-7770 I/O range to reserve for a card - */ -#define MINREG 0xC00 -#define MAXREG 0xCFF - -#define INTDEF 0x5C /* Interrupt Definition Register */ - -/* - * AIC-78X0 PCI registers - */ -#define CLASS_PROGIF_REVID 0x08 -#define DEVREVID 0x000000FFul -#define PROGINFC 0x0000FF00ul -#define SUBCLASS 0x00FF0000ul -#define BASECLASS 0xFF000000ul - -#define CSIZE_LATTIME 0x0C -#define CACHESIZE 0x0000003Ful /* only 5 bits */ -#define LATTIME 0x0000FF00ul - -#define DEVCONFIG 0x40 -#define SCBSIZE32 0x00010000ul /* aic789X only */ -#define MPORTMODE 0x00000400ul /* aic7870 only */ -#define RAMPSM 0x00000200ul /* aic7870 only */ -#define RAMPSM_ULTRA2 0x00000004 -#define VOLSENSE 0x00000100ul -#define SCBRAMSEL 0x00000080ul -#define SCBRAMSEL_ULTRA2 0x00000008 -#define MRDCEN 0x00000040ul -#define EXTSCBTIME 0x00000020ul /* aic7870 only */ -#define EXTSCBPEN 0x00000010ul /* aic7870 only */ -#define BERREN 0x00000008ul -#define DACEN 0x00000004ul -#define STPWLEVEL 0x00000002ul -#define DIFACTNEGEN 0x00000001ul /* aic7870 only */ - -#define SCAMCTL 0x1a /* Ultra2 only */ -#define CCSCBBADDR 0xf0 /* aic7895/6/7 */ - -/* - * Define the different types of SEEPROMs on aic7xxx adapters - * and make it also represent the address size used in accessing - * its registers. The 93C46 chips have 1024 bits organized into - * 64 16-bit words, while the 93C56 chips have 2048 bits organized - * into 128 16-bit words. The C46 chips use 6 bits to address - * each word, while the C56 and C66 (4096 bits) use 8 bits to - * address each word. - */ -typedef enum {C46 = 6, C56_66 = 8} seeprom_chip_type; - -/* - * - * Define the format of the SEEPROM registers (16 bits). 
- * - */ -struct seeprom_config { - -/* - * SCSI ID Configuration Flags - */ -#define CFXFER 0x0007 /* synchronous transfer rate */ -#define CFSYNCH 0x0008 /* enable synchronous transfer */ -#define CFDISC 0x0010 /* enable disconnection */ -#define CFWIDEB 0x0020 /* wide bus device (wide card) */ -#define CFSYNCHISULTRA 0x0040 /* CFSYNC is an ultra offset */ -#define CFNEWULTRAFORMAT 0x0080 /* Use the Ultra2 SEEPROM format */ -#define CFSTART 0x0100 /* send start unit SCSI command */ -#define CFINCBIOS 0x0200 /* include in BIOS scan */ -#define CFRNFOUND 0x0400 /* report even if not found */ -#define CFMULTILUN 0x0800 /* probe mult luns in BIOS scan */ -#define CFWBCACHEYES 0x4000 /* Enable W-Behind Cache on drive */ -#define CFWBCACHENC 0xc000 /* Don't change W-Behind Cache */ -/* UNUSED 0x3000 */ - unsigned short device_flags[16]; /* words 0-15 */ - -/* - * BIOS Control Bits - */ -#define CFSUPREM 0x0001 /* support all removable drives */ -#define CFSUPREMB 0x0002 /* support removable drives for boot only */ -#define CFBIOSEN 0x0004 /* BIOS enabled */ -/* UNUSED 0x0008 */ -#define CFSM2DRV 0x0010 /* support more than two drives */ -#define CF284XEXTEND 0x0020 /* extended translation (284x cards) */ -/* UNUSED 0x0040 */ -#define CFEXTEND 0x0080 /* extended translation enabled */ -/* UNUSED 0xFF00 */ - unsigned short bios_control; /* word 16 */ - -/* - * Host Adapter Control Bits - */ -#define CFAUTOTERM 0x0001 /* Perform Auto termination */ -#define CFULTRAEN 0x0002 /* Ultra SCSI speed enable (Ultra cards) */ -#define CF284XSELTO 0x0003 /* Selection timeout (284x cards) */ -#define CF284XFIFO 0x000C /* FIFO Threshold (284x cards) */ -#define CFSTERM 0x0004 /* SCSI low byte termination */ -#define CFWSTERM 0x0008 /* SCSI high byte termination (wide card) */ -#define CFSPARITY 0x0010 /* SCSI parity */ -#define CF284XSTERM 0x0020 /* SCSI low byte termination (284x cards) */ -#define CFRESETB 0x0040 /* reset SCSI bus at boot */ -#define CFBPRIMARY 0x0100 /* Channel B primary on 7895 chipsets */ -#define CFSEAUTOTERM 0x0400 /* aic7890 Perform SE Auto Term */ -#define CFLVDSTERM 0x0800 /* aic7890 LVD Termination */ -/* UNUSED 0xF280 */ - unsigned short adapter_control; /* word 17 */ - -/* - * Bus Release, Host Adapter ID - */ -#define CFSCSIID 0x000F /* host adapter SCSI ID */ -/* UNUSED 0x00F0 */ -#define CFBRTIME 0xFF00 /* bus release time */ - unsigned short brtime_id; /* word 18 */ - -/* - * Maximum targets - */ -#define CFMAXTARG 0x00FF /* maximum targets */ -/* UNUSED 0xFF00 */ - unsigned short max_targets; /* word 19 */ - - unsigned short res_1[11]; /* words 20-30 */ - unsigned short checksum; /* word 31 */ -}; - -#define SELBUS_MASK 0x0a -#define SELNARROW 0x00 -#define SELBUSB 0x08 -#define SINGLE_BUS 0x00 - -#define SCB_TARGET(scb) \ - (((scb)->hscb->target_channel_lun & TID) >> 4) -#define SCB_LUN(scb) \ - ((scb)->hscb->target_channel_lun & LID) -#define SCB_IS_SCSIBUS_B(scb) \ - (((scb)->hscb->target_channel_lun & SELBUSB) != 0) - -/* - * If an error occurs during a data transfer phase, run the command - * to completion - it's easier that way - making a note of the error - * condition in this location. This then will modify a DID_OK status - * into an appropriate error for the higher-level SCSI code. - */ -#define aic7xxx_error(cmd) ((cmd)->SCp.Status) - -/* - * Keep track of the targets returned status. - */ -#define aic7xxx_status(cmd) ((cmd)->SCp.sent_command) - -/* - * The position of the SCSI commands scb within the scb array. 
- */ -#define aic7xxx_position(cmd) ((cmd)->SCp.have_data_in) - -/* - * The stored DMA mapping for single-buffer data transfers. - */ -#define aic7xxx_mapping(cmd) ((cmd)->SCp.phase) - -/* - * Get out private data area from a scsi cmd pointer - */ -#define AIC_DEV(cmd) ((struct aic_dev_data *)(cmd)->device->hostdata) - -/* - * So we can keep track of our host structs - */ -static struct aic7xxx_host *first_aic7xxx = NULL; - -/* - * As of Linux 2.1, the mid-level SCSI code uses virtual addresses - * in the scatter-gather lists. We need to convert the virtual - * addresses to physical addresses. - */ -struct hw_scatterlist { - unsigned int address; - unsigned int length; -}; - -/* - * Maximum number of SG segments these cards can support. - */ -#define AIC7XXX_MAX_SG 128 - -/* - * The maximum number of SCBs we could have for ANY type - * of card. DON'T FORGET TO CHANGE THE SCB MASK IN THE - * SEQUENCER CODE IF THIS IS MODIFIED! - */ -#define AIC7XXX_MAXSCB 255 - - -struct aic7xxx_hwscb { -/* ------------ Begin hardware supported fields ---------------- */ -/* 0*/ unsigned char control; -/* 1*/ unsigned char target_channel_lun; /* 4/1/3 bits */ -/* 2*/ unsigned char target_status; -/* 3*/ unsigned char SG_segment_count; -/* 4*/ unsigned int SG_list_pointer; -/* 8*/ unsigned char residual_SG_segment_count; -/* 9*/ unsigned char residual_data_count[3]; -/*12*/ unsigned int data_pointer; -/*16*/ unsigned int data_count; -/*20*/ unsigned int SCSI_cmd_pointer; -/*24*/ unsigned char SCSI_cmd_length; -/*25*/ unsigned char tag; /* Index into our kernel SCB array. - * Also used as the tag for tagged I/O - */ -#define SCB_PIO_TRANSFER_SIZE 26 /* amount we need to upload/download - * via PIO to initialize a transaction. - */ -/*26*/ unsigned char next; /* Used to thread SCBs awaiting selection - * or disconnected down in the sequencer. - */ -/*27*/ unsigned char prev; -/*28*/ unsigned int pad; /* - * Unused by the kernel, but we require - * the padding so that the array of - * hardware SCBs is aligned on 32 byte - * boundaries so the sequencer can index - */ -}; - -typedef enum { - SCB_FREE = 0x0000, - SCB_DTR_SCB = 0x0001, - SCB_WAITINGQ = 0x0002, - SCB_ACTIVE = 0x0004, - SCB_SENSE = 0x0008, - SCB_ABORT = 0x0010, - SCB_DEVICE_RESET = 0x0020, - SCB_RESET = 0x0040, - SCB_RECOVERY_SCB = 0x0080, - SCB_MSGOUT_PPR = 0x0100, - SCB_MSGOUT_SENT = 0x0200, - SCB_MSGOUT_SDTR = 0x0400, - SCB_MSGOUT_WDTR = 0x0800, - SCB_MSGOUT_BITS = SCB_MSGOUT_PPR | - SCB_MSGOUT_SENT | - SCB_MSGOUT_SDTR | - SCB_MSGOUT_WDTR, - SCB_QUEUED_ABORT = 0x1000, - SCB_QUEUED_FOR_DONE = 0x2000, - SCB_WAS_BUSY = 0x4000, - SCB_QUEUE_FULL = 0x8000 -} scb_flag_type; - -typedef enum { - AHC_FNONE = 0x00000000, - AHC_PAGESCBS = 0x00000001, - AHC_CHANNEL_B_PRIMARY = 0x00000002, - AHC_USEDEFAULTS = 0x00000004, - AHC_INDIRECT_PAGING = 0x00000008, - AHC_CHNLB = 0x00000020, - AHC_CHNLC = 0x00000040, - AHC_EXTEND_TRANS_A = 0x00000100, - AHC_EXTEND_TRANS_B = 0x00000200, - AHC_TERM_ENB_A = 0x00000400, - AHC_TERM_ENB_SE_LOW = 0x00000400, - AHC_TERM_ENB_B = 0x00000800, - AHC_TERM_ENB_SE_HIGH = 0x00000800, - AHC_HANDLING_REQINITS = 0x00001000, - AHC_TARGETMODE = 0x00002000, - AHC_NEWEEPROM_FMT = 0x00004000, - /* - * Here ends the FreeBSD defined flags and here begins the linux defined - * flags. NOTE: I did not preserve the old flag name during this change - * specifically to force me to evaluate what flags were being used properly - * and what flags weren't. This way, I could clean up the flag usage on - * a use by use basis. 
Doug Ledford - */ - AHC_MOTHERBOARD = 0x00020000, - AHC_NO_STPWEN = 0x00040000, - AHC_RESET_DELAY = 0x00080000, - AHC_A_SCANNED = 0x00100000, - AHC_B_SCANNED = 0x00200000, - AHC_MULTI_CHANNEL = 0x00400000, - AHC_BIOS_ENABLED = 0x00800000, - AHC_SEEPROM_FOUND = 0x01000000, - AHC_TERM_ENB_LVD = 0x02000000, - AHC_ABORT_PENDING = 0x04000000, - AHC_RESET_PENDING = 0x08000000, -#define AHC_IN_ISR_BIT 28 - AHC_IN_ISR = 0x10000000, - AHC_IN_ABORT = 0x20000000, - AHC_IN_RESET = 0x40000000, - AHC_EXTERNAL_SRAM = 0x80000000 -} ahc_flag_type; - -typedef enum { - AHC_NONE = 0x0000, - AHC_CHIPID_MASK = 0x00ff, - AHC_AIC7770 = 0x0001, - AHC_AIC7850 = 0x0002, - AHC_AIC7860 = 0x0003, - AHC_AIC7870 = 0x0004, - AHC_AIC7880 = 0x0005, - AHC_AIC7890 = 0x0006, - AHC_AIC7895 = 0x0007, - AHC_AIC7896 = 0x0008, - AHC_AIC7892 = 0x0009, - AHC_AIC7899 = 0x000a, - AHC_VL = 0x0100, - AHC_EISA = 0x0200, - AHC_PCI = 0x0400, -} ahc_chip; - -typedef enum { - AHC_FENONE = 0x0000, - AHC_ULTRA = 0x0001, - AHC_ULTRA2 = 0x0002, - AHC_WIDE = 0x0004, - AHC_TWIN = 0x0008, - AHC_MORE_SRAM = 0x0010, - AHC_CMD_CHAN = 0x0020, - AHC_QUEUE_REGS = 0x0040, - AHC_SG_PRELOAD = 0x0080, - AHC_SPIOCAP = 0x0100, - AHC_ULTRA3 = 0x0200, - AHC_NEW_AUTOTERM = 0x0400, - AHC_AIC7770_FE = AHC_FENONE, - AHC_AIC7850_FE = AHC_SPIOCAP, - AHC_AIC7860_FE = AHC_ULTRA|AHC_SPIOCAP, - AHC_AIC7870_FE = AHC_FENONE, - AHC_AIC7880_FE = AHC_ULTRA, - AHC_AIC7890_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA2| - AHC_QUEUE_REGS|AHC_SG_PRELOAD|AHC_NEW_AUTOTERM, - AHC_AIC7895_FE = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA, - AHC_AIC7896_FE = AHC_AIC7890_FE, - AHC_AIC7892_FE = AHC_AIC7890_FE|AHC_ULTRA3, - AHC_AIC7899_FE = AHC_AIC7890_FE|AHC_ULTRA3, -} ahc_feature; - -#define SCB_DMA_ADDR(scb, addr) ((unsigned long)(addr) + (scb)->scb_dma->dma_offset) - -struct aic7xxx_scb_dma { - unsigned long dma_offset; /* Correction you have to add - * to virtual address to get - * dma handle in this region */ - dma_addr_t dma_address; /* DMA handle of the start, - * for unmap */ - unsigned int dma_len; /* DMA length */ -}; - -typedef enum { - AHC_BUG_NONE = 0x0000, - AHC_BUG_TMODE_WIDEODD = 0x0001, - AHC_BUG_AUTOFLUSH = 0x0002, - AHC_BUG_CACHETHEN = 0x0004, - AHC_BUG_CACHETHEN_DIS = 0x0008, - AHC_BUG_PCI_2_1_RETRY = 0x0010, - AHC_BUG_PCI_MWI = 0x0020, - AHC_BUG_SCBCHAN_UPLOAD = 0x0040, -} ahc_bugs; - -struct aic7xxx_scb { - struct aic7xxx_hwscb *hscb; /* corresponding hardware scb */ - struct scsi_cmnd *cmd; /* scsi_cmnd for this scb */ - struct aic7xxx_scb *q_next; /* next scb in queue */ - volatile scb_flag_type flags; /* current state of scb */ - struct hw_scatterlist *sg_list; /* SG list in adapter format */ - unsigned char tag_action; - unsigned char sg_count; - unsigned char *sense_cmd; /* - * Allocate 6 characters for - * sense command. - */ - unsigned char *cmnd; - unsigned int sg_length; /* - * We init this during - * buildscb so we don't have - * to calculate anything during - * underflow/overflow/stat code - */ - void *kmalloc_ptr; - struct aic7xxx_scb_dma *scb_dma; -}; - -/* - * Define a linked list of SCBs. 
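(Editorial note: the head/tail pair declared just below gives O(1) insertion at either end of a singly linked SCB queue. A sketch of the typical tail insert, assuming the scb_queue_type and the q_next link defined in this file; the real driver additionally serialized these operations against the interrupt handler:

	/* Append an SCB in O(1) via the queue's tail pointer. */
	static void scbq_insert_tail(scb_queue_type *q, struct aic7xxx_scb *scb)
	{
		scb->q_next = NULL;
		if (q->tail != NULL)
			q->tail->q_next = scb;
		else
			q->head = scb;	/* queue was empty */
		q->tail = scb;
	}

)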
- */ -typedef struct { - struct aic7xxx_scb *head; - struct aic7xxx_scb *tail; -} scb_queue_type; - -static struct { - unsigned char errno; - const char *errmesg; -} hard_error[] = { - { ILLHADDR, "Illegal Host Access" }, - { ILLSADDR, "Illegal Sequencer Address referenced" }, - { ILLOPCODE, "Illegal Opcode in sequencer program" }, - { SQPARERR, "Sequencer Ram Parity Error" }, - { DPARERR, "Data-Path Ram Parity Error" }, - { MPARERR, "Scratch Ram/SCB Array Ram Parity Error" }, - { PCIERRSTAT,"PCI Error detected" }, - { CIOPARERR, "CIOBUS Parity Error" } -}; - -static unsigned char -generic_sense[] = { REQUEST_SENSE, 0, 0, 0, 255, 0 }; - -typedef struct { - scb_queue_type free_scbs; /* - * SCBs assigned to free slot on - * card (no paging required) - */ - struct aic7xxx_scb *scb_array[AIC7XXX_MAXSCB]; - struct aic7xxx_hwscb *hscbs; - unsigned char numscbs; /* current number of scbs */ - unsigned char maxhscbs; /* hardware scbs */ - unsigned char maxscbs; /* max scbs including pageable scbs */ - dma_addr_t hscbs_dma; /* DMA handle to hscbs */ - unsigned int hscbs_dma_len; /* length of the above DMA area */ - void *hscb_kmalloc_ptr; -} scb_data_type; - -struct target_cmd { - unsigned char mesg_bytes[4]; - unsigned char command[28]; -}; - -#define AHC_TRANS_CUR 0x0001 -#define AHC_TRANS_ACTIVE 0x0002 -#define AHC_TRANS_GOAL 0x0004 -#define AHC_TRANS_USER 0x0008 -#define AHC_TRANS_QUITE 0x0010 -typedef struct { - unsigned char width; - unsigned char period; - unsigned char offset; - unsigned char options; -} transinfo_type; - -struct aic_dev_data { - volatile scb_queue_type delayed_scbs; - volatile unsigned short temp_q_depth; - unsigned short max_q_depth; - volatile unsigned char active_cmds; - /* - * Statistics Kept: - * - * Total Xfers (count for each command that has a data xfer), - * broken down by reads && writes. - * - * Further sorted into a few bins for keeping tabs on how many commands - * we get of various sizes. - * - */ - long w_total; /* total writes */ - long r_total; /* total reads */ - long barrier_total; /* total num of REQ_BARRIER commands */ - long ordered_total; /* How many REQ_BARRIER commands we - used ordered tags to satisfy */ - long w_bins[6]; /* binned write */ - long r_bins[6]; /* binned reads */ - transinfo_type cur; - transinfo_type goal; -#define BUS_DEVICE_RESET_PENDING 0x01 -#define DEVICE_RESET_DELAY 0x02 -#define DEVICE_PRINT_DTR 0x04 -#define DEVICE_WAS_BUSY 0x08 -#define DEVICE_DTR_SCANNED 0x10 -#define DEVICE_SCSI_3 0x20 - volatile unsigned char flags; - unsigned needppr:1; - unsigned needppr_copy:1; - unsigned needsdtr:1; - unsigned needsdtr_copy:1; - unsigned needwdtr:1; - unsigned needwdtr_copy:1; - unsigned dtr_pending:1; - struct scsi_device *SDptr; - struct list_head list; -}; - -/* - * Define a structure used for each host adapter. Note, in order to avoid - * problems with architectures I can't test on (because I don't have one, - * such as the Alpha based systems) which happen to give faults for - * non-aligned memory accesses, care was taken to align this structure - * in a way that guaranteed all accesses larger than 8 bits were aligned - * on the appropriate boundary. It's also organized to try and be more - * cache line efficient. Be careful when changing this lest you might hurt - * overall performance and bring down the wrath of the masses. 
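
An aside before the host struct below, on the hard_error[] table above: each entry pairs one bit of the controller's ERROR register with a printable description. A minimal sketch of how such a table is typically consumed (the helper name is hypothetical, not part of this driver):

static const char *
lookup_hard_error(unsigned char errstat)
{
  int i;

  /* The errno values are register bit flags, so test with a mask and
   * report the first bit that is set. */
  for (i = 0; i < ARRAY_SIZE(hard_error); i++)
  {
    if (errstat & hard_error[i].errno)
      return hard_error[i].errmesg;
  }
  return "Unknown hardware error"; /* no bit matched */
}
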
- */ -struct aic7xxx_host { - /* - * This is the first 64 bytes in the host struct - */ - - /* - * We are grouping things here....first, items that get either read or - * written with nearly every interrupt - */ - volatile long flags; - ahc_feature features; /* chip features */ - unsigned long base; /* card base address */ - volatile unsigned char __iomem *maddr; /* memory mapped address */ - unsigned long isr_count; /* Interrupt count */ - unsigned long spurious_int; - scb_data_type *scb_data; - struct aic7xxx_cmd_queue { - struct scsi_cmnd *head; - struct scsi_cmnd *tail; - } completeq; - - /* - * Things read/written on nearly every entry into aic7xxx_queue() - */ - volatile scb_queue_type waiting_scbs; - unsigned char unpause; /* unpause value for HCNTRL */ - unsigned char pause; /* pause value for HCNTRL */ - volatile unsigned char qoutfifonext; - volatile unsigned char activescbs; /* active scbs */ - volatile unsigned char max_activescbs; - volatile unsigned char qinfifonext; - volatile unsigned char *untagged_scbs; - volatile unsigned char *qoutfifo; - volatile unsigned char *qinfifo; - - unsigned char dev_last_queue_full[MAX_TARGETS]; - unsigned char dev_last_queue_full_count[MAX_TARGETS]; - unsigned short ultraenb; /* Gets downloaded to card as a bitmap */ - unsigned short discenable; /* Gets downloaded to card as a bitmap */ - transinfo_type user[MAX_TARGETS]; - - unsigned char msg_buf[13]; /* The message for the target */ - unsigned char msg_type; -#define MSG_TYPE_NONE 0x00 -#define MSG_TYPE_INITIATOR_MSGOUT 0x01 -#define MSG_TYPE_INITIATOR_MSGIN 0x02 - unsigned char msg_len; /* Length of message */ - unsigned char msg_index; /* Index into msg_buf array */ - - - /* - * We put the less frequently used host structure items - * after the more frequently used items to try and ease - * the burden on the cache subsystem. - * These entries are not *commonly* accessed, whereas - * the preceding entries are accessed very often. - */ - - unsigned int irq; /* IRQ for this adapter */ - int instance; /* aic7xxx instance number */ - int scsi_id; /* host adapter SCSI ID */ - int scsi_id_b; /* channel B for twin adapters */ - unsigned int bios_address; - int board_name_index; - unsigned short bios_control; /* bios control - SEEPROM */ - unsigned short adapter_control; /* adapter control - SEEPROM */ - struct pci_dev *pdev; - unsigned char pci_bus; - unsigned char pci_device_fn; - struct seeprom_config sc; - unsigned short sc_type; - unsigned short sc_size; - struct aic7xxx_host *next; /* allow for multiple IRQs */ - struct Scsi_Host *host; /* pointer to scsi host */ - struct list_head aic_devs; /* all aic_dev structs on host */ - int host_no; /* SCSI host number */ - unsigned long mbase; /* I/O memory address */ - ahc_chip chip; /* chip type */ - ahc_bugs bugs; - dma_addr_t fifo_dma; /* DMA handle for fifo arrays */ -}; - -/* - * Valid SCSIRATE values. (p. 3-17) - * Provides a mapping of transfer periods in ns/4 to the proper value to - * stick in the SCSIRATE reg to use that transfer rate. 
- */
-#define AHC_SYNCRATE_ULTRA3 0
-#define AHC_SYNCRATE_ULTRA2 1
-#define AHC_SYNCRATE_ULTRA 3
-#define AHC_SYNCRATE_FAST 6
-#define AHC_SYNCRATE_CRC 0x40
-#define AHC_SYNCRATE_SE 0x10
-static struct aic7xxx_syncrate {
- /* Rates in Ultra mode have bit 8 of sxfr set */
-#define ULTRA_SXFR 0x100
- int sxfr_ultra2;
- int sxfr;
- unsigned char period;
- const char *rate[2];
-} aic7xxx_syncrates[] = {
- { 0x42, 0x000, 9, {"80.0", "160.0"} },
- { 0x13, 0x000, 10, {"40.0", "80.0"} },
- { 0x14, 0x000, 11, {"33.0", "66.6"} },
- { 0x15, 0x100, 12, {"20.0", "40.0"} },
- { 0x16, 0x110, 15, {"16.0", "32.0"} },
- { 0x17, 0x120, 18, {"13.4", "26.8"} },
- { 0x18, 0x000, 25, {"10.0", "20.0"} },
- { 0x19, 0x010, 31, {"8.0", "16.0"} },
- { 0x1a, 0x020, 37, {"6.67", "13.3"} },
- { 0x1b, 0x030, 43, {"5.7", "11.4"} },
- { 0x10, 0x040, 50, {"5.0", "10.0"} },
- { 0x00, 0x050, 56, {"4.4", "8.8" } },
- { 0x00, 0x060, 62, {"4.0", "8.0" } },
- { 0x00, 0x070, 68, {"3.6", "7.2" } },
- { 0x00, 0x000, 0, {NULL, NULL} },
-};
-
-#define CTL_OF_SCB(scb) (((scb->hscb)->target_channel_lun >> 3) & 0x1), \
- (((scb->hscb)->target_channel_lun >> 4) & 0xf), \
- ((scb->hscb)->target_channel_lun & 0x07)
-
-#define CTL_OF_CMD(cmd) ((cmd->device->channel) & 0x01), \
- ((cmd->device->id) & 0x0f), \
- ((cmd->device->lun) & 0x07)
-
-#define TARGET_INDEX(cmd) ((cmd)->device->id | ((cmd)->device->channel << 3))
-
-/*
- * A nice little define to make doing our printks a little easier
- */
-
-#define WARN_LEAD KERN_WARNING "(scsi%d:%d:%d:%d) "
-#define INFO_LEAD KERN_INFO "(scsi%d:%d:%d:%d) "
-
-/*
- * XXX - these options apply unilaterally to _all_ 274x/284x/294x
- * cards in the system. This should be fixed. Exceptions to this
- * rule are noted in the comments.
- */
-
-/*
- * Use this as the default queue depth when setting tagged queueing on.
- */
-static unsigned int aic7xxx_default_queue_depth = AIC7XXX_CMDS_PER_DEVICE;
-
-/*
- * Skip the scsi bus reset. A non-zero value makes us skip the reset at
- * startup. This has no effect on any later resets that might occur due to
- * things like SCSI bus timeouts.
- */
-static unsigned int aic7xxx_no_reset = 0;
-/*
- * Certain PCI motherboards will scan PCI devices from highest to lowest,
- * others scan from lowest to highest, and they tend to do all kinds of
- * strange things when they come into contact with PCI bridge chips. The
- * net result of all this is that the PCI card that is actually used to boot
- * the machine is very hard to detect. Most motherboards go from lowest
- * PCI slot number to highest, and the first SCSI controller found is the
- * one you boot from. The only exceptions to this are when a controller
- * has its BIOS disabled. So, we by default sort all of our SCSI controllers
- * from lowest PCI slot number to highest PCI slot number. We also force
- * all controllers with their BIOS disabled to the end of the list. This
- * works on *almost* all computers. Where it doesn't work, we have this
- * option. Setting this option to non-0 will reverse the order of the sort
- * to highest first, then lowest, but will still leave cards with their BIOS
- * disabled at the very end. That should fix everyone up unless there are
- * really strange circumstances.
- */
-static int aic7xxx_reverse_scan = 0;
-/*
- * Should we force EXTENDED translation on a controller.
- * 0 == Use whatever is in the SEEPROM or default to off
- * 1 == Use whatever is in the SEEPROM or default to on
- */
-static unsigned int aic7xxx_extended = 0;
-/*
- * The IRQ trigger method used on EISA controllers. Does not affect PCI cards.
- * -1 = Use detected settings.
- * 0 = Force Edge triggered mode.
- * 1 = Force Level triggered mode.
- */
-static int aic7xxx_irq_trigger = -1;
-/*
- * This variable is used to override the termination settings on a controller.
- * This should not be used under normal conditions. However, in the case
- * that a controller does not have a readable SEEPROM (so that we can't
- * read the SEEPROM settings directly) and that a controller has a buggered
- * version of the cable detection logic, this can be used to force the
- * correct termination. It is preferable to use the manual termination
- * settings in the BIOS if possible, but some motherboard controllers store
- * those settings in a format we can't read. In other cases, auto term
- * should also work, but the chipset was put together with no auto term
- * logic (common on motherboard controllers). In those cases, we have
- * 32 bits here to work with. That's good for 8 controllers/channels. The
- * bits are organized as 4 bits per channel, with scsi0 getting the lowest
- * 4 bits in the int. A 1 in a bit position indicates the termination setting
- * that corresponds to that bit should be enabled; a 0 means it is disabled.
- * It looks something like this:
- *
- * 0x0f = 1111-Single Ended Low Byte Termination on/off
- *        ||\-Single Ended High Byte Termination on/off
- *        |\-LVD Low Byte Termination on/off
- *        \-LVD High Byte Termination on/off
- *
- * For non-Ultra2 controllers, the upper 2 bits are not important. So, to
- * enable both high byte and low byte termination on scsi0, I would need to
- * make sure that the override_term variable was set to 0x03 (bits 0011).
- * To make sure that all termination is enabled on an Ultra2 controller at
- * scsi2 and only high byte termination on scsi1 and high and low byte
- * termination on scsi0, I would set override_term=0xf23 (bits 1111 0010 0011).
- *
- * For the most part, users should never have to use this, that's why I
- * left it fairly cryptic instead of easy to understand. If you need it,
- * most likely someone will be telling you what yours needs to be set to.
- */
-static int aic7xxx_override_term = -1;
-/*
- * Certain motherboard chipset controllers tend to screw
- * up the polarity of the term enable output pin. Use this variable
- * to force the correct polarity for your system. This is a bitfield variable
- * similar to the previous one, but this one has one bit per channel instead
- * of four.
- * 0 = Force the setting to active low.
- * 1 = Force setting to active high.
- * Most Adaptec cards are active high, several motherboards are active low.
- * To force a 2940 card at SCSI 0 to active high and a motherboard 7895
- * controller at scsi1 and scsi2 to active low, and a 2910 card at scsi3
- * to active high, you would need to set stpwlev=0x9 (bits 1001).
- *
- * People shouldn't need to use this, but if you are experiencing lots of
- * SCSI timeout problems, this may help. There is one sure way to test what
- * this option needs to be. Using a boot floppy to boot the system, configure
- * your system to enable all SCSI termination (in the Adaptec SCSI BIOS) and
- * if needed then also pass a value to override_term to make sure that the
- * driver is enabling SCSI termination, then set this variable to either 0
- * or 1. When the driver boots, make sure there are *NO* SCSI cables
- * connected to your controller. If it finds and inits the controller
- * without problem, then the setting you passed to stpwlev was correct. If
- * the driver goes into a reset loop and hangs the system, then you need the
- * other setting for this variable. If neither setting lets the machine
- * boot then you have definite termination problems that may not be fixable.
- */
-static int aic7xxx_stpwlev = -1;
-/*
- * Set this to non-0 in order to force the driver to panic the kernel
- * and print out debugging info on a SCSI abort or reset cycle.
- */
-static int aic7xxx_panic_on_abort = 0;
-/*
- * PCI bus parity checking of the Adaptec controllers. This is somewhat
- * dubious at best. To my knowledge, this option has never actually
- * solved a PCI parity problem, but on certain machines with broken PCI
- * chipset configurations, it can generate tons of false error messages.
- * It's included in the driver for completeness.
- * 0 = Shut off PCI parity check
- * -1 = Normal polarity pci parity checking
- * 1 = Reverse polarity pci parity checking
- *
- * NOTE: you can't actually pass -1 on the lilo prompt. So, to set this
- * variable to -1 you would actually want to simply pass the variable
- * name without a number. That will invert the 0 which will result in
- * -1.
- */
-static int aic7xxx_pci_parity = 0;
-/*
- * Set this to any non-0 value to cause us to dump the contents of all
- * the card's registers in a hex dump format tailored to each model of
- * controller.
- *
- * NOTE: THE CONTROLLER IS LEFT IN AN UNUSABLE STATE BY THIS OPTION.
- * YOU CANNOT BOOT UP WITH THIS OPTION, IT IS FOR DEBUGGING PURPOSES
- * ONLY
- */
-static int aic7xxx_dump_card = 0;
-/*
- * Set this to a non-0 value to make us dump out the 32 bit instruction
- * registers on the card after completing the sequencer download. This
- * allows the actual sequencer download to be verified. It is possible
- * to use this option and still boot up and run your system. This is
- * only intended for debugging purposes.
- */
-static int aic7xxx_dump_sequencer = 0;
-/*
- * Certain newer motherboards have put new PCI based devices into the
- * IO spaces that used to typically be occupied by VLB or EISA cards.
- * This overlap can cause these newer motherboards to lock up when scanned
- * for older EISA and VLB devices. Setting this option to non-0 will
- * cause the driver to skip scanning for any VLB or EISA controllers and
- * only support the PCI controllers. NOTE: this means that if the kernel
- * is compiled with PCI support disabled, then setting this to non-0
- * would result in never finding any devices :)
- */
-static int aic7xxx_no_probe = 0;
-/*
- * On some machines, enabling the external SCB RAM isn't reliable yet. I
- * haven't had time to make test patches for things like changing the
- * timing mode on that external RAM either. Some of those changes may
- * fix the problem. Until then though, we default to external SCB RAM
- * off and give a command line option to enable it.
- */
-static int aic7xxx_scbram = 0;
-/*
- * So that we can set how long each device is given as a selection timeout.
- * The table of values goes like this:
- * 0 - 256ms
- * 1 - 128ms
- * 2 - 64ms
- * 3 - 32ms
- * We default to 64ms because it's fast. Some old SCSI-I devices need a
- * longer time. The final value has to be left shifted by 3, hence 0x10
- * is the final value.
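
Worked through, that encoding is just the two-bit timeout code shifted left by three; a hedged sketch of the same arithmetic aic7xxx_setup() below applies to the "seltime" option (the helper name is illustrative, not part of the driver):

static unsigned int
encode_seltime(unsigned int code)
{
  /* 0 -> 256ms, 1 -> 128ms, 2 -> 64ms, 3 -> 32ms; shifting the two-bit
   * code left by 3 lines it up with the register field, so the default
   * of 2 (64ms) becomes (2 % 4) << 3 == 0x10. */
  return (code % 4) << 3;
}
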
- */ -static int aic7xxx_seltime = 0x10; -/* - * So that insmod can find the variable and make it point to something - */ -#ifdef MODULE -static char * aic7xxx = NULL; -module_param(aic7xxx, charp, 0); -#endif - -#define VERBOSE_NORMAL 0x0000 -#define VERBOSE_NEGOTIATION 0x0001 -#define VERBOSE_SEQINT 0x0002 -#define VERBOSE_SCSIINT 0x0004 -#define VERBOSE_PROBE 0x0008 -#define VERBOSE_PROBE2 0x0010 -#define VERBOSE_NEGOTIATION2 0x0020 -#define VERBOSE_MINOR_ERROR 0x0040 -#define VERBOSE_TRACING 0x0080 -#define VERBOSE_ABORT 0x0f00 -#define VERBOSE_ABORT_MID 0x0100 -#define VERBOSE_ABORT_FIND 0x0200 -#define VERBOSE_ABORT_PROCESS 0x0400 -#define VERBOSE_ABORT_RETURN 0x0800 -#define VERBOSE_RESET 0xf000 -#define VERBOSE_RESET_MID 0x1000 -#define VERBOSE_RESET_FIND 0x2000 -#define VERBOSE_RESET_PROCESS 0x4000 -#define VERBOSE_RESET_RETURN 0x8000 -static int aic7xxx_verbose = VERBOSE_NORMAL | VERBOSE_NEGOTIATION | - VERBOSE_PROBE; /* verbose messages */ - - -/**************************************************************************** - * - * We're going to start putting in function declarations so that order of - * functions is no longer important. As needed, they are added here. - * - ***************************************************************************/ - -static int aic7xxx_release(struct Scsi_Host *host); -static void aic7xxx_set_syncrate(struct aic7xxx_host *p, - struct aic7xxx_syncrate *syncrate, int target, int channel, - unsigned int period, unsigned int offset, unsigned char options, - unsigned int type, struct aic_dev_data *aic_dev); -static void aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel, - int lun, unsigned int width, unsigned int type, - struct aic_dev_data *aic_dev); -static void aic7xxx_panic_abort(struct aic7xxx_host *p, struct scsi_cmnd *cmd); -static void aic7xxx_print_card(struct aic7xxx_host *p); -static void aic7xxx_print_scratch_ram(struct aic7xxx_host *p); -static void aic7xxx_print_sequencer(struct aic7xxx_host *p, int downloaded); -#ifdef AIC7XXX_VERBOSE_DEBUGGING -static void aic7xxx_check_scbs(struct aic7xxx_host *p, char *buffer); -#endif - -/**************************************************************************** - * - * These functions are now used. They happen to be wrapped in useless - * inb/outb port read/writes around the real reads and writes because it - * seems that certain very fast CPUs have a problem dealing with us when - * going at full speed. - * - ***************************************************************************/ - -static unsigned char -aic_inb(struct aic7xxx_host *p, long port) -{ -#ifdef MMAPIO - unsigned char x; - if(p->maddr) - { - x = readb(p->maddr + port); - } - else - { - x = inb(p->base + port); - } - return(x); -#else - return(inb(p->base + port)); -#endif -} - -static void -aic_outb(struct aic7xxx_host *p, unsigned char val, long port) -{ -#ifdef MMAPIO - if(p->maddr) - { - writeb(val, p->maddr + port); - mb(); /* locked operation in order to force CPU ordering */ - readb(p->maddr + HCNTRL); /* dummy read to flush the PCI write */ - } - else - { - outb(val, p->base + port); - mb(); /* locked operation in order to force CPU ordering */ - } -#else - outb(val, p->base + port); - mb(); /* locked operation in order to force CPU ordering */ -#endif -} - -/*+F************************************************************************* - * Function: - * aic7xxx_setup - * - * Description: - * Handle Linux boot parameters. 
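
To make the accepted syntax concrete before the parser itself: a hypothetical command line (values invented purely for illustration) and how the code below takes it apart:

  aic7xxx=verbose,no_reset:1,seltime:2,override_term:0xf23

/* Illustrative walk-through of what aic7xxx_setup() does per token:
 *   "verbose" (no ':')     -> aic7xxx_verbose = 0xff29
 *   "no_reset:1"           -> aic7xxx_no_reset = 1
 *   "seltime:2"            -> aic7xxx_seltime = (2 % 4) << 3 = 0x10
 *   "override_term:0xf23"  -> termination nibbles decoded per channel
 *                             as described earlier in this file
 */
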
This routine allows for assigning a value - * to a parameter with a ':' between the parameter and the value. - * ie. aic7xxx=unpause:0x0A,extended - *-F*************************************************************************/ -static int -aic7xxx_setup(char *s) -{ - int i, n; - char *p; - char *end; - - static struct { - const char *name; - unsigned int *flag; - } options[] = { - { "extended", &aic7xxx_extended }, - { "no_reset", &aic7xxx_no_reset }, - { "irq_trigger", &aic7xxx_irq_trigger }, - { "verbose", &aic7xxx_verbose }, - { "reverse_scan",&aic7xxx_reverse_scan }, - { "override_term", &aic7xxx_override_term }, - { "stpwlev", &aic7xxx_stpwlev }, - { "no_probe", &aic7xxx_no_probe }, - { "panic_on_abort", &aic7xxx_panic_on_abort }, - { "pci_parity", &aic7xxx_pci_parity }, - { "dump_card", &aic7xxx_dump_card }, - { "dump_sequencer", &aic7xxx_dump_sequencer }, - { "default_queue_depth", &aic7xxx_default_queue_depth }, - { "scbram", &aic7xxx_scbram }, - { "seltime", &aic7xxx_seltime }, - { "tag_info", NULL } - }; - - end = strchr(s, '\0'); - - while ((p = strsep(&s, ",.")) != NULL) - { - for (i = 0; i < ARRAY_SIZE(options); i++) - { - n = strlen(options[i].name); - if (!strncmp(options[i].name, p, n)) - { - if (!strncmp(p, "tag_info", n)) - { - if (p[n] == ':') - { - char *base; - char *tok, *tok_end, *tok_end2; - char tok_list[] = { '.', ',', '{', '}', '\0' }; - int i, instance = -1, device = -1; - unsigned char done = FALSE; - - base = p; - tok = base + n + 1; /* Forward us just past the ':' */ - tok_end = strchr(tok, '\0'); - if (tok_end < end) - *tok_end = ','; - while(!done) - { - switch(*tok) - { - case '{': - if (instance == -1) - instance = 0; - else if (device == -1) - device = 0; - tok++; - break; - case '}': - if (device != -1) - device = -1; - else if (instance != -1) - instance = -1; - tok++; - break; - case ',': - case '.': - if (instance == -1) - done = TRUE; - else if (device >= 0) - device++; - else if (instance >= 0) - instance++; - if ( (device >= MAX_TARGETS) || - (instance >= ARRAY_SIZE(aic7xxx_tag_info)) ) - done = TRUE; - tok++; - if (!done) - { - base = tok; - } - break; - case '\0': - done = TRUE; - break; - default: - done = TRUE; - tok_end = strchr(tok, '\0'); - for(i=0; tok_list[i]; i++) - { - tok_end2 = strchr(tok, tok_list[i]); - if ( (tok_end2) && (tok_end2 < tok_end) ) - { - tok_end = tok_end2; - done = FALSE; - } - } - if ( (instance >= 0) && (device >= 0) && - (instance < ARRAY_SIZE(aic7xxx_tag_info)) && - (device < MAX_TARGETS) ) - aic7xxx_tag_info[instance].tag_commands[device] = - simple_strtoul(tok, NULL, 0) & 0xff; - tok = tok_end; - break; - } - } - while((p != base) && (p != NULL)) - p = strsep(&s, ",."); - } - } - else if (p[n] == ':') - { - *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0); - if(!strncmp(p, "seltime", n)) - { - *(options[i].flag) = (*(options[i].flag) % 4) << 3; - } - } - else if (!strncmp(p, "verbose", n)) - { - *(options[i].flag) = 0xff29; - } - else - { - *(options[i].flag) = ~(*(options[i].flag)); - if(!strncmp(p, "seltime", n)) - { - *(options[i].flag) = (*(options[i].flag) % 4) << 3; - } - } - } - } - } - return 1; -} - -__setup("aic7xxx=", aic7xxx_setup); - -/*+F************************************************************************* - * Function: - * pause_sequencer - * - * Description: - * Pause the sequencer and wait for it to actually stop - this - * is important since the sequencer can disable pausing for critical - * sections. 
- *-F*************************************************************************/ -static void -pause_sequencer(struct aic7xxx_host *p) -{ - aic_outb(p, p->pause, HCNTRL); - while ((aic_inb(p, HCNTRL) & PAUSE) == 0) - { - ; - } - if(p->features & AHC_ULTRA2) - { - aic_inb(p, CCSCBCTL); - } -} - -/*+F************************************************************************* - * Function: - * unpause_sequencer - * - * Description: - * Unpause the sequencer. Unremarkable, yet done often enough to - * warrant an easy way to do it. - *-F*************************************************************************/ -static void -unpause_sequencer(struct aic7xxx_host *p, int unpause_always) -{ - if (unpause_always || - ( !(aic_inb(p, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) && - !(p->flags & AHC_HANDLING_REQINITS) ) ) - { - aic_outb(p, p->unpause, HCNTRL); - } -} - -/*+F************************************************************************* - * Function: - * restart_sequencer - * - * Description: - * Restart the sequencer program from address zero. This assumes - * that the sequencer is already paused. - *-F*************************************************************************/ -static void -restart_sequencer(struct aic7xxx_host *p) -{ - aic_outb(p, 0, SEQADDR0); - aic_outb(p, 0, SEQADDR1); - aic_outb(p, FASTMODE, SEQCTL); -} - -/* - * We include the aic7xxx_seq.c file here so that the other defines have - * already been made, and so that it comes before the code that actually - * downloads the instructions (since we don't typically use function - * prototype, our code has to be ordered that way, it's a left-over from - * the original driver days.....I should fix it some time DL). - */ -#include "aic7xxx_old/aic7xxx_seq.c" - -/*+F************************************************************************* - * Function: - * aic7xxx_check_patch - * - * Description: - * See if the next patch to download should be downloaded. - *-F*************************************************************************/ -static int -aic7xxx_check_patch(struct aic7xxx_host *p, - struct sequencer_patch **start_patch, int start_instr, int *skip_addr) -{ - struct sequencer_patch *cur_patch; - struct sequencer_patch *last_patch; - int num_patches; - - num_patches = ARRAY_SIZE(sequencer_patches); - last_patch = &sequencer_patches[num_patches]; - cur_patch = *start_patch; - - while ((cur_patch < last_patch) && (start_instr == cur_patch->begin)) - { - if (cur_patch->patch_func(p) == 0) - { - /* - * Start rejecting code. - */ - *skip_addr = start_instr + cur_patch->skip_instr; - cur_patch += cur_patch->skip_patch; - } - else - { - /* - * Found an OK patch. Advance the patch pointer to the next patch - * and wait for our instruction pointer to get here. - */ - cur_patch++; - } - } - - *start_patch = cur_patch; - if (start_instr < *skip_addr) - /* - * Still skipping - */ - return (0); - return(1); -} - - -/*+F************************************************************************* - * Function: - * aic7xxx_download_instr - * - * Description: - * Find the next patch to download. 
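
For reference while reading aic7xxx_check_patch() above: each patch table entry carries a predicate plus begin/skip counts. A sketch of the shape of such a predicate (the real ones are generated alongside the sequencer code in aic7xxx_seq.c; this one is illustrative only):

static int
ahc_patch_ultra2_func(struct aic7xxx_host *p)
{
  /* Keep the guarded instruction range only on Ultra2-class chips.
   * Returning 0 makes check_patch() skip cur_patch->skip_instr
   * instructions and advance the table by cur_patch->skip_patch. */
  return (p->features & AHC_ULTRA2) != 0;
}
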
- *-F*************************************************************************/ -static void -aic7xxx_download_instr(struct aic7xxx_host *p, int instrptr, - unsigned char *dconsts) -{ - union ins_formats instr; - struct ins_format1 *fmt1_ins; - struct ins_format3 *fmt3_ins; - unsigned char opcode; - - instr = *(union ins_formats*) &seqprog[instrptr * 4]; - - instr.integer = le32_to_cpu(instr.integer); - - fmt1_ins = &instr.format1; - fmt3_ins = NULL; - - /* Pull the opcode */ - opcode = instr.format1.opcode; - switch (opcode) - { - case AIC_OP_JMP: - case AIC_OP_JC: - case AIC_OP_JNC: - case AIC_OP_CALL: - case AIC_OP_JNE: - case AIC_OP_JNZ: - case AIC_OP_JE: - case AIC_OP_JZ: - { - struct sequencer_patch *cur_patch; - int address_offset; - unsigned int address; - int skip_addr; - int i; - - fmt3_ins = &instr.format3; - address_offset = 0; - address = fmt3_ins->address; - cur_patch = sequencer_patches; - skip_addr = 0; - - for (i = 0; i < address;) - { - aic7xxx_check_patch(p, &cur_patch, i, &skip_addr); - if (skip_addr > i) - { - int end_addr; - - end_addr = min_t(int, address, skip_addr); - address_offset += end_addr - i; - i = skip_addr; - } - else - { - i++; - } - } - address -= address_offset; - fmt3_ins->address = address; - /* Fall Through to the next code section */ - } - case AIC_OP_OR: - case AIC_OP_AND: - case AIC_OP_XOR: - case AIC_OP_ADD: - case AIC_OP_ADC: - case AIC_OP_BMOV: - if (fmt1_ins->parity != 0) - { - fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; - } - fmt1_ins->parity = 0; - /* Fall Through to the next code section */ - case AIC_OP_ROL: - if ((p->features & AHC_ULTRA2) != 0) - { - int i, count; - - /* Calculate odd parity for the instruction */ - for ( i=0, count=0; i < 31; i++) - { - unsigned int mask; - - mask = 0x01 << i; - if ((instr.integer & mask) != 0) - count++; - } - if (!(count & 0x01)) - instr.format1.parity = 1; - } - else - { - if (fmt3_ins != NULL) - { - instr.integer = fmt3_ins->immediate | - (fmt3_ins->source << 8) | - (fmt3_ins->address << 16) | - (fmt3_ins->opcode << 25); - } - else - { - instr.integer = fmt1_ins->immediate | - (fmt1_ins->source << 8) | - (fmt1_ins->destination << 16) | - (fmt1_ins->ret << 24) | - (fmt1_ins->opcode << 25); - } - } - aic_outb(p, (instr.integer & 0xff), SEQRAM); - aic_outb(p, ((instr.integer >> 8) & 0xff), SEQRAM); - aic_outb(p, ((instr.integer >> 16) & 0xff), SEQRAM); - aic_outb(p, ((instr.integer >> 24) & 0xff), SEQRAM); - udelay(10); - break; - - default: - panic("aic7xxx: Unknown opcode encountered in sequencer program."); - break; - } -} - - -/*+F************************************************************************* - * Function: - * aic7xxx_loadseq - * - * Description: - * Load the sequencer code into the controller memory. 
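
The parity tail of aic7xxx_download_instr() above forces odd parity across each instruction word on Ultra2 chips. The same computation written compactly (equivalent logic only, not a proposed replacement):

static void
set_odd_instruction_parity(union ins_formats *instr)
{
  unsigned int v = instr->integer & 0x7fffffff; /* bits 0 through 30 */
  int count = 0;

  while (v)
  {
    count += v & 1;
    v >>= 1;
  }
  if (!(count & 0x01)) /* even so far: set parity bit to make it odd */
    instr->format1.parity = 1;
}
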
- *-F*************************************************************************/ -static void -aic7xxx_loadseq(struct aic7xxx_host *p) -{ - struct sequencer_patch *cur_patch; - int i; - int downloaded; - int skip_addr; - unsigned char download_consts[4] = {0, 0, 0, 0}; - - if (aic7xxx_verbose & VERBOSE_PROBE) - { - printk(KERN_INFO "(scsi%d) Downloading sequencer code...", p->host_no); - } -#if 0 - download_consts[TMODE_NUMCMDS] = p->num_targetcmds; -#endif - download_consts[TMODE_NUMCMDS] = 0; - cur_patch = &sequencer_patches[0]; - downloaded = 0; - skip_addr = 0; - - aic_outb(p, PERRORDIS|LOADRAM|FAILDIS|FASTMODE, SEQCTL); - aic_outb(p, 0, SEQADDR0); - aic_outb(p, 0, SEQADDR1); - - for (i = 0; i < sizeof(seqprog) / 4; i++) - { - if (aic7xxx_check_patch(p, &cur_patch, i, &skip_addr) == 0) - { - /* Skip this instruction for this configuration. */ - continue; - } - aic7xxx_download_instr(p, i, &download_consts[0]); - downloaded++; - } - - aic_outb(p, 0, SEQADDR0); - aic_outb(p, 0, SEQADDR1); - aic_outb(p, FASTMODE | FAILDIS, SEQCTL); - unpause_sequencer(p, TRUE); - mdelay(1); - pause_sequencer(p); - aic_outb(p, FASTMODE, SEQCTL); - if (aic7xxx_verbose & VERBOSE_PROBE) - { - printk(" %d instructions downloaded\n", downloaded); - } - if (aic7xxx_dump_sequencer) - aic7xxx_print_sequencer(p, downloaded); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_print_sequencer - * - * Description: - * Print the contents of the sequencer memory to the screen. - *-F*************************************************************************/ -static void -aic7xxx_print_sequencer(struct aic7xxx_host *p, int downloaded) -{ - int i, k, temp; - - aic_outb(p, PERRORDIS|LOADRAM|FAILDIS|FASTMODE, SEQCTL); - aic_outb(p, 0, SEQADDR0); - aic_outb(p, 0, SEQADDR1); - - k = 0; - for (i=0; i < downloaded; i++) - { - if ( k == 0 ) - printk("%03x: ", i); - temp = aic_inb(p, SEQRAM); - temp |= (aic_inb(p, SEQRAM) << 8); - temp |= (aic_inb(p, SEQRAM) << 16); - temp |= (aic_inb(p, SEQRAM) << 24); - printk("%08x", temp); - if ( ++k == 8 ) - { - printk("\n"); - k = 0; - } - else - printk(" "); - } - aic_outb(p, 0, SEQADDR0); - aic_outb(p, 0, SEQADDR1); - aic_outb(p, FASTMODE | FAILDIS, SEQCTL); - unpause_sequencer(p, TRUE); - mdelay(1); - pause_sequencer(p); - aic_outb(p, FASTMODE, SEQCTL); - printk("\n"); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_info - * - * Description: - * Return a string describing the driver. 
- *-F*************************************************************************/ -static const char * -aic7xxx_info(struct Scsi_Host *dooh) -{ - static char buffer[256]; - char *bp; - struct aic7xxx_host *p; - - bp = &buffer[0]; - p = (struct aic7xxx_host *)dooh->hostdata; - memset(bp, 0, sizeof(buffer)); - strcpy(bp, "Adaptec AHA274x/284x/294x (EISA/VLB/PCI-Fast SCSI) "); - strcat(bp, AIC7XXX_C_VERSION); - strcat(bp, "/"); - strcat(bp, AIC7XXX_H_VERSION); - strcat(bp, "\n"); - strcat(bp, " <"); - strcat(bp, board_names[p->board_name_index]); - strcat(bp, ">"); - - return(bp); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_find_syncrate - * - * Description: - * Look up the valid period to SCSIRATE conversion in our table - *-F*************************************************************************/ -static struct aic7xxx_syncrate * -aic7xxx_find_syncrate(struct aic7xxx_host *p, unsigned int *period, - unsigned int maxsync, unsigned char *options) -{ - struct aic7xxx_syncrate *syncrate; - int done = FALSE; - - switch(*options) - { - case MSG_EXT_PPR_OPTION_DT_CRC: - case MSG_EXT_PPR_OPTION_DT_UNITS: - if(!(p->features & AHC_ULTRA3)) - { - *options = 0; - maxsync = max_t(unsigned int, maxsync, AHC_SYNCRATE_ULTRA2); - } - break; - case MSG_EXT_PPR_OPTION_DT_CRC_QUICK: - case MSG_EXT_PPR_OPTION_DT_UNITS_QUICK: - if(!(p->features & AHC_ULTRA3)) - { - *options = 0; - maxsync = max_t(unsigned int, maxsync, AHC_SYNCRATE_ULTRA2); - } - else - { - /* - * we don't support the Quick Arbitration variants of dual edge - * clocking. As it turns out, we want to send back the - * same basic option, but without the QA attribute. - * We know that we are responding because we would never set - * these options ourself, we would only respond to them. - */ - switch(*options) - { - case MSG_EXT_PPR_OPTION_DT_CRC_QUICK: - *options = MSG_EXT_PPR_OPTION_DT_CRC; - break; - case MSG_EXT_PPR_OPTION_DT_UNITS_QUICK: - *options = MSG_EXT_PPR_OPTION_DT_UNITS; - break; - } - } - break; - default: - *options = 0; - maxsync = max_t(unsigned int, maxsync, AHC_SYNCRATE_ULTRA2); - break; - } - syncrate = &aic7xxx_syncrates[maxsync]; - while ( (syncrate->rate[0] != NULL) && - (!(p->features & AHC_ULTRA2) || syncrate->sxfr_ultra2) ) - { - if (*period <= syncrate->period) - { - switch(*options) - { - case MSG_EXT_PPR_OPTION_DT_CRC: - case MSG_EXT_PPR_OPTION_DT_UNITS: - if(!(syncrate->sxfr_ultra2 & AHC_SYNCRATE_CRC)) - { - done = TRUE; - /* - * oops, we went too low for the CRC/DualEdge signalling, so - * clear the options byte - */ - *options = 0; - /* - * We'll be sending a reply to this packet to set the options - * properly, so unilaterally set the period as well. 
- */ - *period = syncrate->period; - } - else - { - done = TRUE; - if(syncrate == &aic7xxx_syncrates[maxsync]) - { - *period = syncrate->period; - } - } - break; - default: - if(!(syncrate->sxfr_ultra2 & AHC_SYNCRATE_CRC)) - { - done = TRUE; - if(syncrate == &aic7xxx_syncrates[maxsync]) - { - *period = syncrate->period; - } - } - break; - } - if(done) - { - break; - } - } - syncrate++; - } - if ( (*period == 0) || (syncrate->rate[0] == NULL) || - ((p->features & AHC_ULTRA2) && (syncrate->sxfr_ultra2 == 0)) ) - { - /* - * Use async transfers for this target - */ - *options = 0; - *period = 255; - syncrate = NULL; - } - return (syncrate); -} - - -/*+F************************************************************************* - * Function: - * aic7xxx_find_period - * - * Description: - * Look up the valid SCSIRATE to period conversion in our table - *-F*************************************************************************/ -static unsigned int -aic7xxx_find_period(struct aic7xxx_host *p, unsigned int scsirate, - unsigned int maxsync) -{ - struct aic7xxx_syncrate *syncrate; - - if (p->features & AHC_ULTRA2) - { - scsirate &= SXFR_ULTRA2; - } - else - { - scsirate &= SXFR; - } - - syncrate = &aic7xxx_syncrates[maxsync]; - while (syncrate->rate[0] != NULL) - { - if (p->features & AHC_ULTRA2) - { - if (syncrate->sxfr_ultra2 == 0) - break; - else if (scsirate == syncrate->sxfr_ultra2) - return (syncrate->period); - else if (scsirate == (syncrate->sxfr_ultra2 & ~AHC_SYNCRATE_CRC)) - return (syncrate->period); - } - else if (scsirate == (syncrate->sxfr & ~ULTRA_SXFR)) - { - return (syncrate->period); - } - syncrate++; - } - return (0); /* async */ -} - -/*+F************************************************************************* - * Function: - * aic7xxx_validate_offset - * - * Description: - * Set a valid offset value for a particular card in use and transfer - * settings in use. 
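
Putting the two lookups above next to the aic7xxx_syncrates[] table, a single row decodes like this (numbers taken from the table earlier in this file):

/* { 0x18, 0x000, 25, {"10.0", "20.0"} }
 *   - period 25 is in ns/4 units, i.e. a 100ns cycle;
 *     aic7xxx_find_syncrate() walks forward from its maxsync index
 *     and settles on the first row whose period is >= the request;
 *   - 0x18 is the SCSIRATE encoding used on Ultra2 chips, 0x000 the
 *     older SXFR bits (ULTRA_SXFR clear, so FAST20 stays disabled);
 *   - "10.0"/"20.0" are the Mbyte/sec figures printed for narrow and
 *     wide buses respectively.
 */
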
- *-F*************************************************************************/ -static void -aic7xxx_validate_offset(struct aic7xxx_host *p, - struct aic7xxx_syncrate *syncrate, unsigned int *offset, int wide) -{ - unsigned int maxoffset; - - /* Limit offset to what the card (and device) can do */ - if (syncrate == NULL) - { - maxoffset = 0; - } - else if (p->features & AHC_ULTRA2) - { - maxoffset = MAX_OFFSET_ULTRA2; - } - else - { - if (wide) - maxoffset = MAX_OFFSET_16BIT; - else - maxoffset = MAX_OFFSET_8BIT; - } - *offset = min(*offset, maxoffset); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_set_syncrate - * - * Description: - * Set the actual syncrate down in the card and in our host structs - *-F*************************************************************************/ -static void -aic7xxx_set_syncrate(struct aic7xxx_host *p, struct aic7xxx_syncrate *syncrate, - int target, int channel, unsigned int period, unsigned int offset, - unsigned char options, unsigned int type, struct aic_dev_data *aic_dev) -{ - unsigned char tindex; - unsigned short target_mask; - unsigned char lun, old_options; - unsigned int old_period, old_offset; - - tindex = target | (channel << 3); - target_mask = 0x01 << tindex; - lun = aic_inb(p, SCB_TCL) & 0x07; - - if (syncrate == NULL) - { - period = 0; - offset = 0; - } - - old_period = aic_dev->cur.period; - old_offset = aic_dev->cur.offset; - old_options = aic_dev->cur.options; - - - if (type & AHC_TRANS_CUR) - { - unsigned int scsirate; - - scsirate = aic_inb(p, TARG_SCSIRATE + tindex); - if (p->features & AHC_ULTRA2) - { - scsirate &= ~SXFR_ULTRA2; - if (syncrate != NULL) - { - switch(options) - { - case MSG_EXT_PPR_OPTION_DT_UNITS: - /* - * mask off the CRC bit in the xfer settings - */ - scsirate |= (syncrate->sxfr_ultra2 & ~AHC_SYNCRATE_CRC); - break; - default: - scsirate |= syncrate->sxfr_ultra2; - break; - } - } - if (type & AHC_TRANS_ACTIVE) - { - aic_outb(p, offset, SCSIOFFSET); - } - aic_outb(p, offset, TARG_OFFSET + tindex); - } - else /* Not an Ultra2 controller */ - { - scsirate &= ~(SXFR|SOFS); - p->ultraenb &= ~target_mask; - if (syncrate != NULL) - { - if (syncrate->sxfr & ULTRA_SXFR) - { - p->ultraenb |= target_mask; - } - scsirate |= (syncrate->sxfr & SXFR); - scsirate |= (offset & SOFS); - } - if (type & AHC_TRANS_ACTIVE) - { - unsigned char sxfrctl0; - - sxfrctl0 = aic_inb(p, SXFRCTL0); - sxfrctl0 &= ~FAST20; - if (p->ultraenb & target_mask) - sxfrctl0 |= FAST20; - aic_outb(p, sxfrctl0, SXFRCTL0); - } - aic_outb(p, p->ultraenb & 0xff, ULTRA_ENB); - aic_outb(p, (p->ultraenb >> 8) & 0xff, ULTRA_ENB + 1 ); - } - if (type & AHC_TRANS_ACTIVE) - { - aic_outb(p, scsirate, SCSIRATE); - } - aic_outb(p, scsirate, TARG_SCSIRATE + tindex); - aic_dev->cur.period = period; - aic_dev->cur.offset = offset; - aic_dev->cur.options = options; - if ( !(type & AHC_TRANS_QUITE) && - (aic7xxx_verbose & VERBOSE_NEGOTIATION) && - (aic_dev->flags & DEVICE_PRINT_DTR) ) - { - if (offset) - { - int rate_mod = (scsirate & WIDEXFER) ? 
1 : 0; - - printk(INFO_LEAD "Synchronous at %s Mbyte/sec, " - "offset %d.\n", p->host_no, channel, target, lun, - syncrate->rate[rate_mod], offset); - } - else - { - printk(INFO_LEAD "Using asynchronous transfers.\n", - p->host_no, channel, target, lun); - } - aic_dev->flags &= ~DEVICE_PRINT_DTR; - } - } - - if (type & AHC_TRANS_GOAL) - { - aic_dev->goal.period = period; - aic_dev->goal.offset = offset; - aic_dev->goal.options = options; - } - - if (type & AHC_TRANS_USER) - { - p->user[tindex].period = period; - p->user[tindex].offset = offset; - p->user[tindex].options = options; - } -} - -/*+F************************************************************************* - * Function: - * aic7xxx_set_width - * - * Description: - * Set the actual width down in the card and in our host structs - *-F*************************************************************************/ -static void -aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel, int lun, - unsigned int width, unsigned int type, struct aic_dev_data *aic_dev) -{ - unsigned char tindex; - unsigned short target_mask; - unsigned int old_width; - - tindex = target | (channel << 3); - target_mask = 1 << tindex; - - old_width = aic_dev->cur.width; - - if (type & AHC_TRANS_CUR) - { - unsigned char scsirate; - - scsirate = aic_inb(p, TARG_SCSIRATE + tindex); - - scsirate &= ~WIDEXFER; - if (width == MSG_EXT_WDTR_BUS_16_BIT) - scsirate |= WIDEXFER; - - aic_outb(p, scsirate, TARG_SCSIRATE + tindex); - - if (type & AHC_TRANS_ACTIVE) - aic_outb(p, scsirate, SCSIRATE); - - aic_dev->cur.width = width; - - if ( !(type & AHC_TRANS_QUITE) && - (aic7xxx_verbose & VERBOSE_NEGOTIATION2) && - (aic_dev->flags & DEVICE_PRINT_DTR) ) - { - printk(INFO_LEAD "Using %s transfers\n", p->host_no, channel, target, - lun, (scsirate & WIDEXFER) ? "Wide(16bit)" : "Narrow(8bit)" ); - } - } - - if (type & AHC_TRANS_GOAL) - aic_dev->goal.width = width; - if (type & AHC_TRANS_USER) - p->user[tindex].width = width; - - if (aic_dev->goal.offset) - { - if (p->features & AHC_ULTRA2) - { - aic_dev->goal.offset = MAX_OFFSET_ULTRA2; - } - else if (width == MSG_EXT_WDTR_BUS_16_BIT) - { - aic_dev->goal.offset = MAX_OFFSET_16BIT; - } - else - { - aic_dev->goal.offset = MAX_OFFSET_8BIT; - } - } -} - -/*+F************************************************************************* - * Function: - * scbq_init - * - * Description: - * SCB queue initialization. - * - *-F*************************************************************************/ -static void -scbq_init(volatile scb_queue_type *queue) -{ - queue->head = NULL; - queue->tail = NULL; -} - -/*+F************************************************************************* - * Function: - * scbq_insert_head - * - * Description: - * Add an SCB to the head of the list. - * - *-F*************************************************************************/ -static inline void -scbq_insert_head(volatile scb_queue_type *queue, struct aic7xxx_scb *scb) -{ - scb->q_next = queue->head; - queue->head = scb; - if (queue->tail == NULL) /* If list was empty, update tail. */ - queue->tail = queue->head; -} - -/*+F************************************************************************* - * Function: - * scbq_remove_head - * - * Description: - * Remove an SCB from the head of the list. 
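
Together with the remove and insert-tail helpers completed just below, scbq_init() and scbq_insert_head() above form a minimal singly linked FIFO with O(1) operations at both ends. A usage sketch (the two SCB pointers are placeholders, not driver names):

scb_queue_type q;
struct aic7xxx_scb *scbp;

scbq_init(&q);
scbq_insert_tail(&q, new_scb);    /* normal enqueue order */
scbq_insert_head(&q, urgent_scb); /* jump the queue, as aic7xxx_done()
                                   * later does for untagged devices */
scbp = scbq_remove_head(&q);      /* dequeue; NULL once empty */
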
- * - *-F*************************************************************************/ -static inline struct aic7xxx_scb * -scbq_remove_head(volatile scb_queue_type *queue) -{ - struct aic7xxx_scb * scbp; - - scbp = queue->head; - if (queue->head != NULL) - queue->head = queue->head->q_next; - if (queue->head == NULL) /* If list is now empty, update tail. */ - queue->tail = NULL; - return(scbp); -} - -/*+F************************************************************************* - * Function: - * scbq_remove - * - * Description: - * Removes an SCB from the list. - * - *-F*************************************************************************/ -static inline void -scbq_remove(volatile scb_queue_type *queue, struct aic7xxx_scb *scb) -{ - if (queue->head == scb) - { - /* At beginning of queue, remove from head. */ - scbq_remove_head(queue); - } - else - { - struct aic7xxx_scb *curscb = queue->head; - - /* - * Search until the next scb is the one we're looking for, or - * we run out of queue. - */ - while ((curscb != NULL) && (curscb->q_next != scb)) - { - curscb = curscb->q_next; - } - if (curscb != NULL) - { - /* Found it. */ - curscb->q_next = scb->q_next; - if (scb->q_next == NULL) - { - /* Update the tail when removing the tail. */ - queue->tail = curscb; - } - } - } -} - -/*+F************************************************************************* - * Function: - * scbq_insert_tail - * - * Description: - * Add an SCB at the tail of the list. - * - *-F*************************************************************************/ -static inline void -scbq_insert_tail(volatile scb_queue_type *queue, struct aic7xxx_scb *scb) -{ - scb->q_next = NULL; - if (queue->tail != NULL) /* Add the scb at the end of the list. */ - queue->tail->q_next = scb; - queue->tail = scb; /* Update the tail. */ - if (queue->head == NULL) /* If list was empty, update head. */ - queue->head = queue->tail; -} - -/*+F************************************************************************* - * Function: - * aic7xxx_match_scb - * - * Description: - * Checks to see if an scb matches the target/channel as specified. - * If target is ALL_TARGETS (-1), then we're looking for any device - * on the specified channel; this happens when a channel is going - * to be reset and all devices on that channel must be aborted. - *-F*************************************************************************/ -static int -aic7xxx_match_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb, - int target, int channel, int lun, unsigned char tag) -{ - int targ = (scb->hscb->target_channel_lun >> 4) & 0x0F; - int chan = (scb->hscb->target_channel_lun >> 3) & 0x01; - int slun = scb->hscb->target_channel_lun & 0x07; - int match; - - match = ((chan == channel) || (channel == ALL_CHANNELS)); - if (match != 0) - match = ((targ == target) || (target == ALL_TARGETS)); - if (match != 0) - match = ((lun == slun) || (lun == ALL_LUNS)); - if (match != 0) - match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); - - return (match); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_add_curscb_to_free_list - * - * Description: - * Adds the current scb (in SCBPTR) to the list of free SCBs. 
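
A usage note on aic7xxx_match_scb() above: the ALL_* wildcards let the one helper express every abort and reset scope. For instance (a sketch; `found` is a placeholder counter):

/* every SCB on channel B, any target, any LUN, tagged or untagged */
if (aic7xxx_match_scb(p, scb, ALL_TARGETS, 1, ALL_LUNS, SCB_LIST_NULL))
  found++;

/* only one specific tagged command: bus 0, target 3, LUN 0 */
if (aic7xxx_match_scb(p, scb, 3, 0, 0, tag))
  found++;
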
- *-F*************************************************************************/ -static void -aic7xxx_add_curscb_to_free_list(struct aic7xxx_host *p) -{ - /* - * Invalidate the tag so that aic7xxx_find_scb doesn't think - * it's active - */ - aic_outb(p, SCB_LIST_NULL, SCB_TAG); - aic_outb(p, 0, SCB_CONTROL); - - aic_outb(p, aic_inb(p, FREE_SCBH), SCB_NEXT); - aic_outb(p, aic_inb(p, SCBPTR), FREE_SCBH); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_rem_scb_from_disc_list - * - * Description: - * Removes the current SCB from the disconnected list and adds it - * to the free list. - *-F*************************************************************************/ -static unsigned char -aic7xxx_rem_scb_from_disc_list(struct aic7xxx_host *p, unsigned char scbptr, - unsigned char prev) -{ - unsigned char next; - - aic_outb(p, scbptr, SCBPTR); - next = aic_inb(p, SCB_NEXT); - aic7xxx_add_curscb_to_free_list(p); - - if (prev != SCB_LIST_NULL) - { - aic_outb(p, prev, SCBPTR); - aic_outb(p, next, SCB_NEXT); - } - else - { - aic_outb(p, next, DISCONNECTED_SCBH); - } - - return next; -} - -/*+F************************************************************************* - * Function: - * aic7xxx_busy_target - * - * Description: - * Set the specified target busy. - *-F*************************************************************************/ -static inline void -aic7xxx_busy_target(struct aic7xxx_host *p, struct aic7xxx_scb *scb) -{ - p->untagged_scbs[scb->hscb->target_channel_lun] = scb->hscb->tag; -} - -/*+F************************************************************************* - * Function: - * aic7xxx_index_busy_target - * - * Description: - * Returns the index of the busy target, and optionally sets the - * target inactive. - *-F*************************************************************************/ -static inline unsigned char -aic7xxx_index_busy_target(struct aic7xxx_host *p, unsigned char tcl, - int unbusy) -{ - unsigned char busy_scbid; - - busy_scbid = p->untagged_scbs[tcl]; - if (unbusy) - { - p->untagged_scbs[tcl] = SCB_LIST_NULL; - } - return (busy_scbid); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_find_scb - * - * Description: - * Look through the SCB array of the card and attempt to find the - * hardware SCB that corresponds to the passed in SCB. Return - * SCB_LIST_NULL if unsuccessful. This routine assumes that the - * card is already paused. - *-F*************************************************************************/ -static unsigned char -aic7xxx_find_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb) -{ - unsigned char saved_scbptr; - unsigned char curindex; - - saved_scbptr = aic_inb(p, SCBPTR); - curindex = 0; - for (curindex = 0; curindex < p->scb_data->maxhscbs; curindex++) - { - aic_outb(p, curindex, SCBPTR); - if (aic_inb(p, SCB_TAG) == scb->hscb->tag) - { - break; - } - } - aic_outb(p, saved_scbptr, SCBPTR); - if (curindex >= p->scb_data->maxhscbs) - { - curindex = SCB_LIST_NULL; - } - - return (curindex); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_allocate_scb - * - * Description: - * Get an SCB from the free list or by allocating a new one. 
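
The pair aic7xxx_busy_target()/aic7xxx_index_busy_target() above keeps one untagged-command slot per target/channel/LUN in untagged_scbs[]: the slot holds the active SCB's tag, or SCB_LIST_NULL when the nexus is idle. The check-and-claim pattern they support looks like this (a sketch, not a quoted code path):

unsigned char tcl = scb->hscb->target_channel_lun;

if (aic7xxx_index_busy_target(p, tcl, /* unbusy */ FALSE) != SCB_LIST_NULL)
{
  /* an untagged command already owns this nexus; hold this one back */
}
else
{
  aic7xxx_busy_target(p, scb); /* claim the nexus for this SCB */
}
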
- *-F*************************************************************************/ -static int -aic7xxx_allocate_scb(struct aic7xxx_host *p) -{ - struct aic7xxx_scb *scbp = NULL; - int scb_size = (sizeof (struct hw_scatterlist) * AIC7XXX_MAX_SG) + 12 + 6; - int i; - int step = PAGE_SIZE / 1024; - unsigned long scb_count = 0; - struct hw_scatterlist *hsgp; - struct aic7xxx_scb *scb_ap; - struct aic7xxx_scb_dma *scb_dma; - unsigned char *bufs; - - if (p->scb_data->numscbs < p->scb_data->maxscbs) - { - /* - * Calculate the optimal number of SCBs to allocate. - * - * NOTE: This formula works because the sizeof(sg_array) is always - * 1024. Therefore, scb_size * i would always be > PAGE_SIZE * - * (i/step). The (i-1) allows the left hand side of the equation - * to grow into the right hand side to a point of near perfect - * efficiency since scb_size * (i -1) is growing slightly faster - * than the right hand side. If the number of SG array elements - * is changed, this function may not be near so efficient any more. - * - * Since the DMA'able buffers are now allocated in a separate - * chunk this algorithm has been modified to match. The '12' - * and '6' factors in scb_size are for the DMA'able command byte - * and sensebuffers respectively. -DaveM - */ - for ( i=step;; i *= 2 ) - { - if ( (scb_size * (i-1)) >= ( (PAGE_SIZE * (i/step)) - 64 ) ) - { - i /= 2; - break; - } - } - scb_count = min( (i-1), p->scb_data->maxscbs - p->scb_data->numscbs); - scb_ap = kmalloc(sizeof (struct aic7xxx_scb) * scb_count - + sizeof(struct aic7xxx_scb_dma), GFP_ATOMIC); - if (scb_ap == NULL) - return(0); - scb_dma = (struct aic7xxx_scb_dma *)&scb_ap[scb_count]; - hsgp = (struct hw_scatterlist *) - pci_alloc_consistent(p->pdev, scb_size * scb_count, - &scb_dma->dma_address); - if (hsgp == NULL) - { - kfree(scb_ap); - return(0); - } - bufs = (unsigned char *)&hsgp[scb_count * AIC7XXX_MAX_SG]; -#ifdef AIC7XXX_VERBOSE_DEBUGGING - if (aic7xxx_verbose > 0xffff) - { - if (p->scb_data->numscbs == 0) - printk(INFO_LEAD "Allocating initial %ld SCB structures.\n", - p->host_no, -1, -1, -1, scb_count); - else - printk(INFO_LEAD "Allocating %ld additional SCB structures.\n", - p->host_no, -1, -1, -1, scb_count); - } -#endif - memset(scb_ap, 0, sizeof (struct aic7xxx_scb) * scb_count); - scb_dma->dma_offset = (unsigned long)scb_dma->dma_address - - (unsigned long)hsgp; - scb_dma->dma_len = scb_size * scb_count; - for (i=0; i < scb_count; i++) - { - scbp = &scb_ap[i]; - scbp->hscb = &p->scb_data->hscbs[p->scb_data->numscbs]; - scbp->sg_list = &hsgp[i * AIC7XXX_MAX_SG]; - scbp->sense_cmd = bufs; - scbp->cmnd = bufs + 6; - bufs += 12 + 6; - scbp->scb_dma = scb_dma; - memset(scbp->hscb, 0, sizeof(struct aic7xxx_hwscb)); - scbp->hscb->tag = p->scb_data->numscbs; - /* - * Place in the scb array; never is removed - */ - p->scb_data->scb_array[p->scb_data->numscbs++] = scbp; - scbq_insert_tail(&p->scb_data->free_scbs, scbp); - } - scbp->kmalloc_ptr = scb_ap; - } - return(scb_count); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_queue_cmd_complete - * - * Description: - * Due to race conditions present in the SCSI subsystem, it is easier - * to queue completed commands, then call scsi_done() on them when - * we're finished. This function queues the completed commands. 
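
The sizing loop at the top of aic7xxx_allocate_scb() above is easier to follow with numbers. Assuming 4K pages (so step = 4), scb_size is 1024 + 12 + 6 = 1042 bytes of DMA-able data per SCB, and i doubles until scb_size * (i-1) reaches PAGE_SIZE * (i/step) - 64:

/* i = 4 : 1042 * 3  = 3126  <  4096 * 1  - 64 = 4032
 * i = 8 : 1042 * 7  = 7294  <  4096 * 2  - 64 = 8128
 * i = 16: 1042 * 15 = 15630 <  4096 * 4  - 64 = 16320
 * i = 32: 1042 * 31 = 32302 <  4096 * 8  - 64 = 32704
 * i = 64: 1042 * 63 = 65646 >= 4096 * 16 - 64 = 65472  -> i /= 2 -> 32
 */

so each pass allocates at most i-1 = 31 SCBs, and the 32302-byte consistent DMA chunk packs just under eight pages.
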
- *-F*************************************************************************/ -static void -aic7xxx_queue_cmd_complete(struct aic7xxx_host *p, struct scsi_cmnd *cmd) -{ - aic7xxx_position(cmd) = SCB_LIST_NULL; - cmd->host_scribble = (char *)p->completeq.head; - p->completeq.head = cmd; -} - -/*+F************************************************************************* - * Function: - * aic7xxx_done_cmds_complete - * - * Description: - * Process the completed command queue. - *-F*************************************************************************/ -static void aic7xxx_done_cmds_complete(struct aic7xxx_host *p) -{ - struct scsi_cmnd *cmd; - - while (p->completeq.head != NULL) { - cmd = p->completeq.head; - p->completeq.head = (struct scsi_cmnd *) cmd->host_scribble; - cmd->host_scribble = NULL; - cmd->scsi_done(cmd); - } -} - -/*+F************************************************************************* - * Function: - * aic7xxx_free_scb - * - * Description: - * Free the scb and insert into the free scb list. - *-F*************************************************************************/ -static void -aic7xxx_free_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb) -{ - - scb->flags = SCB_FREE; - scb->cmd = NULL; - scb->sg_count = 0; - scb->sg_length = 0; - scb->tag_action = 0; - scb->hscb->control = 0; - scb->hscb->target_status = 0; - scb->hscb->target_channel_lun = SCB_LIST_NULL; - - scbq_insert_head(&p->scb_data->free_scbs, scb); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_done - * - * Description: - * Calls the higher level scsi done function and frees the scb. - *-F*************************************************************************/ -static void -aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb) -{ - struct scsi_cmnd *cmd = scb->cmd; - struct aic_dev_data *aic_dev = cmd->device->hostdata; - int tindex = TARGET_INDEX(cmd); - struct aic7xxx_scb *scbp; - unsigned char queue_depth; - - scsi_dma_unmap(cmd); - - if (scb->flags & SCB_SENSE) - { - pci_unmap_single(p->pdev, - le32_to_cpu(scb->sg_list[0].address), - SCSI_SENSE_BUFFERSIZE, - PCI_DMA_FROMDEVICE); - } - if (scb->flags & SCB_RECOVERY_SCB) - { - p->flags &= ~AHC_ABORT_PENDING; - } - if (scb->flags & (SCB_RESET|SCB_ABORT)) - { - cmd->result |= (DID_RESET << 16); - } - - if ((scb->flags & SCB_MSGOUT_BITS) != 0) - { - unsigned short mask; - int message_error = FALSE; - - mask = 0x01 << tindex; - - /* - * Check to see if we get an invalid message or a message error - * after failing to negotiate a wide or sync transfer message. 
- */ - if ((scb->flags & SCB_SENSE) && - ((scb->cmd->sense_buffer[12] == 0x43) || /* INVALID_MESSAGE */ - (scb->cmd->sense_buffer[12] == 0x49))) /* MESSAGE_ERROR */ - { - message_error = TRUE; - } - - if (scb->flags & SCB_MSGOUT_WDTR) - { - if (message_error) - { - if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) && - (aic_dev->flags & DEVICE_PRINT_DTR) ) - { - printk(INFO_LEAD "Device failed to complete Wide Negotiation " - "processing and\n", p->host_no, CTL_OF_SCB(scb)); - printk(INFO_LEAD "returned a sense error code for invalid message, " - "disabling future\n", p->host_no, CTL_OF_SCB(scb)); - printk(INFO_LEAD "Wide negotiation to this device.\n", p->host_no, - CTL_OF_SCB(scb)); - } - aic_dev->needwdtr = aic_dev->needwdtr_copy = 0; - } - } - if (scb->flags & SCB_MSGOUT_SDTR) - { - if (message_error) - { - if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) && - (aic_dev->flags & DEVICE_PRINT_DTR) ) - { - printk(INFO_LEAD "Device failed to complete Sync Negotiation " - "processing and\n", p->host_no, CTL_OF_SCB(scb)); - printk(INFO_LEAD "returned a sense error code for invalid message, " - "disabling future\n", p->host_no, CTL_OF_SCB(scb)); - printk(INFO_LEAD "Sync negotiation to this device.\n", p->host_no, - CTL_OF_SCB(scb)); - aic_dev->flags &= ~DEVICE_PRINT_DTR; - } - aic_dev->needsdtr = aic_dev->needsdtr_copy = 0; - } - } - if (scb->flags & SCB_MSGOUT_PPR) - { - if(message_error) - { - if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) && - (aic_dev->flags & DEVICE_PRINT_DTR) ) - { - printk(INFO_LEAD "Device failed to complete Parallel Protocol " - "Request processing and\n", p->host_no, CTL_OF_SCB(scb)); - printk(INFO_LEAD "returned a sense error code for invalid message, " - "disabling future\n", p->host_no, CTL_OF_SCB(scb)); - printk(INFO_LEAD "Parallel Protocol Request negotiation to this " - "device.\n", p->host_no, CTL_OF_SCB(scb)); - } - /* - * Disable PPR negotiation and revert back to WDTR and SDTR setup - */ - aic_dev->needppr = aic_dev->needppr_copy = 0; - aic_dev->needsdtr = aic_dev->needsdtr_copy = 1; - aic_dev->needwdtr = aic_dev->needwdtr_copy = 1; - } - } - } - - queue_depth = aic_dev->temp_q_depth; - if (queue_depth >= aic_dev->active_cmds) - { - scbp = scbq_remove_head(&aic_dev->delayed_scbs); - if (scbp) - { - if (queue_depth == 1) - { - /* - * Give extra preference to untagged devices, such as CD-R devices - * This makes it more likely that a drive *won't* stuff up while - * waiting on data at a critical time, such as CD-R writing and - * audio CD ripping operations. Should also benefit tape drives. 
- */ - scbq_insert_head(&p->waiting_scbs, scbp); - } - else - { - scbq_insert_tail(&p->waiting_scbs, scbp); - } -#ifdef AIC7XXX_VERBOSE_DEBUGGING - if (aic7xxx_verbose > 0xffff) - printk(INFO_LEAD "Moving SCB from delayed to waiting queue.\n", - p->host_no, CTL_OF_SCB(scbp)); -#endif - if (queue_depth > aic_dev->active_cmds) - { - scbp = scbq_remove_head(&aic_dev->delayed_scbs); - if (scbp) - scbq_insert_tail(&p->waiting_scbs, scbp); - } - } - } - if (!(scb->tag_action)) - { - aic7xxx_index_busy_target(p, scb->hscb->target_channel_lun, - /* unbusy */ TRUE); - if (cmd->device->simple_tags) - { - aic_dev->temp_q_depth = aic_dev->max_q_depth; - } - } - if(scb->flags & SCB_DTR_SCB) - { - aic_dev->dtr_pending = 0; - } - aic_dev->active_cmds--; - p->activescbs--; - - if ((scb->sg_length >= 512) && (((cmd->result >> 16) & 0xf) == DID_OK)) - { - long *ptr; - int x, i; - - - if (rq_data_dir(cmd->request) == WRITE) - { - aic_dev->w_total++; - ptr = aic_dev->w_bins; - } - else - { - aic_dev->r_total++; - ptr = aic_dev->r_bins; - } - x = scb->sg_length; - x >>= 10; - for(i=0; i<6; i++) - { - x >>= 2; - if(!x) { - ptr[i]++; - break; - } - } - if(i == 6 && x) - ptr[5]++; - } - aic7xxx_free_scb(p, scb); - aic7xxx_queue_cmd_complete(p, cmd); - -} - -/*+F************************************************************************* - * Function: - * aic7xxx_run_done_queue - * - * Description: - * Calls the aic7xxx_done() for the scsi_cmnd of each scb in the - * aborted list, and adds each scb to the free list. If complete - * is TRUE, we also process the commands complete list. - *-F*************************************************************************/ -static void -aic7xxx_run_done_queue(struct aic7xxx_host *p, /*complete*/ int complete) -{ - struct aic7xxx_scb *scb; - int i, found = 0; - - for (i = 0; i < p->scb_data->numscbs; i++) - { - scb = p->scb_data->scb_array[i]; - if (scb->flags & SCB_QUEUED_FOR_DONE) - { - if (scb->flags & SCB_QUEUE_FULL) - { - scb->cmd->result = QUEUE_FULL << 1; - } - else - { - if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS)) - printk(INFO_LEAD "Aborting scb %d\n", - p->host_no, CTL_OF_SCB(scb), scb->hscb->tag); - /* - * Clear any residual information since the normal aic7xxx_done() path - * doesn't touch the residuals. - */ - scb->hscb->residual_SG_segment_count = 0; - scb->hscb->residual_data_count[0] = 0; - scb->hscb->residual_data_count[1] = 0; - scb->hscb->residual_data_count[2] = 0; - } - found++; - aic7xxx_done(p, scb); - } - } - if (aic7xxx_verbose & (VERBOSE_ABORT_RETURN | VERBOSE_RESET_RETURN)) - { - printk(INFO_LEAD "%d commands found and queued for " - "completion.\n", p->host_no, -1, -1, -1, found); - } - if (complete) - { - aic7xxx_done_cmds_complete(p); - } -} - -/*+F************************************************************************* - * Function: - * aic7xxx_abort_waiting_scb - * - * Description: - * Manipulate the waiting for selection list and return the - * scb that follows the one that we remove. - *-F*************************************************************************/ -static unsigned char -aic7xxx_abort_waiting_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb, - unsigned char scbpos, unsigned char prev) -{ - unsigned char curscb, next; - - /* - * Select the SCB we want to abort and pull the next pointer out of it. 
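
The r_bins/w_bins accounting above shifts the byte count down to kilobytes and then divides by four per bin until the value reaches zero. A standalone rendering of that arithmetic, with arbitrary sample sizes:

#include <stdio.h>

/* Same bin selection as the histogram code above: bin 0 is under 4 KB,
 * each following bin covers four times the previous range. */
static int xfer_bin(unsigned int bytes)
{
	int i;
	unsigned int x = bytes >> 10;          /* KB */

	for (i = 0; i < 6; i++) {
		x >>= 2;
		if (!x)
			return i;
	}
	return 5;                              /* very large transfers land in the last bin */
}

int main(void)
{
	unsigned int sizes[] = { 512, 2048, 8192, 65536, 1048576 };
	for (int i = 0; i < 5; i++)
		printf("%7u bytes -> bin %d\n", sizes[i], xfer_bin(sizes[i]));
	return 0;
}
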
- */
- curscb = aic_inb(p, SCBPTR);
- aic_outb(p, scbpos, SCBPTR);
- next = aic_inb(p, SCB_NEXT);
-
- aic7xxx_add_curscb_to_free_list(p);
-
- /*
- * Update the waiting list
- */
- if (prev == SCB_LIST_NULL)
- {
- /*
- * First in the list
- */
- aic_outb(p, next, WAITING_SCBH);
- }
- else
- {
- /*
- * Select the scb that pointed to us and update its next pointer.
- */
- aic_outb(p, prev, SCBPTR);
- aic_outb(p, next, SCB_NEXT);
- }
- /*
- * Point us back at the original scb position and inform the SCSI
- * system that the command has been aborted.
- */
- aic_outb(p, curscb, SCBPTR);
- return (next);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_search_qinfifo
- *
- * Description:
- * Search the queue-in FIFO for matching SCBs and conditionally
- * requeue. Returns the number of matching SCBs.
- *-F*************************************************************************/
-static int
-aic7xxx_search_qinfifo(struct aic7xxx_host *p, int target, int channel,
- int lun, unsigned char tag, int flags, int requeue,
- volatile scb_queue_type *queue)
-{
- int found;
- unsigned char qinpos, qintail;
- struct aic7xxx_scb *scbp;
-
- found = 0;
- qinpos = aic_inb(p, QINPOS);
- qintail = p->qinfifonext;
-
- p->qinfifonext = qinpos;
-
- while (qinpos != qintail)
- {
- scbp = p->scb_data->scb_array[p->qinfifo[qinpos++]];
- if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
- {
- /*
- * We found an scb that needs to be removed.
- */
- if (requeue && (queue != NULL))
- {
- if (scbp->flags & SCB_WAITINGQ)
- {
- scbq_remove(queue, scbp);
- scbq_remove(&p->waiting_scbs, scbp);
- scbq_remove(&AIC_DEV(scbp->cmd)->delayed_scbs, scbp);
- AIC_DEV(scbp->cmd)->active_cmds++;
- p->activescbs++;
- }
- scbq_insert_tail(queue, scbp);
- AIC_DEV(scbp->cmd)->active_cmds--;
- p->activescbs--;
- scbp->flags |= SCB_WAITINGQ;
- if ( !(scbp->tag_action & TAG_ENB) )
- {
- aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
- TRUE);
- }
- }
- else if (requeue)
- {
- p->qinfifo[p->qinfifonext++] = scbp->hscb->tag;
- }
- else
- {
- /*
- * Preserve any SCB_RECOVERY_SCB flags on this scb then set the
- * flags we were called with, presumably so aic7xxx_run_done_queue
- * can find this scb
- */
- scbp->flags = flags | (scbp->flags & SCB_RECOVERY_SCB);
- if (aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
- FALSE) == scbp->hscb->tag)
- {
- aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
- TRUE);
- }
- }
- found++;
- }
- else
- {
- p->qinfifo[p->qinfifonext++] = scbp->hscb->tag;
- }
- }
- /*
- * Now that we've done the work, clear out any left over commands in the
- * qinfifo and update the KERNEL_QINPOS down on the card.
- *
- * NOTE: This routine expects the sequencer to already be paused when
- * it is run....make sure it's that way!
- */
- qinpos = p->qinfifonext;
- while(qinpos != qintail)
- {
- p->qinfifo[qinpos++] = SCB_LIST_NULL;
- }
- if (p->features & AHC_QUEUE_REGS)
- aic_outb(p, p->qinfifonext, HNSCB_QOFF);
- else
- aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
-
- return (found);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_scb_on_qoutfifo
- *
- * Description:
- * Is the scb that was passed to us currently on the qoutfifo? 
- *-F*************************************************************************/
-static int
-aic7xxx_scb_on_qoutfifo(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
- int i=0;
-
- while(p->qoutfifo[(p->qoutfifonext + i) & 0xff ] != SCB_LIST_NULL)
- {
- if(p->qoutfifo[(p->qoutfifonext + i) & 0xff ] == scb->hscb->tag)
- return TRUE;
- else
- i++;
- }
- return FALSE;
-}
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_reset_device
- *
- * Description:
- * The device at the given target/channel has been reset. Abort
- * all active and queued scbs for that target/channel. This function
- * need not worry about linked next pointers because if it was a MSG_ABORT_TAG
- * then we had a tagged command (no linked next), if it was MSG_ABORT or
- * MSG_BUS_DEV_RESET then the device won't know about any commands any more
- * and no busy commands will exist, and if it was a bus reset, then nothing
- * knows about any linked next commands any more. In all cases, we don't
- * need to worry about the linked next or busy scb, we just need to clear
- * them.
- *-F*************************************************************************/
-static void
-aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel,
- int lun, unsigned char tag)
-{
- struct aic7xxx_scb *scbp, *prev_scbp;
- struct scsi_device *sd;
- unsigned char active_scb, tcl, scb_tag;
- int i = 0, init_lists = FALSE;
- struct aic_dev_data *aic_dev;
-
- /*
- * Restore this when we're done
- */
- active_scb = aic_inb(p, SCBPTR);
- scb_tag = aic_inb(p, SCB_TAG);
-
- if (aic7xxx_verbose & (VERBOSE_RESET_PROCESS | VERBOSE_ABORT_PROCESS))
- {
- printk(INFO_LEAD "Reset device, hardware_scb %d,\n",
- p->host_no, channel, target, lun, active_scb);
- printk(INFO_LEAD "Current scb %d, SEQADDR 0x%x, LASTPHASE "
- "0x%x\n",
- p->host_no, channel, target, lun, scb_tag,
- aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8),
- aic_inb(p, LASTPHASE));
- printk(INFO_LEAD "SG_CACHEPTR 0x%x, SG_COUNT %d, SCSISIGI 0x%x\n",
- p->host_no, channel, target, lun,
- (p->features & AHC_ULTRA2) ? aic_inb(p, SG_CACHEPTR) : 0,
- aic_inb(p, SG_COUNT), aic_inb(p, SCSISIGI));
- printk(INFO_LEAD "SSTAT0 0x%x, SSTAT1 0x%x, SSTAT2 0x%x\n",
- p->host_no, channel, target, lun, aic_inb(p, SSTAT0),
- aic_inb(p, SSTAT1), aic_inb(p, SSTAT2));
- }
-
- /*
- * Deal with the busy target and linked next issues. 
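
aic7xxx_search_qinfifo() above compacts the queue-in FIFO in place: matching tags are dropped, survivors are rewritten from the front, and the vacated slots are scrubbed with SCB_LIST_NULL. A simplified standalone model of that sweep, where the 8-bit indices wrap at 256 just as QINPOS does:

#include <stdio.h>

#define LIST_NULL 0xff

/* Walk from head to tail, drop tags matching "victim", slide survivors
 * down, scrub the now-empty slots, and return the new tail. */
static unsigned char sweep(unsigned char fifo[256], unsigned char head,
                           unsigned char tail, unsigned char victim,
                           int *found)
{
	unsigned char in = head, out = head, newtail;

	*found = 0;
	while (in != tail) {
		unsigned char tag = fifo[in++];   /* unsigned char wraps at 256 */
		if (tag == victim)
			(*found)++;
		else
			fifo[out++] = tag;        /* keep: compact downward */
	}
	newtail = out;
	while (out != in)
		fifo[out++] = LIST_NULL;          /* clear the vacated slots */
	return newtail;
}

int main(void)
{
	unsigned char fifo[256] = { 3, 7, 3, 9 };
	int found;
	unsigned char tail = sweep(fifo, 0, 4, 3, &found);

	printf("removed %d, new tail %u, fifo: %u %u\n",
	       found, tail, fifo[0], fifo[1]);   /* removed 2, tail 2: 7 9 */
	return 0;
}
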
- */ - list_for_each_entry(aic_dev, &p->aic_devs, list) - { - if (aic7xxx_verbose & (VERBOSE_RESET_PROCESS | VERBOSE_ABORT_PROCESS)) - printk(INFO_LEAD "processing aic_dev %p\n", p->host_no, channel, target, - lun, aic_dev); - sd = aic_dev->SDptr; - - if((target != ALL_TARGETS && target != sd->id) || - (channel != ALL_CHANNELS && channel != sd->channel)) - continue; - if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS)) - printk(INFO_LEAD "Cleaning up status information " - "and delayed_scbs.\n", p->host_no, sd->channel, sd->id, sd->lun); - aic_dev->flags &= ~BUS_DEVICE_RESET_PENDING; - if ( tag == SCB_LIST_NULL ) - { - aic_dev->dtr_pending = 0; - aic_dev->needppr = aic_dev->needppr_copy; - aic_dev->needsdtr = aic_dev->needsdtr_copy; - aic_dev->needwdtr = aic_dev->needwdtr_copy; - aic_dev->flags = DEVICE_PRINT_DTR; - aic_dev->temp_q_depth = aic_dev->max_q_depth; - } - tcl = (sd->id << 4) | (sd->channel << 3) | sd->lun; - if ( (aic7xxx_index_busy_target(p, tcl, FALSE) == tag) || - (tag == SCB_LIST_NULL) ) - aic7xxx_index_busy_target(p, tcl, /* unbusy */ TRUE); - prev_scbp = NULL; - scbp = aic_dev->delayed_scbs.head; - while (scbp != NULL) - { - prev_scbp = scbp; - scbp = scbp->q_next; - if (aic7xxx_match_scb(p, prev_scbp, target, channel, lun, tag)) - { - scbq_remove(&aic_dev->delayed_scbs, prev_scbp); - if (prev_scbp->flags & SCB_WAITINGQ) - { - aic_dev->active_cmds++; - p->activescbs++; - } - prev_scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ); - prev_scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE; - } - } - } - - if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS)) - printk(INFO_LEAD "Cleaning QINFIFO.\n", p->host_no, channel, target, lun ); - aic7xxx_search_qinfifo(p, target, channel, lun, tag, - SCB_RESET | SCB_QUEUED_FOR_DONE, /* requeue */ FALSE, NULL); - -/* - * Search the waiting_scbs queue for matches, this catches any SCB_QUEUED - * ABORT/RESET commands. - */ - if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS)) - printk(INFO_LEAD "Cleaning waiting_scbs.\n", p->host_no, channel, - target, lun ); - { - struct aic7xxx_scb *scbp, *prev_scbp; - - prev_scbp = NULL; - scbp = p->waiting_scbs.head; - while (scbp != NULL) - { - prev_scbp = scbp; - scbp = scbp->q_next; - if (aic7xxx_match_scb(p, prev_scbp, target, channel, lun, tag)) - { - scbq_remove(&p->waiting_scbs, prev_scbp); - if (prev_scbp->flags & SCB_WAITINGQ) - { - AIC_DEV(prev_scbp->cmd)->active_cmds++; - p->activescbs++; - } - prev_scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ); - prev_scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE; - } - } - } - - - /* - * Search waiting for selection list. - */ - if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS)) - printk(INFO_LEAD "Cleaning waiting for selection " - "list.\n", p->host_no, channel, target, lun); - { - unsigned char next, prev, scb_index; - - next = aic_inb(p, WAITING_SCBH); /* Start at head of list. 
*/ - prev = SCB_LIST_NULL; - while (next != SCB_LIST_NULL) - { - aic_outb(p, next, SCBPTR); - scb_index = aic_inb(p, SCB_TAG); - if (scb_index >= p->scb_data->numscbs) - { - /* - * No aic7xxx_verbose check here.....we want to see this since it - * means either the kernel driver or the sequencer screwed things up - */ - printk(WARN_LEAD "Waiting List inconsistency; SCB index=%d, " - "numscbs=%d\n", p->host_no, channel, target, lun, scb_index, - p->scb_data->numscbs); - next = aic_inb(p, SCB_NEXT); - aic7xxx_add_curscb_to_free_list(p); - } - else - { - scbp = p->scb_data->scb_array[scb_index]; - if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag)) - { - next = aic7xxx_abort_waiting_scb(p, scbp, next, prev); - if (scbp->flags & SCB_WAITINGQ) - { - AIC_DEV(scbp->cmd)->active_cmds++; - p->activescbs++; - } - scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ); - scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE; - if (prev == SCB_LIST_NULL) - { - /* - * This is either the first scb on the waiting list, or we - * have already yanked the first and haven't left any behind. - * Either way, we need to turn off the selection hardware if - * it isn't already off. - */ - aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ); - aic_outb(p, CLRSELTIMEO, CLRSINT1); - } - } - else - { - prev = next; - next = aic_inb(p, SCB_NEXT); - } - } - } - } - - /* - * Go through disconnected list and remove any entries we have queued - * for completion, zeroing their control byte too. - */ - if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS)) - printk(INFO_LEAD "Cleaning disconnected scbs " - "list.\n", p->host_no, channel, target, lun); - if (p->flags & AHC_PAGESCBS) - { - unsigned char next, prev, scb_index; - - next = aic_inb(p, DISCONNECTED_SCBH); - prev = SCB_LIST_NULL; - while (next != SCB_LIST_NULL) - { - aic_outb(p, next, SCBPTR); - scb_index = aic_inb(p, SCB_TAG); - if (scb_index > p->scb_data->numscbs) - { - printk(WARN_LEAD "Disconnected List inconsistency; SCB index=%d, " - "numscbs=%d\n", p->host_no, channel, target, lun, scb_index, - p->scb_data->numscbs); - next = aic7xxx_rem_scb_from_disc_list(p, next, prev); - } - else - { - scbp = p->scb_data->scb_array[scb_index]; - if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag)) - { - next = aic7xxx_rem_scb_from_disc_list(p, next, prev); - if (scbp->flags & SCB_WAITINGQ) - { - AIC_DEV(scbp->cmd)->active_cmds++; - p->activescbs++; - } - scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ); - scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE; - scbp->hscb->control = 0; - } - else - { - prev = next; - next = aic_inb(p, SCB_NEXT); - } - } - } - } - - /* - * Walk the free list making sure no entries on the free list have - * a valid SCB_TAG value or SCB_CONTROL byte. - */ - if (p->flags & AHC_PAGESCBS) - { - unsigned char next; - - next = aic_inb(p, FREE_SCBH); - while (next != SCB_LIST_NULL) - { - aic_outb(p, next, SCBPTR); - if (aic_inb(p, SCB_TAG) < p->scb_data->numscbs) - { - printk(WARN_LEAD "Free list inconsistency!.\n", p->host_no, channel, - target, lun); - init_lists = TRUE; - next = SCB_LIST_NULL; - } - else - { - aic_outb(p, SCB_LIST_NULL, SCB_TAG); - aic_outb(p, 0, SCB_CONTROL); - next = aic_inb(p, SCB_NEXT); - } - } - } - - /* - * Go through the hardware SCB array looking for commands that - * were active but not on any list. 
- */
- if (init_lists)
- {
- aic_outb(p, SCB_LIST_NULL, FREE_SCBH);
- aic_outb(p, SCB_LIST_NULL, WAITING_SCBH);
- aic_outb(p, SCB_LIST_NULL, DISCONNECTED_SCBH);
- }
- for (i = p->scb_data->maxhscbs - 1; i >= 0; i--)
- {
- unsigned char scbid;
-
- aic_outb(p, i, SCBPTR);
- if (init_lists)
- {
- aic_outb(p, SCB_LIST_NULL, SCB_TAG);
- aic_outb(p, SCB_LIST_NULL, SCB_NEXT);
- aic_outb(p, 0, SCB_CONTROL);
- aic7xxx_add_curscb_to_free_list(p);
- }
- else
- {
- scbid = aic_inb(p, SCB_TAG);
- if (scbid < p->scb_data->numscbs)
- {
- scbp = p->scb_data->scb_array[scbid];
- if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
- {
- aic_outb(p, 0, SCB_CONTROL);
- aic_outb(p, SCB_LIST_NULL, SCB_TAG);
- aic7xxx_add_curscb_to_free_list(p);
- }
- }
- }
- }
-
- /*
- * Go through the entire SCB array now and look for commands for
- * this target that are still active. These are other (most likely
- * tagged) commands that were disconnected when the reset occurred.
- * For any command we find here, we know this about it: it wasn't on
- * any queue, it wasn't in the qinfifo, and it wasn't in the
- * disconnected or waiting lists, so it really must have been a paged
- * out SCB. In that case, we shouldn't need to bother with updating
- * any counters, just mark the correct flags and go on.
- */
- for (i = 0; i < p->scb_data->numscbs; i++)
- {
- scbp = p->scb_data->scb_array[i];
- if ((scbp->flags & SCB_ACTIVE) &&
- aic7xxx_match_scb(p, scbp, target, channel, lun, tag) &&
- !aic7xxx_scb_on_qoutfifo(p, scbp))
- {
- if (scbp->flags & SCB_WAITINGQ)
- {
- scbq_remove(&p->waiting_scbs, scbp);
- scbq_remove(&AIC_DEV(scbp->cmd)->delayed_scbs, scbp);
- AIC_DEV(scbp->cmd)->active_cmds++;
- p->activescbs++;
- }
- scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
- scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
- }
- }
-
- aic_outb(p, active_scb, SCBPTR);
-}
-
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_clear_intstat
- *
- * Description:
- * Clears the interrupt status.
- *-F*************************************************************************/
-static void
-aic7xxx_clear_intstat(struct aic7xxx_host *p)
-{
- /* Clear any interrupt conditions this may have caused. */
- aic_outb(p, CLRSELDO | CLRSELDI | CLRSELINGO, CLRSINT0);
- aic_outb(p, CLRSELTIMEO | CLRATNO | CLRSCSIRSTI | CLRBUSFREE | CLRSCSIPERR |
- CLRPHASECHG | CLRREQINIT, CLRSINT1);
- aic_outb(p, CLRSCSIINT | CLRSEQINT | CLRBRKADRINT | CLRPARERR, CLRINT);
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_reset_current_bus
- *
- * Description:
- * Reset the current SCSI bus.
- *-F*************************************************************************/
-static void
-aic7xxx_reset_current_bus(struct aic7xxx_host *p)
-{
-
- /* Disable reset interrupts. */
- aic_outb(p, aic_inb(p, SIMODE1) & ~ENSCSIRST, SIMODE1);
-
- /* Turn off the bus' current operations, after all, we shouldn't have any
- * valid commands left to cause a RSELI and SELO once we've tossed the
- * bus away with this reset, so we might as well shut down the sequencer
- * until the bus is restarted as opposed to saving the current settings
- * and restoring them (which makes no sense to me). */
-
- /* Turn on the bus reset. 
*/ - aic_outb(p, aic_inb(p, SCSISEQ) | SCSIRSTO, SCSISEQ); - while ( (aic_inb(p, SCSISEQ) & SCSIRSTO) == 0) - mdelay(5); - - /* - * Some of the new Ultra2 chipsets need a longer delay after a chip - * reset than just the init setup creates, so we have to delay here - * before we go into a reset in order to make the chips happy. - */ - if (p->features & AHC_ULTRA2) - mdelay(250); - else - mdelay(50); - - /* Turn off the bus reset. */ - aic_outb(p, 0, SCSISEQ); - mdelay(10); - - aic7xxx_clear_intstat(p); - /* Re-enable reset interrupts. */ - aic_outb(p, aic_inb(p, SIMODE1) | ENSCSIRST, SIMODE1); - -} - -/*+F************************************************************************* - * Function: - * aic7xxx_reset_channel - * - * Description: - * Reset the channel. - *-F*************************************************************************/ -static void -aic7xxx_reset_channel(struct aic7xxx_host *p, int channel, int initiate_reset) -{ - unsigned long offset_min, offset_max; - unsigned char sblkctl; - int cur_channel; - - if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) - printk(INFO_LEAD "Reset channel called, %s initiate reset.\n", - p->host_no, channel, -1, -1, (initiate_reset==TRUE) ? "will" : "won't" ); - - - if (channel == 1) - { - offset_min = 8; - offset_max = 16; - } - else - { - if (p->features & AHC_TWIN) - { - /* Channel A */ - offset_min = 0; - offset_max = 8; - } - else - { - offset_min = 0; - if (p->features & AHC_WIDE) - { - offset_max = 16; - } - else - { - offset_max = 8; - } - } - } - - while (offset_min < offset_max) - { - /* - * Revert to async/narrow transfers until we renegotiate. - */ - aic_outb(p, 0, TARG_SCSIRATE + offset_min); - if (p->features & AHC_ULTRA2) - { - aic_outb(p, 0, TARG_OFFSET + offset_min); - } - offset_min++; - } - - /* - * Reset the bus and unpause/restart the controller - */ - sblkctl = aic_inb(p, SBLKCTL); - if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 ) - cur_channel = (sblkctl & SELBUSB) >> 3; - else - cur_channel = 0; - if ( (cur_channel != channel) && (p->features & AHC_TWIN) ) - { - /* - * Case 1: Command for another bus is active - */ - if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) - printk(INFO_LEAD "Stealthily resetting idle channel.\n", p->host_no, - channel, -1, -1); - /* - * Stealthily reset the other bus without upsetting the current bus. - */ - aic_outb(p, sblkctl ^ SELBUSB, SBLKCTL); - aic_outb(p, aic_inb(p, SIMODE1) & ~ENBUSFREE, SIMODE1); - if (initiate_reset) - { - aic7xxx_reset_current_bus(p); - } - aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP), SCSISEQ); - aic7xxx_clear_intstat(p); - aic_outb(p, sblkctl, SBLKCTL); - } - else - { - /* - * Case 2: A command from this bus is active or we're idle. - */ - if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) - printk(INFO_LEAD "Resetting currently active channel.\n", p->host_no, - channel, -1, -1); - aic_outb(p, aic_inb(p, SIMODE1) & ~(ENBUSFREE|ENREQINIT), - SIMODE1); - p->flags &= ~AHC_HANDLING_REQINITS; - p->msg_type = MSG_TYPE_NONE; - p->msg_len = 0; - if (initiate_reset) - { - aic7xxx_reset_current_bus(p); - } - aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP), SCSISEQ); - aic7xxx_clear_intstat(p); - } - if (aic7xxx_verbose & VERBOSE_RESET_RETURN) - printk(INFO_LEAD "Channel reset\n", p->host_no, channel, -1, -1); - /* - * Clean up all the state information for the pending transactions - * on this bus. 
- */ - aic7xxx_reset_device(p, ALL_TARGETS, channel, ALL_LUNS, SCB_LIST_NULL); - - if ( !(p->features & AHC_TWIN) ) - { - restart_sequencer(p); - } - - return; -} - -/*+F************************************************************************* - * Function: - * aic7xxx_run_waiting_queues - * - * Description: - * Scan the awaiting_scbs queue downloading and starting as many - * scbs as we can. - *-F*************************************************************************/ -static void -aic7xxx_run_waiting_queues(struct aic7xxx_host *p) -{ - struct aic7xxx_scb *scb; - struct aic_dev_data *aic_dev; - int sent; - - - if (p->waiting_scbs.head == NULL) - return; - - sent = 0; - - /* - * First handle SCBs that are waiting but have been assigned a slot. - */ - while ((scb = scbq_remove_head(&p->waiting_scbs)) != NULL) - { - aic_dev = scb->cmd->device->hostdata; - if ( !scb->tag_action ) - { - aic_dev->temp_q_depth = 1; - } - if ( aic_dev->active_cmds >= aic_dev->temp_q_depth) - { - scbq_insert_tail(&aic_dev->delayed_scbs, scb); - } - else - { - scb->flags &= ~SCB_WAITINGQ; - aic_dev->active_cmds++; - p->activescbs++; - if ( !(scb->tag_action) ) - { - aic7xxx_busy_target(p, scb); - } - p->qinfifo[p->qinfifonext++] = scb->hscb->tag; - sent++; - } - } - if (sent) - { - if (p->features & AHC_QUEUE_REGS) - aic_outb(p, p->qinfifonext, HNSCB_QOFF); - else - { - pause_sequencer(p); - aic_outb(p, p->qinfifonext, KERNEL_QINPOS); - unpause_sequencer(p, FALSE); - } - if (p->activescbs > p->max_activescbs) - p->max_activescbs = p->activescbs; - } -} - -#ifdef CONFIG_PCI - -#define DPE 0x80 -#define SSE 0x40 -#define RMA 0x20 -#define RTA 0x10 -#define STA 0x08 -#define DPR 0x01 - -/*+F************************************************************************* - * Function: - * aic7xxx_pci_intr - * - * Description: - * Check the scsi card for PCI errors and clear the interrupt - * - * NOTE: If you don't have this function and a 2940 card encounters - * a PCI error condition, the machine will end up locked as the - * interrupt handler gets slammed with non-stop PCI error interrupts - *-F*************************************************************************/ -static void -aic7xxx_pci_intr(struct aic7xxx_host *p) -{ - unsigned char status1; - - pci_read_config_byte(p->pdev, PCI_STATUS + 1, &status1); - - if ( (status1 & DPE) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) ) - printk(WARN_LEAD "Data Parity Error during PCI address or PCI write" - "phase.\n", p->host_no, -1, -1, -1); - if ( (status1 & SSE) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) ) - printk(WARN_LEAD "Signal System Error Detected\n", p->host_no, - -1, -1, -1); - if ( (status1 & RMA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) ) - printk(WARN_LEAD "Received a PCI Master Abort\n", p->host_no, - -1, -1, -1); - if ( (status1 & RTA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) ) - printk(WARN_LEAD "Received a PCI Target Abort\n", p->host_no, - -1, -1, -1); - if ( (status1 & STA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) ) - printk(WARN_LEAD "Signaled a PCI Target Abort\n", p->host_no, - -1, -1, -1); - if ( (status1 & DPR) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) ) - printk(WARN_LEAD "Data Parity Error has been reported via PCI pin " - "PERR#\n", p->host_no, -1, -1, -1); - - pci_write_config_byte(p->pdev, PCI_STATUS + 1, status1); - if (status1 & (DPR|RMA|RTA)) - aic_outb(p, CLRPARERR, CLRINT); - - if ( (aic7xxx_panic_on_abort) && (p->spurious_int > 500) ) - aic7xxx_panic_abort(p, NULL); - -} -#endif /* CONFIG_PCI */ - 
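
The bit names above correspond to the high byte of the PCI_STATUS word; the status bits in that register are write-one-to-clear, which is why the handler writes the value it just read straight back. A standalone decoder for the same bits, with a made-up sample input:

#include <stdio.h>

#define DPE 0x80   /* detected parity error */
#define SSE 0x40   /* signalled system error */
#define RMA 0x20   /* received master abort */
#define RTA 0x10   /* received target abort */
#define STA 0x08   /* signalled target abort */
#define DPR 0x01   /* data parity error reported on PERR# */

static void decode_status(unsigned char status1)
{
	static const struct { unsigned char bit; const char *name; } map[] = {
		{ DPE, "data parity error" },
		{ SSE, "signalled system error" },
		{ RMA, "received master abort" },
		{ RTA, "received target abort" },
		{ STA, "signalled target abort" },
		{ DPR, "data parity error reported on PERR#" },
	};
	for (unsigned i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (status1 & map[i].bit)
			printf("PCI: %s\n", map[i].name);
}

int main(void)
{
	decode_status(DPE | RMA);   /* sample value for illustration only */
	return 0;
}
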
-/*+F*************************************************************************
- * Function:
- * aic7xxx_construct_ppr
- *
- * Description:
- * Build up a Parallel Protocol Request message for use with SCSI-3
- * devices.
- *-F*************************************************************************/
-static void
-aic7xxx_construct_ppr(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
- p->msg_buf[p->msg_index++] = MSG_EXTENDED;
- p->msg_buf[p->msg_index++] = MSG_EXT_PPR_LEN;
- p->msg_buf[p->msg_index++] = MSG_EXT_PPR;
- p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.period;
- p->msg_buf[p->msg_index++] = 0;
- p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.offset;
- p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.width;
- p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.options;
- p->msg_len += 8;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_construct_sdtr
- *
- * Description:
- * Constructs a synchronous data transfer message in the message
- * buffer on the sequencer.
- *-F*************************************************************************/
-static void
-aic7xxx_construct_sdtr(struct aic7xxx_host *p, unsigned char period,
- unsigned char offset)
-{
- p->msg_buf[p->msg_index++] = MSG_EXTENDED;
- p->msg_buf[p->msg_index++] = MSG_EXT_SDTR_LEN;
- p->msg_buf[p->msg_index++] = MSG_EXT_SDTR;
- p->msg_buf[p->msg_index++] = period;
- p->msg_buf[p->msg_index++] = offset;
- p->msg_len += 5;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_construct_wdtr
- *
- * Description:
- * Constructs a wide data transfer message in the message buffer
- * on the sequencer.
- *-F*************************************************************************/
-static void
-aic7xxx_construct_wdtr(struct aic7xxx_host *p, unsigned char bus_width)
-{
- p->msg_buf[p->msg_index++] = MSG_EXTENDED;
- p->msg_buf[p->msg_index++] = MSG_EXT_WDTR_LEN;
- p->msg_buf[p->msg_index++] = MSG_EXT_WDTR;
- p->msg_buf[p->msg_index++] = bus_width;
- p->msg_len += 4;
-}
-
-/*+F*************************************************************************
- * Function:
- * aic7xxx_calculate_residual
- *
- * Description:
- * Calculate the residual data not yet transferred.
- *-F*************************************************************************/
-static void
-aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
- struct aic7xxx_hwscb *hscb;
- struct scsi_cmnd *cmd;
- int actual, i;
-
- cmd = scb->cmd;
- hscb = scb->hscb;
-
- /*
- * Don't destroy valid residual information with
- * residual coming from a check sense operation.
- */
- if (((scb->hscb->control & DISCONNECTED) == 0) &&
- (scb->flags & SCB_SENSE) == 0)
- {
- /*
- * We had an underflow. At this time, there's only
- * one other driver that bothers to check for this,
- * and cmd->underflow seems to be set rather half-
- * heartedly in the higher-level SCSI code.
- */
- actual = scb->sg_length;
- for (i=1; i < hscb->residual_SG_segment_count; i++)
- {
- actual -= scb->sg_list[scb->sg_count - i].length;
- }
- actual -= (hscb->residual_data_count[2] << 16) |
- (hscb->residual_data_count[1] << 8) |
- hscb->residual_data_count[0];
-
- if (actual < cmd->underflow)
- {
- if (aic7xxx_verbose & VERBOSE_MINOR_ERROR)
- {
- printk(INFO_LEAD "Underflow - Wanted %u, %s %u, residual SG "
- "count %d.\n", p->host_no, CTL_OF_SCB(scb), cmd->underflow,
- (rq_data_dir(cmd->request) == WRITE) ? 
"wrote" : "read", actual, - hscb->residual_SG_segment_count); - printk(INFO_LEAD "status 0x%x.\n", p->host_no, CTL_OF_SCB(scb), - hscb->target_status); - } - /* - * In 2.4, only send back the residual information, don't flag this - * as an error. Before 2.4 we had to flag this as an error because - * the mid layer didn't check residual data counts to see if the - * command needs retried. - */ - scsi_set_resid(cmd, scb->sg_length - actual); - aic7xxx_status(cmd) = hscb->target_status; - } - } - - /* - * Clean out the residual information in the SCB for the - * next consumer. - */ - hscb->residual_data_count[2] = 0; - hscb->residual_data_count[1] = 0; - hscb->residual_data_count[0] = 0; - hscb->residual_SG_segment_count = 0; -} - -/*+F************************************************************************* - * Function: - * aic7xxx_handle_device_reset - * - * Description: - * Interrupt handler for sequencer interrupts (SEQINT). - *-F*************************************************************************/ -static void -aic7xxx_handle_device_reset(struct aic7xxx_host *p, int target, int channel) -{ - unsigned char tindex = target; - - tindex |= ((channel & 0x01) << 3); - - /* - * Go back to async/narrow transfers and renegotiate. - */ - aic_outb(p, 0, TARG_SCSIRATE + tindex); - if (p->features & AHC_ULTRA2) - aic_outb(p, 0, TARG_OFFSET + tindex); - aic7xxx_reset_device(p, target, channel, ALL_LUNS, SCB_LIST_NULL); - if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) - printk(INFO_LEAD "Bus Device Reset delivered.\n", p->host_no, channel, - target, -1); - aic7xxx_run_done_queue(p, /*complete*/ TRUE); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_handle_seqint - * - * Description: - * Interrupt handler for sequencer interrupts (SEQINT). - *-F*************************************************************************/ -static void -aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat) -{ - struct aic7xxx_scb *scb; - struct aic_dev_data *aic_dev; - unsigned short target_mask; - unsigned char target, lun, tindex; - unsigned char queue_flag = FALSE; - char channel; - int result; - - target = ((aic_inb(p, SAVED_TCL) >> 4) & 0x0f); - if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 ) - channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3; - else - channel = 0; - tindex = target + (channel << 3); - lun = aic_inb(p, SAVED_TCL) & 0x07; - target_mask = (0x01 << tindex); - - /* - * Go ahead and clear the SEQINT now, that avoids any interrupt race - * conditions later on in case we enable some other interrupt. 
- */ - aic_outb(p, CLRSEQINT, CLRINT); - switch (intstat & SEQINT_MASK) - { - case NO_MATCH: - { - aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP), - SCSISEQ); - printk(WARN_LEAD "No active SCB for reconnecting target - Issuing " - "BUS DEVICE RESET.\n", p->host_no, channel, target, lun); - printk(WARN_LEAD " SAVED_TCL=0x%x, ARG_1=0x%x, SEQADDR=0x%x\n", - p->host_no, channel, target, lun, - aic_inb(p, SAVED_TCL), aic_inb(p, ARG_1), - (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0)); - if (aic7xxx_panic_on_abort) - aic7xxx_panic_abort(p, NULL); - } - break; - - case SEND_REJECT: - { - if (aic7xxx_verbose & VERBOSE_MINOR_ERROR) - printk(INFO_LEAD "Rejecting unknown message (0x%x) received from " - "target, SEQ_FLAGS=0x%x\n", p->host_no, channel, target, lun, - aic_inb(p, ACCUM), aic_inb(p, SEQ_FLAGS)); - } - break; - - case NO_IDENT: - { - /* - * The reconnecting target either did not send an identify - * message, or did, but we didn't find an SCB to match and - * before it could respond to our ATN/abort, it hit a dataphase. - * The only safe thing to do is to blow it away with a bus - * reset. - */ - if (aic7xxx_verbose & (VERBOSE_SEQINT | VERBOSE_RESET_MID)) - printk(INFO_LEAD "Target did not send an IDENTIFY message; " - "LASTPHASE 0x%x, SAVED_TCL 0x%x\n", p->host_no, channel, target, - lun, aic_inb(p, LASTPHASE), aic_inb(p, SAVED_TCL)); - - aic7xxx_reset_channel(p, channel, /*initiate reset*/ TRUE); - aic7xxx_run_done_queue(p, TRUE); - - } - break; - - case BAD_PHASE: - if (aic_inb(p, LASTPHASE) == P_BUSFREE) - { - if (aic7xxx_verbose & VERBOSE_SEQINT) - printk(INFO_LEAD "Missed busfree.\n", p->host_no, channel, - target, lun); - restart_sequencer(p); - } - else - { - if (aic7xxx_verbose & VERBOSE_SEQINT) - printk(INFO_LEAD "Unknown scsi bus phase, continuing\n", p->host_no, - channel, target, lun); - } - break; - - case EXTENDED_MSG: - { - p->msg_type = MSG_TYPE_INITIATOR_MSGIN; - p->msg_len = 0; - p->msg_index = 0; - -#ifdef AIC7XXX_VERBOSE_DEBUGGING - if (aic7xxx_verbose > 0xffff) - printk(INFO_LEAD "Enabling REQINITs for MSG_IN\n", p->host_no, - channel, target, lun); -#endif - - /* - * To actually receive the message, simply turn on - * REQINIT interrupts and let our interrupt handler - * do the rest (REQINIT should already be true). - */ - p->flags |= AHC_HANDLING_REQINITS; - aic_outb(p, aic_inb(p, SIMODE1) | ENREQINIT, SIMODE1); - - /* - * We don't want the sequencer unpaused yet so we return early - */ - return; - } - - case REJECT_MSG: - { - /* - * What we care about here is if we had an outstanding SDTR - * or WDTR message for this target. If we did, this is a - * signal that the target is refusing negotiation. - */ - unsigned char scb_index; - unsigned char last_msg; - - scb_index = aic_inb(p, SCB_TAG); - scb = p->scb_data->scb_array[scb_index]; - aic_dev = AIC_DEV(scb->cmd); - last_msg = aic_inb(p, LAST_MSG); - - if ( (last_msg == MSG_IDENTIFYFLAG) && - (scb->tag_action) && - !(scb->flags & SCB_MSGOUT_BITS) ) - { - if (scb->tag_action == MSG_ORDERED_Q_TAG) - { - /* - * OK...the device seems able to accept tagged commands, but - * not ordered tag commands, only simple tag commands. So, we - * disable ordered tag commands and go on with life just like - * normal. 
- */ - scsi_adjust_queue_depth(scb->cmd->device, MSG_SIMPLE_TAG, - scb->cmd->device->queue_depth); - scb->tag_action = MSG_SIMPLE_Q_TAG; - scb->hscb->control &= ~SCB_TAG_TYPE; - scb->hscb->control |= MSG_SIMPLE_Q_TAG; - aic_outb(p, scb->hscb->control, SCB_CONTROL); - /* - * OK..we set the tag type to simple tag command, now we re-assert - * ATNO and hope this will take us into the identify phase again - * so we can resend the tag type and info to the device. - */ - aic_outb(p, MSG_IDENTIFYFLAG, MSG_OUT); - aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO); - } - else if (scb->tag_action == MSG_SIMPLE_Q_TAG) - { - unsigned char i; - struct aic7xxx_scb *scbp; - int old_verbose; - /* - * Hmmmm....the device is flaking out on tagged commands. - */ - scsi_adjust_queue_depth(scb->cmd->device, 0 /* untagged */, - p->host->cmd_per_lun); - aic_dev->max_q_depth = aic_dev->temp_q_depth = 1; - /* - * We set this command up as a bus device reset. However, we have - * to clear the tag type as it's causing us problems. We shouldn't - * have to worry about any other commands being active, since if - * the device is refusing tagged commands, this should be the - * first tagged command sent to the device, however, we do have - * to worry about any other tagged commands that may already be - * in the qinfifo. The easiest way to do this, is to issue a BDR, - * send all the commands back to the mid level code, then let them - * come back and get rebuilt as untagged commands. - */ - scb->tag_action = 0; - scb->hscb->control &= ~(TAG_ENB | SCB_TAG_TYPE); - aic_outb(p, scb->hscb->control, SCB_CONTROL); - - old_verbose = aic7xxx_verbose; - aic7xxx_verbose &= ~(VERBOSE_RESET|VERBOSE_ABORT); - for (i=0; i < p->scb_data->numscbs; i++) - { - scbp = p->scb_data->scb_array[i]; - if ((scbp->flags & SCB_ACTIVE) && (scbp != scb)) - { - if (aic7xxx_match_scb(p, scbp, target, channel, lun, i)) - { - aic7xxx_reset_device(p, target, channel, lun, i); - } - } - } - aic7xxx_run_done_queue(p, TRUE); - aic7xxx_verbose = old_verbose; - /* - * Wait until after the for loop to set the busy index since - * aic7xxx_reset_device will clear the busy index during its - * operation. - */ - aic7xxx_busy_target(p, scb); - printk(INFO_LEAD "Device is refusing tagged commands, using " - "untagged I/O.\n", p->host_no, channel, target, lun); - aic_outb(p, MSG_IDENTIFYFLAG, MSG_OUT); - aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO); - } - } - else if (scb->flags & SCB_MSGOUT_PPR) - { - /* - * As per the draft specs, any device capable of supporting any of - * the option values other than 0 are not allowed to reject the - * PPR message. Instead, they must negotiate out what they do - * support instead of rejecting our offering or else they cause - * a parity error during msg_out phase to signal that they don't - * like our settings. 
- */ - aic_dev->needppr = aic_dev->needppr_copy = 0; - aic7xxx_set_width(p, target, channel, lun, MSG_EXT_WDTR_BUS_8_BIT, - (AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE), aic_dev); - aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0, - AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE, - aic_dev); - aic_dev->goal.options = aic_dev->dtr_pending = 0; - scb->flags &= ~SCB_MSGOUT_BITS; - if(aic7xxx_verbose & VERBOSE_NEGOTIATION2) - { - printk(INFO_LEAD "Device is rejecting PPR messages, falling " - "back.\n", p->host_no, channel, target, lun); - } - if ( aic_dev->goal.width ) - { - aic_dev->needwdtr = aic_dev->needwdtr_copy = 1; - aic_dev->dtr_pending = 1; - scb->flags |= SCB_MSGOUT_WDTR; - } - if ( aic_dev->goal.offset ) - { - aic_dev->needsdtr = aic_dev->needsdtr_copy = 1; - if( !aic_dev->dtr_pending ) - { - aic_dev->dtr_pending = 1; - scb->flags |= SCB_MSGOUT_SDTR; - } - } - if ( aic_dev->dtr_pending ) - { - aic_outb(p, HOST_MSG, MSG_OUT); - aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO); - } - } - else if (scb->flags & SCB_MSGOUT_WDTR) - { - /* - * note 8bit xfers and clear flag - */ - aic_dev->needwdtr = aic_dev->needwdtr_copy = 0; - scb->flags &= ~SCB_MSGOUT_BITS; - aic7xxx_set_width(p, target, channel, lun, MSG_EXT_WDTR_BUS_8_BIT, - (AHC_TRANS_ACTIVE|AHC_TRANS_GOAL|AHC_TRANS_CUR), aic_dev); - aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0, - AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE, - aic_dev); - if(aic7xxx_verbose & VERBOSE_NEGOTIATION2) - { - printk(INFO_LEAD "Device is rejecting WDTR messages, using " - "narrow transfers.\n", p->host_no, channel, target, lun); - } - aic_dev->needsdtr = aic_dev->needsdtr_copy; - } - else if (scb->flags & SCB_MSGOUT_SDTR) - { - /* - * note asynch xfers and clear flag - */ - aic_dev->needsdtr = aic_dev->needsdtr_copy = 0; - scb->flags &= ~SCB_MSGOUT_BITS; - aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0, - (AHC_TRANS_CUR|AHC_TRANS_ACTIVE|AHC_TRANS_GOAL), aic_dev); - if(aic7xxx_verbose & VERBOSE_NEGOTIATION2) - { - printk(INFO_LEAD "Device is rejecting SDTR messages, using " - "async transfers.\n", p->host_no, channel, target, lun); - } - } - else if (aic7xxx_verbose & VERBOSE_SEQINT) - { - /* - * Otherwise, we ignore it. - */ - printk(INFO_LEAD "Received MESSAGE_REJECT for unknown cause. " - "Ignoring.\n", p->host_no, channel, target, lun); - } - } - break; - - case BAD_STATUS: - { - unsigned char scb_index; - struct aic7xxx_hwscb *hscb; - struct scsi_cmnd *cmd; - - /* The sequencer will notify us when a command has an error that - * would be of interest to the kernel. This allows us to leave - * the sequencer running in the common case of command completes - * without error. The sequencer will have DMA'd the SCB back - * up to us, so we can reference the drivers SCB array. - * - * Set the default return value to 0 indicating not to send - * sense. The sense code will change this if needed and this - * reduces code duplication. 
- */
- aic_outb(p, 0, RETURN_1);
- scb_index = aic_inb(p, SCB_TAG);
- if (scb_index > p->scb_data->numscbs)
- {
- printk(WARN_LEAD "Invalid SCB during SEQINT 0x%02x, SCB_TAG %d.\n",
- p->host_no, channel, target, lun, intstat, scb_index);
- break;
- }
- scb = p->scb_data->scb_array[scb_index];
- hscb = scb->hscb;
-
- if (!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL))
- {
- printk(WARN_LEAD "Invalid SCB during SEQINT 0x%x, scb %d, flags 0x%x,"
- " cmd 0x%lx.\n", p->host_no, channel, target, lun, intstat,
- scb_index, scb->flags, (unsigned long) scb->cmd);
- }
- else
- {
- cmd = scb->cmd;
- aic_dev = AIC_DEV(scb->cmd);
- hscb->target_status = aic_inb(p, SCB_TARGET_STATUS);
- aic7xxx_status(cmd) = hscb->target_status;
-
- cmd->result = hscb->target_status;
-
- switch (status_byte(hscb->target_status))
- {
- case GOOD:
- if (aic7xxx_verbose & VERBOSE_SEQINT)
- printk(INFO_LEAD "Interrupted for status of GOOD???\n",
- p->host_no, CTL_OF_SCB(scb));
- break;
-
- case COMMAND_TERMINATED:
- case CHECK_CONDITION:
- if ( !(scb->flags & SCB_SENSE) )
- {
- /*
- * Send a sense command to the requesting target.
- * XXX - revisit this and get rid of the memcopys.
- */
- memcpy(scb->sense_cmd, &generic_sense[0],
- sizeof(generic_sense));
-
- scb->sense_cmd[1] = (cmd->device->lun << 5);
- scb->sense_cmd[4] = SCSI_SENSE_BUFFERSIZE;
-
- scb->sg_list[0].length =
- cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
- scb->sg_list[0].address =
- cpu_to_le32(pci_map_single(p->pdev, cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE));
-
- /*
- * XXX - We should allow disconnection, but can't as it
- * might allow overlapped tagged commands.
- */
- /* hscb->control &= DISCENB; */
- hscb->control = 0;
- hscb->target_status = 0;
- hscb->SG_list_pointer =
- cpu_to_le32(SCB_DMA_ADDR(scb, scb->sg_list));
- hscb->SCSI_cmd_pointer =
- cpu_to_le32(SCB_DMA_ADDR(scb, scb->sense_cmd));
- hscb->data_count = scb->sg_list[0].length;
- hscb->data_pointer = scb->sg_list[0].address;
- hscb->SCSI_cmd_length = COMMAND_SIZE(scb->sense_cmd[0]);
- hscb->residual_SG_segment_count = 0;
- hscb->residual_data_count[0] = 0;
- hscb->residual_data_count[1] = 0;
- hscb->residual_data_count[2] = 0;
-
- scb->sg_count = hscb->SG_segment_count = 1;
- scb->sg_length = SCSI_SENSE_BUFFERSIZE;
- scb->tag_action = 0;
- scb->flags |= SCB_SENSE;
- /*
- * Ensure the target is busy since this will be an
- * untagged request.
- */
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- {
- if (scb->flags & SCB_MSGOUT_BITS)
- printk(INFO_LEAD "Requesting SENSE with %s\n", p->host_no,
- CTL_OF_SCB(scb), (scb->flags & SCB_MSGOUT_SDTR) ? 
- "SDTR" : "WDTR"); - else - printk(INFO_LEAD "Requesting SENSE, no MSG\n", p->host_no, - CTL_OF_SCB(scb)); - } -#endif - aic7xxx_busy_target(p, scb); - aic_outb(p, SEND_SENSE, RETURN_1); - aic7xxx_error(cmd) = DID_OK; - break; - } /* first time sense, no errors */ - printk(INFO_LEAD "CHECK_CONDITION on REQUEST_SENSE, returning " - "an error.\n", p->host_no, CTL_OF_SCB(scb)); - aic7xxx_error(cmd) = DID_ERROR; - scb->flags &= ~SCB_SENSE; - break; - - case QUEUE_FULL: - queue_flag = TRUE; /* Mark that this is a QUEUE_FULL and */ - case BUSY: /* drop through to here */ - { - struct aic7xxx_scb *next_scbp, *prev_scbp; - unsigned char active_hscb, next_hscb, prev_hscb, scb_index; - /* - * We have to look three places for queued commands: - * 1: p->waiting_scbs queue - * 2: QINFIFO - * 3: WAITING_SCBS list on card (for commands that are started - * but haven't yet made it to the device) - * - * Of special note here is that commands on 2 or 3 above will - * have already been marked as active, while commands on 1 will - * not. The aic7xxx_done() function will want to unmark them - * from active, so any commands we pull off of 1 need to - * up the active count. - */ - next_scbp = p->waiting_scbs.head; - while ( next_scbp != NULL ) - { - prev_scbp = next_scbp; - next_scbp = next_scbp->q_next; - if ( aic7xxx_match_scb(p, prev_scbp, target, channel, lun, - SCB_LIST_NULL) ) - { - scbq_remove(&p->waiting_scbs, prev_scbp); - scb->flags = SCB_QUEUED_FOR_DONE | SCB_QUEUE_FULL; - p->activescbs++; - aic_dev->active_cmds++; - } - } - aic7xxx_search_qinfifo(p, target, channel, lun, - SCB_LIST_NULL, SCB_QUEUED_FOR_DONE | SCB_QUEUE_FULL, - FALSE, NULL); - next_scbp = NULL; - active_hscb = aic_inb(p, SCBPTR); - prev_hscb = next_hscb = scb_index = SCB_LIST_NULL; - next_hscb = aic_inb(p, WAITING_SCBH); - while (next_hscb != SCB_LIST_NULL) - { - aic_outb(p, next_hscb, SCBPTR); - scb_index = aic_inb(p, SCB_TAG); - if (scb_index < p->scb_data->numscbs) - { - next_scbp = p->scb_data->scb_array[scb_index]; - if (aic7xxx_match_scb(p, next_scbp, target, channel, lun, - SCB_LIST_NULL) ) - { - next_scbp->flags = SCB_QUEUED_FOR_DONE | SCB_QUEUE_FULL; - next_hscb = aic_inb(p, SCB_NEXT); - aic_outb(p, 0, SCB_CONTROL); - aic_outb(p, SCB_LIST_NULL, SCB_TAG); - aic7xxx_add_curscb_to_free_list(p); - if (prev_hscb == SCB_LIST_NULL) - { - /* We were first on the list, - * so we kill the selection - * hardware. 
Let the sequencer
- * re-init the hardware itself
- */
- aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ);
- aic_outb(p, CLRSELTIMEO, CLRSINT1);
- aic_outb(p, next_hscb, WAITING_SCBH);
- }
- else
- {
- aic_outb(p, prev_hscb, SCBPTR);
- aic_outb(p, next_hscb, SCB_NEXT);
- }
- }
- else
- {
- prev_hscb = next_hscb;
- next_hscb = aic_inb(p, SCB_NEXT);
- }
- } /* scb_index >= p->scb_data->numscbs */
- }
- aic_outb(p, active_hscb, SCBPTR);
- aic7xxx_run_done_queue(p, FALSE);
-
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
- if( (aic7xxx_verbose & VERBOSE_MINOR_ERROR) ||
- (aic7xxx_verbose > 0xffff) )
- {
- if (queue_flag)
- printk(INFO_LEAD "Queue full received; queue depth %d, "
- "active %d\n", p->host_no, CTL_OF_SCB(scb),
- aic_dev->max_q_depth, aic_dev->active_cmds);
- else
- printk(INFO_LEAD "Target busy\n", p->host_no, CTL_OF_SCB(scb));
- }
-#endif
- if (queue_flag)
- {
- int diff;
- result = scsi_track_queue_full(cmd->device,
- aic_dev->active_cmds);
- if ( result < 0 )
- {
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- printk(INFO_LEAD "Tagged Command Queueing disabled.\n",
- p->host_no, CTL_OF_SCB(scb));
- diff = aic_dev->max_q_depth - p->host->cmd_per_lun;
- aic_dev->temp_q_depth = 1;
- aic_dev->max_q_depth = 1;
- }
- else if ( result > 0 )
- {
- if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
- printk(INFO_LEAD "Queue depth reduced to %d\n", p->host_no,
- CTL_OF_SCB(scb), result);
- diff = aic_dev->max_q_depth - result;
- aic_dev->max_q_depth = result;
- /* temp_q_depth could have been dropped to 1 for an untagged
- * command that might be coming up */
- if(aic_dev->temp_q_depth > result)
- aic_dev->temp_q_depth = result;
- }
- /* We should free up the now unused SCB entries. But, that's
- * a difficult thing to do because we use a direct indexed
- * array, so we can't just take any entries and free them,
- * we *have* to free the ones at the end of the array, and
- * they very well could be in use right now, which means
- * in order to do this right, we have to add a delayed
- * freeing mechanism tied into the scb_free() code area.
- * We'll add that later.
- */
- }
- break;
- }
-
- default:
- if (aic7xxx_verbose & VERBOSE_SEQINT)
- printk(INFO_LEAD "Unexpected target status 0x%x.\n", p->host_no,
- CTL_OF_SCB(scb), scb->hscb->target_status);
- if (!aic7xxx_error(cmd))
- {
- aic7xxx_error(cmd) = DID_RETRY_COMMAND;
- }
- break;
- } /* end switch */
- } /* end else of */
- }
- break;
-
- case AWAITING_MSG:
- {
- unsigned char scb_index, msg_out;
-
- scb_index = aic_inb(p, SCB_TAG);
- msg_out = aic_inb(p, MSG_OUT);
- scb = p->scb_data->scb_array[scb_index];
- aic_dev = AIC_DEV(scb->cmd);
- p->msg_index = p->msg_len = 0;
- /*
- * This SCB had a MK_MESSAGE set in its control byte informing
- * the sequencer that we wanted to send a special message to
- * this target. 
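
The QUEUE_FULL handling above lets the device dictate its real queue depth: clamp to the number of commands it actually held, and fall back to untagged I/O when that collapses to one. A simplified standalone model of that policy (this is not the mid layer's scsi_track_queue_full(), whose return convention the code above follows; it is a sketch of the same idea):

#include <stdio.h>

struct dev_depth {
	int max_depth;    /* negotiated ceiling */
	int tagged;       /* still using tagged I/O? */
};

static void on_queue_full(struct dev_depth *d, int active_cmds)
{
	if (active_cmds > 1 && active_cmds < d->max_depth) {
		d->max_depth = active_cmds;   /* the device proved it holds this many */
		printf("queue depth reduced to %d\n", d->max_depth);
	} else if (active_cmds <= 1) {
		d->max_depth = 1;
		d->tagged = 0;                /* fall back to untagged I/O */
		printf("tagged queueing disabled\n");
	}
}

int main(void)
{
	struct dev_depth d = { 32, 1 };
	on_queue_full(&d, 12);   /* device choked with 12 in flight */
	on_queue_full(&d, 1);    /* still choking: drop to untagged */
	return 0;
}
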
- */ - - if ( !(scb->flags & SCB_DEVICE_RESET) && - (msg_out == MSG_IDENTIFYFLAG) && - (scb->hscb->control & TAG_ENB) ) - { - p->msg_buf[p->msg_index++] = scb->tag_action; - p->msg_buf[p->msg_index++] = scb->hscb->tag; - p->msg_len += 2; - } - - if (scb->flags & SCB_DEVICE_RESET) - { - p->msg_buf[p->msg_index++] = MSG_BUS_DEV_RESET; - p->msg_len++; - if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) - printk(INFO_LEAD "Bus device reset mailed.\n", - p->host_no, CTL_OF_SCB(scb)); - } - else if (scb->flags & SCB_ABORT) - { - if (scb->tag_action) - { - p->msg_buf[p->msg_index++] = MSG_ABORT_TAG; - } - else - { - p->msg_buf[p->msg_index++] = MSG_ABORT; - } - p->msg_len++; - if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS) - printk(INFO_LEAD "Abort message mailed.\n", p->host_no, - CTL_OF_SCB(scb)); - } - else if (scb->flags & SCB_MSGOUT_PPR) - { - if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) - { - printk(INFO_LEAD "Sending PPR (%d/%d/%d/%d) message.\n", - p->host_no, CTL_OF_SCB(scb), - aic_dev->goal.period, - aic_dev->goal.offset, - aic_dev->goal.width, - aic_dev->goal.options); - } - aic7xxx_construct_ppr(p, scb); - } - else if (scb->flags & SCB_MSGOUT_WDTR) - { - if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) - { - printk(INFO_LEAD "Sending WDTR message.\n", p->host_no, - CTL_OF_SCB(scb)); - } - aic7xxx_construct_wdtr(p, aic_dev->goal.width); - } - else if (scb->flags & SCB_MSGOUT_SDTR) - { - unsigned int max_sync, period; - unsigned char options = 0; - /* - * Now that the device is selected, use the bits in SBLKCTL and - * SSTAT2 to determine the max sync rate for this device. - */ - if (p->features & AHC_ULTRA2) - { - if ( (aic_inb(p, SBLKCTL) & ENAB40) && - !(aic_inb(p, SSTAT2) & EXP_ACTIVE) ) - { - max_sync = AHC_SYNCRATE_ULTRA2; - } - else - { - max_sync = AHC_SYNCRATE_ULTRA; - } - } - else if (p->features & AHC_ULTRA) - { - max_sync = AHC_SYNCRATE_ULTRA; - } - else - { - max_sync = AHC_SYNCRATE_FAST; - } - period = aic_dev->goal.period; - aic7xxx_find_syncrate(p, &period, max_sync, &options); - if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) - { - printk(INFO_LEAD "Sending SDTR %d/%d message.\n", p->host_no, - CTL_OF_SCB(scb), period, - aic_dev->goal.offset); - } - aic7xxx_construct_sdtr(p, period, aic_dev->goal.offset); - } - else - { - panic("aic7xxx: AWAITING_MSG for an SCB that does " - "not have a waiting message.\n"); - } - /* - * We've set everything up to send our message, now to actually do - * so we need to enable reqinit interrupts and let the interrupt - * handler do the rest. We don't want to unpause the sequencer yet - * though so we'll return early. We also have to make sure that - * we clear the SEQINT *BEFORE* we set the REQINIT handler active - * or else it's possible on VLB cards to lose the first REQINIT - * interrupt. Edge triggered EISA cards could also lose this - * interrupt, although PCI and level triggered cards should not - * have this problem since they continually interrupt the kernel - * until we take care of the situation. - */ - scb->flags |= SCB_MSGOUT_SENT; - p->msg_index = 0; - p->msg_type = MSG_TYPE_INITIATOR_MSGOUT; - p->flags |= AHC_HANDLING_REQINITS; - aic_outb(p, aic_inb(p, SIMODE1) | ENREQINIT, SIMODE1); - return; - } - break; - - case DATA_OVERRUN: - { - unsigned char scb_index = aic_inb(p, SCB_TAG); - unsigned char lastphase = aic_inb(p, LASTPHASE); - unsigned int i; - - scb = (p->scb_data->scb_array[scb_index]); - /* - * XXX - What do we really want to do on an overrun? 
The
- * mid-level SCSI code should handle this, but for now,
- * we'll just indicate that the command should be retried.
- * If we retrieved sense info on this target, then the
- * base SENSE info should have been saved prior to the
- * overrun error. In that case, we return DID_OK and let
- * the mid level code pick up on the sense info. Otherwise
- * we return DID_ERROR so the command will get retried.
- */
- if ( !(scb->flags & SCB_SENSE) )
- {
- printk(WARN_LEAD "Data overrun detected in %s phase, tag %d;\n",
- p->host_no, CTL_OF_SCB(scb),
- (lastphase == P_DATAIN) ? "Data-In" : "Data-Out", scb->hscb->tag);
- printk(KERN_WARNING " %s seen Data Phase. Length=%d, NumSGs=%d.\n",
- (aic_inb(p, SEQ_FLAGS) & DPHASE) ? "Have" : "Haven't",
- scb->sg_length, scb->sg_count);
- printk(KERN_WARNING " Raw SCSI Command: 0x");
- for (i = 0; i < scb->hscb->SCSI_cmd_length; i++)
- {
- printk("%02x ", scb->cmd->cmnd[i]);
- }
- printk("\n");
- if(aic7xxx_verbose > 0xffff)
- {
- for (i = 0; i < scb->sg_count; i++)
- {
- printk(KERN_WARNING " sg[%d] - Addr 0x%x : Length %d\n",
- i,
- le32_to_cpu(scb->sg_list[i].address),
- le32_to_cpu(scb->sg_list[i].length) );
- }
- }
- aic7xxx_error(scb->cmd) = DID_ERROR;
- }
- else
- printk(INFO_LEAD "Data Overrun during SEND_SENSE operation.\n",
- p->host_no, CTL_OF_SCB(scb));
- }
- break;
-
- case WIDE_RESIDUE:
- {
- unsigned char resid_sgcnt, index;
- unsigned char scb_index = aic_inb(p, SCB_TAG);
- unsigned int cur_addr, resid_dcnt;
- unsigned int native_addr, native_length, sg_addr;
- int i;
-
- if(scb_index > p->scb_data->numscbs)
- {
- printk(WARN_LEAD "invalid scb_index during WIDE_RESIDUE.\n",
- p->host_no, -1, -1, -1);
- /*
- * XXX: Add error handling here
- */
- break;
- }
- scb = p->scb_data->scb_array[scb_index];
- if(!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL))
- {
- printk(WARN_LEAD "invalid scb during WIDE_RESIDUE flags:0x%x "
- "scb->cmd:0x%lx\n", p->host_no, CTL_OF_SCB(scb),
- scb->flags, (unsigned long)scb->cmd);
- break;
- }
- if(aic7xxx_verbose & VERBOSE_MINOR_ERROR)
- printk(INFO_LEAD "Got WIDE_RESIDUE message, patching up data "
- "pointer.\n", p->host_no, CTL_OF_SCB(scb));
-
- /*
- * We have a valid scb to use on this WIDE_RESIDUE message, so
- * we need to walk the sg list looking for this particular sg
- * segment, then see if we happen to be at the very beginning of
- * the segment. If we are, then we have to back things up to
- * the previous segment. If not, then we simply need to remove
- * one byte from this segment's address and add one to the byte
- * count.
- */
- cur_addr = aic_inb(p, SHADDR) | (aic_inb(p, SHADDR + 1) << 8) |
- (aic_inb(p, SHADDR + 2) << 16) | (aic_inb(p, SHADDR + 3) << 24);
- sg_addr = aic_inb(p, SG_COUNT + 1) | (aic_inb(p, SG_COUNT + 2) << 8) |
- (aic_inb(p, SG_COUNT + 3) << 16) | (aic_inb(p, SG_COUNT + 4) << 24);
- resid_sgcnt = aic_inb(p, SCB_RESID_SGCNT);
- resid_dcnt = aic_inb(p, SCB_RESID_DCNT) |
- (aic_inb(p, SCB_RESID_DCNT + 1) << 8) |
- (aic_inb(p, SCB_RESID_DCNT + 2) << 16);
- index = scb->sg_count - ((resid_sgcnt) ? resid_sgcnt : 1);
- native_addr = le32_to_cpu(scb->sg_list[index].address);
- native_length = le32_to_cpu(scb->sg_list[index].length);
- /*
- * If resid_dcnt == native_length, then we just loaded this SG
- * segment and we need to back it up one...
- */
- if(resid_dcnt == native_length)
- {
- if(index == 0)
- {
- /*
- * Oops, this isn't right, we can't back up to before the
- * beginning. This must be a bogus message, ignore it. 
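
The WIDE_RESIDUE handling here backs the data pointer up exactly one byte, crossing an S/G boundary when the current element has not moved any data yet. A standalone sketch of that pointer math, with the register I/O omitted and made-up addresses:

#include <stdio.h>

struct sg_elem { unsigned int addr, len; };

/* Hand one byte back to the device by growing the residual, stepping
 * back across an S/G boundary when the current element is untouched. */
static void back_up_one_byte(struct sg_elem *sg, unsigned int sg_count,
                             unsigned int *resid_sgcnt,
                             unsigned int *resid_dcnt,
                             unsigned int *cur_addr)
{
	unsigned int index = sg_count - (*resid_sgcnt ? *resid_sgcnt : 1);

	if (*resid_dcnt == sg[index].len) {
		if (index == 0)
			return;            /* can't back up before the start */
		*resid_sgcnt += 1;         /* previous element, its last byte */
		*resid_dcnt = 1;
		*cur_addr = sg[index - 1].addr + sg[index - 1].len - 1;
	} else {
		*resid_dcnt += 1;          /* stay inside this element */
		*cur_addr -= 1;
	}
}

int main(void)
{
	struct sg_elem sg[] = { { 0x1000, 512 }, { 0x2000, 512 } };
	unsigned int sgcnt = 1, dcnt = 512, addr = 0x2000;

	/* second element just loaded, nothing moved yet: step back */
	back_up_one_byte(sg, 2, &sgcnt, &dcnt, &addr);
	printf("resid_sgcnt=%u resid_dcnt=%u cur_addr=0x%x\n",
	       sgcnt, dcnt, addr);   /* 2, 1, 0x11ff */
	return 0;
}
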
- */ - break; - } - resid_dcnt = 1; - resid_sgcnt += 1; - native_addr = le32_to_cpu(scb->sg_list[index - 1].address); - native_length = le32_to_cpu(scb->sg_list[index - 1].length); - cur_addr = native_addr + (native_length - 1); - sg_addr -= sizeof(struct hw_scatterlist); - } - else - { - /* - * resid_dcnt != native_length, so we are in the middle of a SG - * element. Back it up one byte and leave the rest alone. - */ - resid_dcnt += 1; - cur_addr -= 1; - } - - /* - * Output the new addresses and counts to the right places on the - * card. - */ - aic_outb(p, resid_sgcnt, SG_COUNT); - aic_outb(p, resid_sgcnt, SCB_RESID_SGCNT); - aic_outb(p, sg_addr & 0xff, SG_COUNT + 1); - aic_outb(p, (sg_addr >> 8) & 0xff, SG_COUNT + 2); - aic_outb(p, (sg_addr >> 16) & 0xff, SG_COUNT + 3); - aic_outb(p, (sg_addr >> 24) & 0xff, SG_COUNT + 4); - aic_outb(p, resid_dcnt & 0xff, SCB_RESID_DCNT); - aic_outb(p, (resid_dcnt >> 8) & 0xff, SCB_RESID_DCNT + 1); - aic_outb(p, (resid_dcnt >> 16) & 0xff, SCB_RESID_DCNT + 2); - - /* - * The sequencer actually wants to find the new address - * in the SHADDR register set. On the Ultra2 and later controllers - * this register set is readonly. In order to get the right number - * into the register, you actually have to enter it in HADDR and then - * use the PRELOADEN bit of DFCNTRL to drop it through from the - * HADDR register to the SHADDR register. On non-Ultra2 controllers, - * we simply write it direct. - */ - if(p->features & AHC_ULTRA2) - { - /* - * We might as well be accurate and drop both the resid_dcnt and - * cur_addr into HCNT and HADDR and have both of them drop - * through to the shadow layer together. - */ - aic_outb(p, resid_dcnt & 0xff, HCNT); - aic_outb(p, (resid_dcnt >> 8) & 0xff, HCNT + 1); - aic_outb(p, (resid_dcnt >> 16) & 0xff, HCNT + 2); - aic_outb(p, cur_addr & 0xff, HADDR); - aic_outb(p, (cur_addr >> 8) & 0xff, HADDR + 1); - aic_outb(p, (cur_addr >> 16) & 0xff, HADDR + 2); - aic_outb(p, (cur_addr >> 24) & 0xff, HADDR + 3); - aic_outb(p, aic_inb(p, DMAPARAMS) | PRELOADEN, DFCNTRL); - udelay(1); - aic_outb(p, aic_inb(p, DMAPARAMS) & ~(SCSIEN|HDMAEN), DFCNTRL); - i=0; - while(((aic_inb(p, DFCNTRL) & (SCSIEN|HDMAEN)) != 0) && (i++ < 1000)) - { - udelay(1); - } - } - else - { - aic_outb(p, cur_addr & 0xff, SHADDR); - aic_outb(p, (cur_addr >> 8) & 0xff, SHADDR + 1); - aic_outb(p, (cur_addr >> 16) & 0xff, SHADDR + 2); - aic_outb(p, (cur_addr >> 24) & 0xff, SHADDR + 3); - } - } - break; - - case SEQ_SG_FIXUP: - { - unsigned char scb_index, tmp; - int sg_addr, sg_length; - - scb_index = aic_inb(p, SCB_TAG); - - if(scb_index > p->scb_data->numscbs) - { - printk(WARN_LEAD "invalid scb_index during SEQ_SG_FIXUP.\n", - p->host_no, -1, -1, -1); - printk(INFO_LEAD "SCSISIGI 0x%x, SEQADDR 0x%x, SSTAT0 0x%x, SSTAT1 " - "0x%x\n", p->host_no, -1, -1, -1, - aic_inb(p, SCSISIGI), - aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8), - aic_inb(p, SSTAT0), aic_inb(p, SSTAT1)); - printk(INFO_LEAD "SG_CACHEPTR 0x%x, SSTAT2 0x%x, STCNT 0x%x\n", - p->host_no, -1, -1, -1, aic_inb(p, SG_CACHEPTR), - aic_inb(p, SSTAT2), aic_inb(p, STCNT + 2) << 16 | - aic_inb(p, STCNT + 1) << 8 | aic_inb(p, STCNT)); - /* - * XXX: Add error handling here - */ - break; - } - scb = p->scb_data->scb_array[scb_index]; - if(!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL)) - { - printk(WARN_LEAD "invalid scb during SEQ_SG_FIXUP flags:0x%x " - "scb->cmd:0x%p\n", p->host_no, CTL_OF_SCB(scb), - scb->flags, scb->cmd); - printk(INFO_LEAD "SCSISIGI 0x%x, SEQADDR 0x%x, SSTAT0 0x%x, SSTAT1 " - "0x%x\n", 
p->host_no, CTL_OF_SCB(scb), - aic_inb(p, SCSISIGI), - aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8), - aic_inb(p, SSTAT0), aic_inb(p, SSTAT1)); - printk(INFO_LEAD "SG_CACHEPTR 0x%x, SSTAT2 0x%x, STCNT 0x%x\n", - p->host_no, CTL_OF_SCB(scb), aic_inb(p, SG_CACHEPTR), - aic_inb(p, SSTAT2), aic_inb(p, STCNT + 2) << 16 | - aic_inb(p, STCNT + 1) << 8 | aic_inb(p, STCNT)); - break; - } - if(aic7xxx_verbose & VERBOSE_MINOR_ERROR) - printk(INFO_LEAD "Fixing up SG address for sequencer.\n", p->host_no, - CTL_OF_SCB(scb)); - /* - * Advance the SG pointer to the next element in the list - */ - tmp = aic_inb(p, SG_NEXT); - tmp += SG_SIZEOF; - aic_outb(p, tmp, SG_NEXT); - if( tmp < SG_SIZEOF ) - aic_outb(p, aic_inb(p, SG_NEXT + 1) + 1, SG_NEXT + 1); - tmp = aic_inb(p, SG_COUNT) - 1; - aic_outb(p, tmp, SG_COUNT); - sg_addr = le32_to_cpu(scb->sg_list[scb->sg_count - tmp].address); - sg_length = le32_to_cpu(scb->sg_list[scb->sg_count - tmp].length); - /* - * Now stuff the element we just advanced past down onto the - * card so it can be stored in the residual area. - */ - aic_outb(p, sg_addr & 0xff, HADDR); - aic_outb(p, (sg_addr >> 8) & 0xff, HADDR + 1); - aic_outb(p, (sg_addr >> 16) & 0xff, HADDR + 2); - aic_outb(p, (sg_addr >> 24) & 0xff, HADDR + 3); - aic_outb(p, sg_length & 0xff, HCNT); - aic_outb(p, (sg_length >> 8) & 0xff, HCNT + 1); - aic_outb(p, (sg_length >> 16) & 0xff, HCNT + 2); - aic_outb(p, (tmp << 2) | ((tmp == 1) ? LAST_SEG : 0), SG_CACHEPTR); - aic_outb(p, aic_inb(p, DMAPARAMS), DFCNTRL); - while(aic_inb(p, SSTAT0) & SDONE) udelay(1); - while(aic_inb(p, DFCNTRL) & (HDMAEN|SCSIEN)) aic_outb(p, 0, DFCNTRL); - } - break; - -#ifdef AIC7XXX_NOT_YET - case TRACEPOINT2: - { - printk(INFO_LEAD "Tracepoint #2 reached.\n", p->host_no, - channel, target, lun); - } - break; - - /* XXX Fill these in later */ - case MSG_BUFFER_BUSY: - printk("aic7xxx: Message buffer busy.\n"); - break; - case MSGIN_PHASEMIS: - printk("aic7xxx: Message-in phasemis.\n"); - break; -#endif - - default: /* unknown */ - printk(WARN_LEAD "Unknown SEQINT, INTSTAT 0x%x, SCSISIGI 0x%x.\n", - p->host_no, channel, target, lun, intstat, - aic_inb(p, SCSISIGI)); - break; - } - - /* - * Clear the sequencer interrupt and unpause the sequencer. - */ - unpause_sequencer(p, /* unpause always */ TRUE); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_parse_msg - * - * Description: - * Parses incoming messages into actions on behalf of - * aic7xxx_handle_reqinit - *_F*************************************************************************/ -static int -aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb) -{ - int reject, reply, done; - unsigned char target_scsirate, tindex; - unsigned short target_mask; - unsigned char target, channel, lun; - unsigned char bus_width, new_bus_width; - unsigned char trans_options, new_trans_options; - unsigned int period, new_period, offset, new_offset, maxsync; - struct aic7xxx_syncrate *syncrate; - struct aic_dev_data *aic_dev; - - target = scb->cmd->device->id; - channel = scb->cmd->device->channel; - lun = scb->cmd->device->lun; - reply = reject = done = FALSE; - tindex = TARGET_INDEX(scb->cmd); - aic_dev = AIC_DEV(scb->cmd); - target_scsirate = aic_inb(p, TARG_SCSIRATE + tindex); - target_mask = (0x01 << tindex); - - /* - * Parse as much of the message as is available, - * rejecting it if we don't support it. 
When - * the entire message is available and has been - * handled, return TRUE indicating that we have - * parsed an entire message. - */ - - if (p->msg_buf[0] != MSG_EXTENDED) - { - reject = TRUE; - } - - /* - * Even if we are an Ultra3 card, don't allow Ultra3 sync rates when - * using the SDTR messages. We need the PPR messages to enable the - * higher speeds that include things like Dual Edge clocking. - */ - if (p->features & AHC_ULTRA2) - { - if ( (aic_inb(p, SBLKCTL) & ENAB40) && - !(aic_inb(p, SSTAT2) & EXP_ACTIVE) ) - { - if (p->features & AHC_ULTRA3) - maxsync = AHC_SYNCRATE_ULTRA3; - else - maxsync = AHC_SYNCRATE_ULTRA2; - } - else - { - maxsync = AHC_SYNCRATE_ULTRA; - } - } - else if (p->features & AHC_ULTRA) - { - maxsync = AHC_SYNCRATE_ULTRA; - } - else - { - maxsync = AHC_SYNCRATE_FAST; - } - - /* - * Just accept the length byte outright and perform - * more checking once we know the message type. - */ - - if ( !reject && (p->msg_len > 2) ) - { - switch(p->msg_buf[2]) - { - case MSG_EXT_SDTR: - { - - if (p->msg_buf[1] != MSG_EXT_SDTR_LEN) - { - reject = TRUE; - break; - } - - if (p->msg_len < (MSG_EXT_SDTR_LEN + 2)) - { - break; - } - - period = new_period = p->msg_buf[3]; - offset = new_offset = p->msg_buf[4]; - trans_options = new_trans_options = 0; - bus_width = new_bus_width = target_scsirate & WIDEXFER; - - /* - * If our current max syncrate is in the Ultra3 range, bump it back - * down to Ultra2 since we can't negotiate DT transfers using SDTR - */ - if(maxsync == AHC_SYNCRATE_ULTRA3) - maxsync = AHC_SYNCRATE_ULTRA2; - - /* - * We might have a device that is starting negotiation with us - * before we can start up negotiation with it....be prepared to - * have a device ask for a higher speed then we want to give it - * in that case - */ - if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) != - (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR) ) - { - if (!(aic_dev->flags & DEVICE_DTR_SCANNED)) - { - /* - * We shouldn't get here unless this is a narrow drive, wide - * devices should trigger this same section of code in the WDTR - * handler first instead. - */ - aic_dev->goal.width = MSG_EXT_WDTR_BUS_8_BIT; - aic_dev->goal.options = 0; - if(p->user[tindex].offset) - { - aic_dev->needsdtr_copy = 1; - aic_dev->goal.period = max_t(unsigned char, 10,p->user[tindex].period); - if(p->features & AHC_ULTRA2) - { - aic_dev->goal.offset = MAX_OFFSET_ULTRA2; - } - else - { - aic_dev->goal.offset = MAX_OFFSET_8BIT; - } - } - else - { - aic_dev->needsdtr_copy = 0; - aic_dev->goal.period = 255; - aic_dev->goal.offset = 0; - } - aic_dev->flags |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR; - } - else if (aic_dev->needsdtr_copy == 0) - { - /* - * This is a preemptive message from the target, we've already - * scanned this target and set our options for it, and we - * don't need a SDTR with this target (for whatever reason), - * so reject this incoming SDTR - */ - reject = TRUE; - break; - } - - /* The device is sending this message first and we have to reply */ - reply = TRUE; - - if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) - { - printk(INFO_LEAD "Received pre-emptive SDTR message from " - "target.\n", p->host_no, CTL_OF_SCB(scb)); - } - /* - * Validate the values the device passed to us against our SEEPROM - * settings. We don't have to do this if we aren't replying since - * the device isn't allowed to send values greater than the ones - * we first sent to it. 
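
The validation the following lines perform is only a pair of clamps: the slower (larger) transfer period factor and the shallower (smaller) offset always win, so neither side can be pushed past what the other offered. A compile-alone sketch with invented names, period expressed as the SCSI transfer period factor:

#include <stdio.h>

struct sync_params
{
  unsigned char period;   /* SCSI transfer period factor */
  unsigned char offset;   /* REQ/ACK offset, 0 = asynchronous */
};

struct sync_params sdtr_clamp(struct sync_params offer, struct sync_params goal)
{
  struct sync_params r;

  r.period = (offer.period > goal.period) ? offer.period : goal.period;
  r.offset = (offer.offset < goal.offset) ? offer.offset : goal.offset;
  return r;
}

int main(void)
{
  struct sync_params offer = { 12, 127 };   /* target asks fast and deep */
  struct sync_params goal  = { 25, 15 };    /* we only go this far */
  struct sync_params r = sdtr_clamp(offer, goal);

  printf("negotiated period factor %u, offset %u\n", r.period, r.offset);
  return 0;
}
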
- */ - new_period = max_t(unsigned int, period, aic_dev->goal.period); - new_offset = min_t(unsigned int, offset, aic_dev->goal.offset); - } - - /* - * Use our new_period, new_offset, bus_width, and card options - * to determine the actual syncrate settings - */ - syncrate = aic7xxx_find_syncrate(p, &new_period, maxsync, - &trans_options); - aic7xxx_validate_offset(p, syncrate, &new_offset, bus_width); - - /* - * Did we drop to async? If so, send a reply regardless of whether - * or not we initiated this negotiation. - */ - if ((new_offset == 0) && (new_offset != offset)) - { - aic_dev->needsdtr_copy = 0; - reply = TRUE; - } - - /* - * Did we start this, if not, or if we went too low and had to - * go async, then send an SDTR back to the target - */ - if(reply) - { - /* when sending a reply, make sure that the goal settings are - * updated along with current and active since the code that - * will actually build the message for the sequencer uses the - * goal settings as its guidelines. - */ - aic7xxx_set_syncrate(p, syncrate, target, channel, new_period, - new_offset, trans_options, - AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR, - aic_dev); - scb->flags &= ~SCB_MSGOUT_BITS; - scb->flags |= SCB_MSGOUT_SDTR; - aic_outb(p, HOST_MSG, MSG_OUT); - aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO); - } - else - { - aic7xxx_set_syncrate(p, syncrate, target, channel, new_period, - new_offset, trans_options, - AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev); - aic_dev->needsdtr = 0; - } - done = TRUE; - break; - } - case MSG_EXT_WDTR: - { - - if (p->msg_buf[1] != MSG_EXT_WDTR_LEN) - { - reject = TRUE; - break; - } - - if (p->msg_len < (MSG_EXT_WDTR_LEN + 2)) - { - break; - } - - bus_width = new_bus_width = p->msg_buf[3]; - - if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_WDTR)) == - (SCB_MSGOUT_SENT|SCB_MSGOUT_WDTR) ) - { - switch(bus_width) - { - default: - { - reject = TRUE; - if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) && - ((aic_dev->flags & DEVICE_PRINT_DTR) || - (aic7xxx_verbose > 0xffff)) ) - { - printk(INFO_LEAD "Requesting %d bit transfers, rejecting.\n", - p->host_no, CTL_OF_SCB(scb), 8 * (0x01 << bus_width)); - } - } /* We fall through on purpose */ - case MSG_EXT_WDTR_BUS_8_BIT: - { - aic_dev->goal.width = MSG_EXT_WDTR_BUS_8_BIT; - aic_dev->needwdtr_copy &= ~target_mask; - break; - } - case MSG_EXT_WDTR_BUS_16_BIT: - { - break; - } - } - aic_dev->needwdtr = 0; - aic7xxx_set_width(p, target, channel, lun, new_bus_width, - AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev); - } - else - { - if ( !(aic_dev->flags & DEVICE_DTR_SCANNED) ) - { - /* - * Well, we now know the WDTR and SYNC caps of this device since - * it contacted us first, mark it as such and copy the user stuff - * over to the goal stuff. 
- */ - if( (p->features & AHC_WIDE) && p->user[tindex].width ) - { - aic_dev->goal.width = MSG_EXT_WDTR_BUS_16_BIT; - aic_dev->needwdtr_copy = 1; - } - - /* - * Devices that support DT transfers don't start WDTR requests - */ - aic_dev->goal.options = 0; - - if(p->user[tindex].offset) - { - aic_dev->needsdtr_copy = 1; - aic_dev->goal.period = max_t(unsigned char, 10, p->user[tindex].period); - if(p->features & AHC_ULTRA2) - { - aic_dev->goal.offset = MAX_OFFSET_ULTRA2; - } - else if( aic_dev->goal.width ) - { - aic_dev->goal.offset = MAX_OFFSET_16BIT; - } - else - { - aic_dev->goal.offset = MAX_OFFSET_8BIT; - } - } else { - aic_dev->needsdtr_copy = 0; - aic_dev->goal.period = 255; - aic_dev->goal.offset = 0; - } - - aic_dev->flags |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR; - } - else if (aic_dev->needwdtr_copy == 0) - { - /* - * This is a preemptive message from the target, we've already - * scanned this target and set our options for it, and we - * don't need a WDTR with this target (for whatever reason), - * so reject this incoming WDTR - */ - reject = TRUE; - break; - } - - /* The device is sending this message first and we have to reply */ - reply = TRUE; - - if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) - { - printk(INFO_LEAD "Received pre-emptive WDTR message from " - "target.\n", p->host_no, CTL_OF_SCB(scb)); - } - switch(bus_width) - { - case MSG_EXT_WDTR_BUS_16_BIT: - { - if ( (p->features & AHC_WIDE) && - (aic_dev->goal.width == MSG_EXT_WDTR_BUS_16_BIT) ) - { - new_bus_width = MSG_EXT_WDTR_BUS_16_BIT; - break; - } - } /* Fall through if we aren't a wide card */ - default: - case MSG_EXT_WDTR_BUS_8_BIT: - { - aic_dev->needwdtr_copy = 0; - new_bus_width = MSG_EXT_WDTR_BUS_8_BIT; - break; - } - } - scb->flags &= ~SCB_MSGOUT_BITS; - scb->flags |= SCB_MSGOUT_WDTR; - aic_dev->needwdtr = 0; - if(aic_dev->dtr_pending == 0) - { - /* there is no other command with SCB_DTR_SCB already set that will - * trigger the release of the dtr_pending bit. Both set the bit - * and set scb->flags |= SCB_DTR_SCB - */ - aic_dev->dtr_pending = 1; - scb->flags |= SCB_DTR_SCB; - } - aic_outb(p, HOST_MSG, MSG_OUT); - aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO); - /* when sending a reply, make sure that the goal settings are - * updated along with current and active since the code that - * will actually build the message for the sequencer uses the - * goal settings as its guidelines. - */ - aic7xxx_set_width(p, target, channel, lun, new_bus_width, - AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR, - aic_dev); - } - - /* - * By virtue of the SCSI spec, a WDTR message negates any existing - * SDTR negotiations. So, even if needsdtr isn't marked for this - * device, we still have to do a new SDTR message if the device - * supports SDTR at all. Therefore, we check needsdtr_copy instead - * of needstr. 
- */ - aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0, - AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE, - aic_dev); - aic_dev->needsdtr = aic_dev->needsdtr_copy; - done = TRUE; - break; - } - case MSG_EXT_PPR: - { - - if (p->msg_buf[1] != MSG_EXT_PPR_LEN) - { - reject = TRUE; - break; - } - - if (p->msg_len < (MSG_EXT_PPR_LEN + 2)) - { - break; - } - - period = new_period = p->msg_buf[3]; - offset = new_offset = p->msg_buf[5]; - bus_width = new_bus_width = p->msg_buf[6]; - trans_options = new_trans_options = p->msg_buf[7] & 0xf; - - if(aic7xxx_verbose & VERBOSE_NEGOTIATION2) - { - printk(INFO_LEAD "Parsing PPR message (%d/%d/%d/%d)\n", - p->host_no, CTL_OF_SCB(scb), period, offset, bus_width, - trans_options); - } - - /* - * We might have a device that is starting negotiation with us - * before we can start up negotiation with it....be prepared to - * have a device ask for a higher speed then we want to give it - * in that case - */ - if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_PPR)) != - (SCB_MSGOUT_SENT|SCB_MSGOUT_PPR) ) - { - /* Have we scanned the device yet? */ - if (!(aic_dev->flags & DEVICE_DTR_SCANNED)) - { - /* The device is electing to use PPR messages, so we will too until - * we know better */ - aic_dev->needppr = aic_dev->needppr_copy = 1; - aic_dev->needsdtr = aic_dev->needsdtr_copy = 0; - aic_dev->needwdtr = aic_dev->needwdtr_copy = 0; - - /* We know the device is SCSI-3 compliant due to PPR */ - aic_dev->flags |= DEVICE_SCSI_3; - - /* - * Not only is the device starting this up, but it also hasn't - * been scanned yet, so this would likely be our TUR or our - * INQUIRY command at scan time, so we need to use the - * settings from the SEEPROM if they existed. Of course, even - * if we didn't find a SEEPROM, we stuffed default values into - * the user settings anyway, so use those in all cases. 
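
For reference, the PPR payload picked apart above sits at fixed offsets in the extended message: byte 3 is the period factor, byte 5 the offset, byte 6 the width exponent, and the low nibble of byte 7 the protocol options. A standalone decoder under those assumptions, mirroring the driver's indices rather than taken from it:

#include <stddef.h>
#include <stdint.h>

#define MSG_EXTENDED  0x01
#define MSG_EXT_PPR   0x04

struct ppr { uint8_t period, offset, width, options; };

int ppr_decode(const uint8_t *msg, size_t len, struct ppr *out)
{
  if (len < 8 || msg[0] != MSG_EXTENDED || msg[1] != 6 || msg[2] != MSG_EXT_PPR)
    return -1;                  /* not a complete PPR message */
  out->period  = msg[3];
  out->offset  = msg[5];
  out->width   = msg[6];
  out->options = msg[7] & 0x0f; /* low nibble, as the driver masks it */
  return 0;
}
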
- */ - aic_dev->goal.width = p->user[tindex].width; - if(p->user[tindex].offset) - { - aic_dev->goal.period = p->user[tindex].period; - aic_dev->goal.options = p->user[tindex].options; - if(p->features & AHC_ULTRA2) - { - aic_dev->goal.offset = MAX_OFFSET_ULTRA2; - } - else if( aic_dev->goal.width && - (bus_width == MSG_EXT_WDTR_BUS_16_BIT) && - p->features & AHC_WIDE ) - { - aic_dev->goal.offset = MAX_OFFSET_16BIT; - } - else - { - aic_dev->goal.offset = MAX_OFFSET_8BIT; - } - } - else - { - aic_dev->goal.period = 255; - aic_dev->goal.offset = 0; - aic_dev->goal.options = 0; - } - aic_dev->flags |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR; - } - else if (aic_dev->needppr_copy == 0) - { - /* - * This is a preemptive message from the target, we've already - * scanned this target and set our options for it, and we - * don't need a PPR with this target (for whatever reason), - * so reject this incoming PPR - */ - reject = TRUE; - break; - } - - /* The device is sending this message first and we have to reply */ - reply = TRUE; - - if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) - { - printk(INFO_LEAD "Received pre-emptive PPR message from " - "target.\n", p->host_no, CTL_OF_SCB(scb)); - } - - } - - switch(bus_width) - { - case MSG_EXT_WDTR_BUS_16_BIT: - { - if ( (aic_dev->goal.width == MSG_EXT_WDTR_BUS_16_BIT) && - p->features & AHC_WIDE) - { - break; - } - } - default: - { - if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) && - ((aic_dev->flags & DEVICE_PRINT_DTR) || - (aic7xxx_verbose > 0xffff)) ) - { - reply = TRUE; - printk(INFO_LEAD "Requesting %d bit transfers, rejecting.\n", - p->host_no, CTL_OF_SCB(scb), 8 * (0x01 << bus_width)); - } - } /* We fall through on purpose */ - case MSG_EXT_WDTR_BUS_8_BIT: - { - /* - * According to the spec, if we aren't wide, we also can't be - * Dual Edge so clear the options byte - */ - new_trans_options = 0; - new_bus_width = MSG_EXT_WDTR_BUS_8_BIT; - break; - } - } - - if(reply) - { - /* when sending a reply, make sure that the goal settings are - * updated along with current and active since the code that - * will actually build the message for the sequencer uses the - * goal settings as its guidelines. - */ - aic7xxx_set_width(p, target, channel, lun, new_bus_width, - AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR, - aic_dev); - syncrate = aic7xxx_find_syncrate(p, &new_period, maxsync, - &new_trans_options); - aic7xxx_validate_offset(p, syncrate, &new_offset, new_bus_width); - aic7xxx_set_syncrate(p, syncrate, target, channel, new_period, - new_offset, new_trans_options, - AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR, - aic_dev); - } - else - { - aic7xxx_set_width(p, target, channel, lun, new_bus_width, - AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev); - syncrate = aic7xxx_find_syncrate(p, &new_period, maxsync, - &new_trans_options); - aic7xxx_validate_offset(p, syncrate, &new_offset, new_bus_width); - aic7xxx_set_syncrate(p, syncrate, target, channel, new_period, - new_offset, new_trans_options, - AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev); - } - - /* - * As it turns out, if we don't *have* to have PPR messages, then - * configure ourselves not to use them since that makes some - * external drive chassis work (those chassis can't parse PPR - * messages and they mangle the SCSI bus until you send a WDTR - * and SDTR that they can understand). 
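
The bookkeeping that follows reduces to: once PPR settles on zero protocol options, stop offering PPR and re-arm the older WDTR/SDTR messages for whatever width and offset survived the negotiation. As an isolated sketch with invented field names:

#include <stdbool.h>

struct negotiation_state
{
  bool need_ppr, need_wdtr, need_sdtr;
  unsigned char offset;   /* agreed offset, 0 = asynchronous */
  unsigned char width;    /* agreed width exponent, 0 = 8-bit */
};

void ppr_fallback(struct negotiation_state *ns)
{
  ns->need_ppr = false;               /* stop offering PPR entirely */
  ns->need_sdtr = (ns->offset != 0);  /* sync agreement still wanted */
  ns->need_wdtr = (ns->width != 0);   /* wide agreement still wanted */
}
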
- */ - if(new_trans_options == 0) - { - aic_dev->needppr = aic_dev->needppr_copy = 0; - if(new_offset) - { - aic_dev->needsdtr = aic_dev->needsdtr_copy = 1; - } - if (new_bus_width) - { - aic_dev->needwdtr = aic_dev->needwdtr_copy = 1; - } - } - - if((new_offset == 0) && (offset != 0)) - { - /* - * Oops, the syncrate went to low for this card and we fell off - * to async (should never happen with a device that uses PPR - * messages, but have to be complete) - */ - reply = TRUE; - } - - if(reply) - { - scb->flags &= ~SCB_MSGOUT_BITS; - scb->flags |= SCB_MSGOUT_PPR; - aic_outb(p, HOST_MSG, MSG_OUT); - aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO); - } - else - { - aic_dev->needppr = 0; - } - done = TRUE; - break; - } - default: - { - reject = TRUE; - break; - } - } /* end of switch(p->msg_type) */ - } /* end of if (!reject && (p->msg_len > 2)) */ - - if (!reply && reject) - { - aic_outb(p, MSG_MESSAGE_REJECT, MSG_OUT); - aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO); - done = TRUE; - } - return(done); -} - - -/*+F************************************************************************* - * Function: - * aic7xxx_handle_reqinit - * - * Description: - * Interrupt handler for REQINIT interrupts (used to transfer messages to - * and from devices). - *_F*************************************************************************/ -static void -aic7xxx_handle_reqinit(struct aic7xxx_host *p, struct aic7xxx_scb *scb) -{ - unsigned char lastbyte; - unsigned char phasemis; - int done = FALSE; - - switch(p->msg_type) - { - case MSG_TYPE_INITIATOR_MSGOUT: - { - if (p->msg_len == 0) - panic("aic7xxx: REQINIT with no active message!\n"); - - lastbyte = (p->msg_index == (p->msg_len - 1)); - phasemis = ( aic_inb(p, SCSISIGI) & PHASE_MASK) != P_MESGOUT; - - if (lastbyte || phasemis) - { - /* Time to end the message */ - p->msg_len = 0; - p->msg_type = MSG_TYPE_NONE; - /* - * NOTE-TO-MYSELF: If you clear the REQINIT after you - * disable REQINITs, then cases of REJECT_MSG stop working - * and hang the bus - */ - aic_outb(p, aic_inb(p, SIMODE1) & ~ENREQINIT, SIMODE1); - aic_outb(p, CLRSCSIINT, CLRINT); - p->flags &= ~AHC_HANDLING_REQINITS; - - if (phasemis == 0) - { - aic_outb(p, p->msg_buf[p->msg_index], SINDEX); - aic_outb(p, 0, RETURN_1); -#ifdef AIC7XXX_VERBOSE_DEBUGGING - if (aic7xxx_verbose > 0xffff) - printk(INFO_LEAD "Completed sending of REQINIT message.\n", - p->host_no, CTL_OF_SCB(scb)); -#endif - } - else - { - aic_outb(p, MSGOUT_PHASEMIS, RETURN_1); -#ifdef AIC7XXX_VERBOSE_DEBUGGING - if (aic7xxx_verbose > 0xffff) - printk(INFO_LEAD "PHASEMIS while sending REQINIT message.\n", - p->host_no, CTL_OF_SCB(scb)); -#endif - } - unpause_sequencer(p, TRUE); - } - else - { - /* - * Present the byte on the bus (clearing REQINIT) but don't - * unpause the sequencer. 
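
The per-byte handshake used here can be modelled apart from the hardware: present one byte per REQ while the bus stays in message-out, and stop early if the target changes phase. In the sketch below the register pokes are hidden behind hypothetical callbacks; the real code drives CLRSINT1 and SCSIDATL directly.

#include <stddef.h>
#include <stdbool.h>

struct msg_io
{
  bool (*in_msgout_phase)(void *hw);   /* hypothetical phase probe */
  void (*present_byte)(void *hw, unsigned char b);
  void *hw;
};

/* Returns true if every byte went out, false on an early phase change. */
bool msg_out_pump(struct msg_io *io, const unsigned char *msg, size_t len)
{
  size_t i;

  for (i = 0; i < len; i++)
  {
    if (!io->in_msgout_phase(io->hw))
      return false;             /* target moved on; abandon the message */
    io->present_byte(io->hw, msg[i]);
  }
  return true;
}
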
- */ - aic_outb(p, CLRREQINIT, CLRSINT1); - aic_outb(p, CLRSCSIINT, CLRINT); - aic_outb(p, p->msg_buf[p->msg_index++], SCSIDATL); - } - break; - } - case MSG_TYPE_INITIATOR_MSGIN: - { - phasemis = ( aic_inb(p, SCSISIGI) & PHASE_MASK ) != P_MESGIN; - - if (phasemis == 0) - { - p->msg_len++; - /* Pull the byte in without acking it */ - p->msg_buf[p->msg_index] = aic_inb(p, SCSIBUSL); - done = aic7xxx_parse_msg(p, scb); - /* Ack the byte */ - aic_outb(p, CLRREQINIT, CLRSINT1); - aic_outb(p, CLRSCSIINT, CLRINT); - aic_inb(p, SCSIDATL); - p->msg_index++; - } - if (phasemis || done) - { -#ifdef AIC7XXX_VERBOSE_DEBUGGING - if (aic7xxx_verbose > 0xffff) - { - if (phasemis) - printk(INFO_LEAD "PHASEMIS while receiving REQINIT message.\n", - p->host_no, CTL_OF_SCB(scb)); - else - printk(INFO_LEAD "Completed receipt of REQINIT message.\n", - p->host_no, CTL_OF_SCB(scb)); - } -#endif - /* Time to end our message session */ - p->msg_len = 0; - p->msg_type = MSG_TYPE_NONE; - aic_outb(p, aic_inb(p, SIMODE1) & ~ENREQINIT, SIMODE1); - aic_outb(p, CLRSCSIINT, CLRINT); - p->flags &= ~AHC_HANDLING_REQINITS; - unpause_sequencer(p, TRUE); - } - break; - } - default: - { - panic("aic7xxx: Unknown REQINIT message type.\n"); - break; - } - } /* End of switch(p->msg_type) */ -} - -/*+F************************************************************************* - * Function: - * aic7xxx_handle_scsiint - * - * Description: - * Interrupt handler for SCSI interrupts (SCSIINT). - *-F*************************************************************************/ -static void -aic7xxx_handle_scsiint(struct aic7xxx_host *p, unsigned char intstat) -{ - unsigned char scb_index; - unsigned char status; - struct aic7xxx_scb *scb; - struct aic_dev_data *aic_dev; - - scb_index = aic_inb(p, SCB_TAG); - status = aic_inb(p, SSTAT1); - - if (scb_index < p->scb_data->numscbs) - { - scb = p->scb_data->scb_array[scb_index]; - if ((scb->flags & SCB_ACTIVE) == 0) - { - scb = NULL; - } - } - else - { - scb = NULL; - } - - - if ((status & SCSIRSTI) != 0) - { - int channel; - - if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 ) - channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3; - else - channel = 0; - - if (aic7xxx_verbose & VERBOSE_RESET) - printk(WARN_LEAD "Someone else reset the channel!!\n", - p->host_no, channel, -1, -1); - if (aic7xxx_panic_on_abort) - aic7xxx_panic_abort(p, NULL); - /* - * Go through and abort all commands for the channel, but do not - * reset the channel again. - */ - aic7xxx_reset_channel(p, channel, /* Initiate Reset */ FALSE); - aic7xxx_run_done_queue(p, TRUE); - scb = NULL; - } - else if ( ((status & BUSFREE) != 0) && ((status & SELTO) == 0) ) - { - /* - * First look at what phase we were last in. If it's message-out, - * chances are pretty good that the bus free was in response to - * one of our abort requests. 
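
The classification applied below boils down to the last message that was on the wire when the target released the bus. Using the standard SCSI-2 message codes (the same values the driver's MSG_* constants carry), an isolated version looks like:

#define MSG_ABORT          0x06
#define MSG_BUS_DEV_RESET  0x0c
#define MSG_ABORT_TAG      0x0d

enum busfree_kind { BUSFREE_ABORTED, BUSFREE_DEV_RESET, BUSFREE_UNEXPECTED };

enum busfree_kind classify_busfree(int last_phase_was_msgout,
                                   unsigned char last_msg)
{
  if (!last_phase_was_msgout)
    return BUSFREE_UNEXPECTED;
  if (last_msg == MSG_ABORT || last_msg == MSG_ABORT_TAG)
    return BUSFREE_ABORTED;     /* expected: target honored our abort */
  if (last_msg == MSG_BUS_DEV_RESET)
    return BUSFREE_DEV_RESET;   /* expected: device reset delivered */
  return BUSFREE_UNEXPECTED;
}
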
- */ - unsigned char lastphase = aic_inb(p, LASTPHASE); - unsigned char saved_tcl = aic_inb(p, SAVED_TCL); - unsigned char target = (saved_tcl >> 4) & 0x0F; - int channel; - int printerror = TRUE; - - if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 ) - channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3; - else - channel = 0; - - aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP), - SCSISEQ); - if (lastphase == P_MESGOUT) - { - unsigned char message; - - message = aic_inb(p, SINDEX); - - if ((message == MSG_ABORT) || (message == MSG_ABORT_TAG)) - { - if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS) - printk(INFO_LEAD "SCB %d abort delivered.\n", p->host_no, - CTL_OF_SCB(scb), scb->hscb->tag); - aic7xxx_reset_device(p, target, channel, ALL_LUNS, - (message == MSG_ABORT) ? SCB_LIST_NULL : scb->hscb->tag ); - aic7xxx_run_done_queue(p, TRUE); - scb = NULL; - printerror = 0; - } - else if (message == MSG_BUS_DEV_RESET) - { - aic7xxx_handle_device_reset(p, target, channel); - scb = NULL; - printerror = 0; - } - } - if ( (scb != NULL) && (scb->flags & SCB_DTR_SCB) ) - { - /* - * Hmmm...error during a negotiation command. Either we have a - * borken bus, or the device doesn't like our negotiation message. - * Since we check the INQUIRY data of a device before sending it - * negotiation messages, assume the bus is borken for whatever - * reason. Complete the command. - */ - printerror = 0; - aic7xxx_reset_device(p, target, channel, ALL_LUNS, scb->hscb->tag); - aic7xxx_run_done_queue(p, TRUE); - scb = NULL; - } - if (printerror != 0) - { - if (scb != NULL) - { - unsigned char tag; - - if ((scb->hscb->control & TAG_ENB) != 0) - { - tag = scb->hscb->tag; - } - else - { - tag = SCB_LIST_NULL; - } - aic7xxx_reset_device(p, target, channel, ALL_LUNS, tag); - aic7xxx_run_done_queue(p, TRUE); - } - else - { - aic7xxx_reset_device(p, target, channel, ALL_LUNS, SCB_LIST_NULL); - aic7xxx_run_done_queue(p, TRUE); - } - printk(INFO_LEAD "Unexpected busfree, LASTPHASE = 0x%x, " - "SEQADDR = 0x%x\n", p->host_no, channel, target, -1, lastphase, - (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0)); - scb = NULL; - } - aic_outb(p, MSG_NOOP, MSG_OUT); - aic_outb(p, aic_inb(p, SIMODE1) & ~(ENBUSFREE|ENREQINIT), - SIMODE1); - p->flags &= ~AHC_HANDLING_REQINITS; - aic_outb(p, CLRBUSFREE, CLRSINT1); - aic_outb(p, CLRSCSIINT, CLRINT); - restart_sequencer(p); - unpause_sequencer(p, TRUE); - } - else if ((status & SELTO) != 0) - { - unsigned char scbptr; - unsigned char nextscb; - struct scsi_cmnd *cmd; - - scbptr = aic_inb(p, WAITING_SCBH); - if (scbptr > p->scb_data->maxhscbs) - { - /* - * I'm still trying to track down exactly how this happens, but until - * I find it, this code will make sure we aren't passing bogus values - * into the SCBPTR register, even if that register will just wrap - * things around, we still don't like having out of range variables. - * - * NOTE: Don't check the aic7xxx_verbose variable, I want this message - * to always be displayed. 
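
The improvisation below masks the bad index back into range, which is only exact when maxhscbs is a power of two. A generic clamp, shown here with a modulo so it holds for any table size (a deliberate difference from the driver's mask trick):

unsigned char clamp_scb_index(unsigned char idx, unsigned char maxhscbs)
{
  if (idx < maxhscbs)
    return idx;                 /* already in range, leave it alone */
  return maxhscbs ? (unsigned char)(idx % maxhscbs) : 0;
}
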
- */ - printk(INFO_LEAD "Invalid WAITING_SCBH value %d, improvising.\n", - p->host_no, -1, -1, -1, scbptr); - if (p->scb_data->maxhscbs > 4) - scbptr &= (p->scb_data->maxhscbs - 1); - else - scbptr &= 0x03; - } - aic_outb(p, scbptr, SCBPTR); - scb_index = aic_inb(p, SCB_TAG); - - scb = NULL; - if (scb_index < p->scb_data->numscbs) - { - scb = p->scb_data->scb_array[scb_index]; - if ((scb->flags & SCB_ACTIVE) == 0) - { - scb = NULL; - } - } - if (scb == NULL) - { - printk(WARN_LEAD "Referenced SCB %d not valid during SELTO.\n", - p->host_no, -1, -1, -1, scb_index); - printk(KERN_WARNING " SCSISEQ = 0x%x SEQADDR = 0x%x SSTAT0 = 0x%x " - "SSTAT1 = 0x%x\n", aic_inb(p, SCSISEQ), - aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8), - aic_inb(p, SSTAT0), aic_inb(p, SSTAT1)); - if (aic7xxx_panic_on_abort) - aic7xxx_panic_abort(p, NULL); - } - else - { - cmd = scb->cmd; - cmd->result = (DID_TIME_OUT << 16); - - /* - * Clear out this hardware SCB - */ - aic_outb(p, 0, SCB_CONTROL); - - /* - * Clear out a few values in the card that are in an undetermined - * state. - */ - aic_outb(p, MSG_NOOP, MSG_OUT); - - /* - * Shift the waiting for selection queue forward - */ - nextscb = aic_inb(p, SCB_NEXT); - aic_outb(p, nextscb, WAITING_SCBH); - - /* - * Put this SCB back on the free list. - */ - aic7xxx_add_curscb_to_free_list(p); -#ifdef AIC7XXX_VERBOSE_DEBUGGING - if (aic7xxx_verbose > 0xffff) - printk(INFO_LEAD "Selection Timeout.\n", p->host_no, CTL_OF_SCB(scb)); -#endif - if (scb->flags & SCB_QUEUED_ABORT) - { - /* - * We know that this particular SCB had to be the queued abort since - * the disconnected SCB would have gotten a reconnect instead. - * What we need to do then is to let the command timeout again so - * we get a reset since this abort just failed. - */ - cmd->result = 0; - scb = NULL; - } - } - /* - * Keep the sequencer from trying to restart any selections - */ - aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ); - /* - * Make sure the data bits on the bus are released - * Don't do this on 7770 chipsets, it makes them give us - * a BRKADDRINT and kills the card. - */ - if( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI ) - aic_outb(p, 0, SCSIBUSL); - - /* - * Delay for the selection timeout delay period then stop the selection - */ - udelay(301); - aic_outb(p, CLRSELINGO, CLRSINT0); - /* - * Clear out all the interrupt status bits - */ - aic_outb(p, aic_inb(p, SIMODE1) & ~(ENREQINIT|ENBUSFREE), SIMODE1); - p->flags &= ~AHC_HANDLING_REQINITS; - aic_outb(p, CLRSELTIMEO | CLRBUSFREE, CLRSINT1); - aic_outb(p, CLRSCSIINT, CLRINT); - /* - * Restarting the sequencer will stop the selection and make sure devices - * are allowed to reselect in. - */ - restart_sequencer(p); - unpause_sequencer(p, TRUE); - } - else if (scb == NULL) - { - printk(WARN_LEAD "aic7xxx_isr - referenced scb not valid " - "during scsiint 0x%x scb(%d)\n" - " SIMODE0 0x%x, SIMODE1 0x%x, SSTAT0 0x%x, SEQADDR 0x%x\n", - p->host_no, -1, -1, -1, status, scb_index, aic_inb(p, SIMODE0), - aic_inb(p, SIMODE1), aic_inb(p, SSTAT0), - (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0)); - /* - * Turn off the interrupt and set status to zero, so that it - * falls through the rest of the SCSIINT code. - */ - aic_outb(p, status, CLRSINT1); - aic_outb(p, CLRSCSIINT, CLRINT); - unpause_sequencer(p, /* unpause always */ TRUE); - scb = NULL; - } - else if (status & SCSIPERR) - { - /* - * Determine the bus phase and queue an appropriate message. 
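
The phase-to-message mapping in the switch below is small enough to state as a table: only the "in" phases, where the initiator itself saw the bad parity, warrant a reply message. A standalone rendering with the standard SCSI-2 message codes:

#define MSG_NOOP               0x08
#define MSG_INITIATOR_DET_ERR  0x05
#define MSG_PARITY_ERROR       0x09

enum bus_phase { PH_DATAOUT, PH_DATAIN, PH_COMMAND,
                 PH_STATUS, PH_MSGOUT, PH_MSGIN };

unsigned char parity_reply(enum bus_phase phase)
{
  switch (phase)
  {
    case PH_DATAIN:
    case PH_STATUS:
      return MSG_INITIATOR_DET_ERR;   /* we received the damaged byte */
    case PH_MSGIN:
      return MSG_PARITY_ERROR;        /* the message itself was damaged */
    default:
      return MSG_NOOP;                /* "out" phases: nothing to send */
  }
}
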
- */ - char *phase; - struct scsi_cmnd *cmd; - unsigned char mesg_out = MSG_NOOP; - unsigned char lastphase = aic_inb(p, LASTPHASE); - unsigned char sstat2 = aic_inb(p, SSTAT2); - - cmd = scb->cmd; - switch (lastphase) - { - case P_DATAOUT: - phase = "Data-Out"; - break; - case P_DATAIN: - phase = "Data-In"; - mesg_out = MSG_INITIATOR_DET_ERR; - break; - case P_COMMAND: - phase = "Command"; - break; - case P_MESGOUT: - phase = "Message-Out"; - break; - case P_STATUS: - phase = "Status"; - mesg_out = MSG_INITIATOR_DET_ERR; - break; - case P_MESGIN: - phase = "Message-In"; - mesg_out = MSG_PARITY_ERROR; - break; - default: - phase = "unknown"; - break; - } - - /* - * A parity error has occurred during a data - * transfer phase. Flag it and continue. - */ - if( (p->features & AHC_ULTRA3) && - (aic_inb(p, SCSIRATE) & AHC_SYNCRATE_CRC) && - (lastphase == P_DATAIN) ) - { - printk(WARN_LEAD "CRC error during %s phase.\n", - p->host_no, CTL_OF_SCB(scb), phase); - if(sstat2 & CRCVALERR) - { - printk(WARN_LEAD " CRC error in intermediate CRC packet.\n", - p->host_no, CTL_OF_SCB(scb)); - } - if(sstat2 & CRCENDERR) - { - printk(WARN_LEAD " CRC error in ending CRC packet.\n", - p->host_no, CTL_OF_SCB(scb)); - } - if(sstat2 & CRCREQERR) - { - printk(WARN_LEAD " Target incorrectly requested a CRC packet.\n", - p->host_no, CTL_OF_SCB(scb)); - } - if(sstat2 & DUAL_EDGE_ERROR) - { - printk(WARN_LEAD " Dual Edge transmission error.\n", - p->host_no, CTL_OF_SCB(scb)); - } - } - else if( (lastphase == P_MESGOUT) && - (scb->flags & SCB_MSGOUT_PPR) ) - { - /* - * As per the draft specs, any device capable of supporting any of - * the option values other than 0 are not allowed to reject the - * PPR message. Instead, they must negotiate out what they do - * support instead of rejecting our offering or else they cause - * a parity error during msg_out phase to signal that they don't - * like our settings. - */ - aic_dev = AIC_DEV(scb->cmd); - aic_dev->needppr = aic_dev->needppr_copy = 0; - aic7xxx_set_width(p, scb->cmd->device->id, scb->cmd->device->channel, scb->cmd->device->lun, - MSG_EXT_WDTR_BUS_8_BIT, - (AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE), - aic_dev); - aic7xxx_set_syncrate(p, NULL, scb->cmd->device->id, scb->cmd->device->channel, 0, 0, - 0, AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE, - aic_dev); - aic_dev->goal.options = 0; - scb->flags &= ~SCB_MSGOUT_BITS; - if(aic7xxx_verbose & VERBOSE_NEGOTIATION2) - { - printk(INFO_LEAD "parity error during PPR message, reverting " - "to WDTR/SDTR\n", p->host_no, CTL_OF_SCB(scb)); - } - if ( aic_dev->goal.width ) - { - aic_dev->needwdtr = aic_dev->needwdtr_copy = 1; - } - if ( aic_dev->goal.offset ) - { - if( aic_dev->goal.period <= 9 ) - { - aic_dev->goal.period = 10; - } - aic_dev->needsdtr = aic_dev->needsdtr_copy = 1; - } - scb = NULL; - } - - /* - * We've set the hardware to assert ATN if we get a parity - * error on "in" phases, so all we need to do is stuff the - * message buffer with the appropriate message. "In" phases - * have set mesg_out to something other than MSG_NOP. 
- */ - if (mesg_out != MSG_NOOP) - { - aic_outb(p, mesg_out, MSG_OUT); - aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO); - scb = NULL; - } - aic_outb(p, CLRSCSIPERR, CLRSINT1); - aic_outb(p, CLRSCSIINT, CLRINT); - unpause_sequencer(p, /* unpause_always */ TRUE); - } - else if ( (status & REQINIT) && - (p->flags & AHC_HANDLING_REQINITS) ) - { -#ifdef AIC7XXX_VERBOSE_DEBUGGING - if (aic7xxx_verbose > 0xffff) - printk(INFO_LEAD "Handling REQINIT, SSTAT1=0x%x.\n", p->host_no, - CTL_OF_SCB(scb), aic_inb(p, SSTAT1)); -#endif - aic7xxx_handle_reqinit(p, scb); - return; - } - else - { - /* - * We don't know what's going on. Turn off the - * interrupt source and try to continue. - */ - if (aic7xxx_verbose & VERBOSE_SCSIINT) - printk(INFO_LEAD "Unknown SCSIINT status, SSTAT1(0x%x).\n", - p->host_no, -1, -1, -1, status); - aic_outb(p, status, CLRSINT1); - aic_outb(p, CLRSCSIINT, CLRINT); - unpause_sequencer(p, /* unpause always */ TRUE); - scb = NULL; - } - if (scb != NULL) - { - aic7xxx_done(p, scb); - } -} - -#ifdef AIC7XXX_VERBOSE_DEBUGGING -static void -aic7xxx_check_scbs(struct aic7xxx_host *p, char *buffer) -{ - unsigned char saved_scbptr, free_scbh, dis_scbh, wait_scbh, temp; - int i, bogus, lost; - static unsigned char scb_status[AIC7XXX_MAXSCB]; - -#define SCB_NO_LIST 0 -#define SCB_FREE_LIST 1 -#define SCB_WAITING_LIST 2 -#define SCB_DISCONNECTED_LIST 4 -#define SCB_CURRENTLY_ACTIVE 8 - - /* - * Note, these checks will fail on a regular basis once the machine moves - * beyond the bus scan phase. The problem is race conditions concerning - * the scbs and where they are linked in. When you have 30 or so commands - * outstanding on the bus, and run this twice with every interrupt, the - * chances get pretty good that you'll catch the sequencer with an SCB - * only partially linked in. Therefore, once we pass the scan phase - * of the bus, we really should disable this function. 
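
Stripped of the register traffic, the check below is a disjointness test over three singly linked lists of SCB indices: every index may appear on at most one list, and any revisit (including a self-loop) is an error. A condensed model using plain arrays in place of the SCBPTR/SCB_NEXT register window:

#include <stdio.h>

#define LIST_END 0xff

/* next[] holds each SCB's successor index; heads[] the list heads. */
int scb_lists_consistent(const unsigned char *next, int nscbs,
                         const unsigned char *heads, int nlists)
{
  unsigned char seen[256] = { 0 };
  int l;

  for (l = 0; l < nlists; l++)
  {
    unsigned char i;

    for (i = heads[l]; i != LIST_END; i = next[i])
    {
      if (i >= nscbs || seen[i])
      {
        printf("SCB %u out of range, revisited, or on two lists\n", i);
        return 0;               /* inconsistent */
      }
      seen[i] = (unsigned char)(1u << l);
    }
  }
  return 1;                     /* all lists disjoint and terminated */
}
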
- */ - bogus = FALSE; - memset(&scb_status[0], 0, sizeof(scb_status)); - pause_sequencer(p); - saved_scbptr = aic_inb(p, SCBPTR); - if (saved_scbptr >= p->scb_data->maxhscbs) - { - printk("Bogus SCBPTR %d\n", saved_scbptr); - bogus = TRUE; - } - scb_status[saved_scbptr] = SCB_CURRENTLY_ACTIVE; - free_scbh = aic_inb(p, FREE_SCBH); - if ( (free_scbh != SCB_LIST_NULL) && - (free_scbh >= p->scb_data->maxhscbs) ) - { - printk("Bogus FREE_SCBH %d\n", free_scbh); - bogus = TRUE; - } - else - { - temp = free_scbh; - while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) ) - { - if(scb_status[temp] & 0x07) - { - printk("HSCB %d on multiple lists, status 0x%02x", temp, - scb_status[temp] | SCB_FREE_LIST); - bogus = TRUE; - } - scb_status[temp] |= SCB_FREE_LIST; - aic_outb(p, temp, SCBPTR); - temp = aic_inb(p, SCB_NEXT); - } - } - - dis_scbh = aic_inb(p, DISCONNECTED_SCBH); - if ( (dis_scbh != SCB_LIST_NULL) && - (dis_scbh >= p->scb_data->maxhscbs) ) - { - printk("Bogus DISCONNECTED_SCBH %d\n", dis_scbh); - bogus = TRUE; - } - else - { - temp = dis_scbh; - while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) ) - { - if(scb_status[temp] & 0x07) - { - printk("HSCB %d on multiple lists, status 0x%02x", temp, - scb_status[temp] | SCB_DISCONNECTED_LIST); - bogus = TRUE; - } - scb_status[temp] |= SCB_DISCONNECTED_LIST; - aic_outb(p, temp, SCBPTR); - temp = aic_inb(p, SCB_NEXT); - } - } - - wait_scbh = aic_inb(p, WAITING_SCBH); - if ( (wait_scbh != SCB_LIST_NULL) && - (wait_scbh >= p->scb_data->maxhscbs) ) - { - printk("Bogus WAITING_SCBH %d\n", wait_scbh); - bogus = TRUE; - } - else - { - temp = wait_scbh; - while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) ) - { - if(scb_status[temp] & 0x07) - { - printk("HSCB %d on multiple lists, status 0x%02x", temp, - scb_status[temp] | SCB_WAITING_LIST); - bogus = TRUE; - } - scb_status[temp] |= SCB_WAITING_LIST; - aic_outb(p, temp, SCBPTR); - temp = aic_inb(p, SCB_NEXT); - } - } - - lost=0; - for(i=0; i < p->scb_data->maxhscbs; i++) - { - aic_outb(p, i, SCBPTR); - temp = aic_inb(p, SCB_NEXT); - if ( ((temp != SCB_LIST_NULL) && - (temp >= p->scb_data->maxhscbs)) ) - { - printk("HSCB %d bad, SCB_NEXT invalid(%d).\n", i, temp); - bogus = TRUE; - } - if ( temp == i ) - { - printk("HSCB %d bad, SCB_NEXT points to self.\n", i); - bogus = TRUE; - } - if (scb_status[i] == 0) - lost++; - if (lost > 1) - { - printk("Too many lost scbs.\n"); - bogus=TRUE; - } - } - aic_outb(p, saved_scbptr, SCBPTR); - unpause_sequencer(p, FALSE); - if (bogus) - { - printk("Bogus parameters found in card SCB array structures.\n"); - printk("%s\n", buffer); - aic7xxx_panic_abort(p, NULL); - } - return; -} -#endif - - -/*+F************************************************************************* - * Function: - * aic7xxx_handle_command_completion_intr - * - * Description: - * SCSI command completion interrupt handler. - *-F*************************************************************************/ -static void -aic7xxx_handle_command_completion_intr(struct aic7xxx_host *p) -{ - struct aic7xxx_scb *scb = NULL; - struct aic_dev_data *aic_dev; - struct scsi_cmnd *cmd; - unsigned char scb_index, tindex; - -#ifdef AIC7XXX_VERBOSE_DEBUGGING - if( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) ) - printk(INFO_LEAD "Command Complete Int.\n", p->host_no, -1, -1, -1); -#endif - - /* - * Read the INTSTAT location after clearing the CMDINT bit. This forces - * any posted PCI writes to flush to memory. 
Gerard Roudier suggested - * this fix to the possible race of clearing the CMDINT bit but not - * having all command bytes flushed onto the qoutfifo. - */ - aic_outb(p, CLRCMDINT, CLRINT); - aic_inb(p, INTSTAT); - /* - * The sequencer will continue running when it - * issues this interrupt. There may be >1 commands - * finished, so loop until we've processed them all. - */ - - while (p->qoutfifo[p->qoutfifonext] != SCB_LIST_NULL) - { - scb_index = p->qoutfifo[p->qoutfifonext]; - p->qoutfifo[p->qoutfifonext++] = SCB_LIST_NULL; - if ( scb_index >= p->scb_data->numscbs ) - { - printk(WARN_LEAD "CMDCMPLT with invalid SCB index %d\n", p->host_no, - -1, -1, -1, scb_index); - continue; - } - scb = p->scb_data->scb_array[scb_index]; - if (!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL)) - { - printk(WARN_LEAD "CMDCMPLT without command for SCB %d, SCB flags " - "0x%x, cmd 0x%lx\n", p->host_no, -1, -1, -1, scb_index, scb->flags, - (unsigned long) scb->cmd); - continue; - } - tindex = TARGET_INDEX(scb->cmd); - aic_dev = AIC_DEV(scb->cmd); - if (scb->flags & SCB_QUEUED_ABORT) - { - pause_sequencer(p); - if ( ((aic_inb(p, LASTPHASE) & PHASE_MASK) != P_BUSFREE) && - (aic_inb(p, SCB_TAG) == scb->hscb->tag) ) - { - unpause_sequencer(p, FALSE); - continue; - } - aic7xxx_reset_device(p, scb->cmd->device->id, scb->cmd->device->channel, - scb->cmd->device->lun, scb->hscb->tag); - scb->flags &= ~(SCB_QUEUED_FOR_DONE | SCB_RESET | SCB_ABORT | - SCB_QUEUED_ABORT); - unpause_sequencer(p, FALSE); - } - else if (scb->flags & SCB_ABORT) - { - /* - * We started to abort this, but it completed on us, let it - * through as successful - */ - scb->flags &= ~(SCB_ABORT|SCB_RESET); - } - else if (scb->flags & SCB_SENSE) - { - char *buffer = &scb->cmd->sense_buffer[0]; - - if (buffer[12] == 0x47 || buffer[12] == 0x54) - { - /* - * Signal that we need to re-negotiate things. - */ - aic_dev->needppr = aic_dev->needppr_copy; - aic_dev->needsdtr = aic_dev->needsdtr_copy; - aic_dev->needwdtr = aic_dev->needwdtr_copy; - } - } - cmd = scb->cmd; - if (scb->hscb->residual_SG_segment_count != 0) - { - aic7xxx_calculate_residual(p, scb); - } - cmd->result |= (aic7xxx_error(cmd) << 16); - aic7xxx_done(p, scb); - } -} - -/*+F************************************************************************* - * Function: - * aic7xxx_isr - * - * Description: - * SCSI controller interrupt handler. - *-F*************************************************************************/ -static void -aic7xxx_isr(void *dev_id) -{ - struct aic7xxx_host *p; - unsigned char intstat; - - p = dev_id; - - /* - * Just a few sanity checks. Make sure that we have an int pending. - * Also, if PCI, then we are going to check for a PCI bus error status - * should we get too many spurious interrupts. 
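
The stray-interrupt policy described here is worth stating on its own: on a shared line, an interrupt with nothing pending is normal in small numbers, so only escalate after a run of them. A sketch with an invented escalate() hook standing in for the PCI error-status check, using the same threshold of 500 that appears below:

#include <stdbool.h>

struct irq_stats { unsigned int spurious; };

bool handle_possibly_spurious(struct irq_stats *st, bool int_pending,
                              void (*escalate)(void))
{
  if (int_pending)
  {
    st->spurious = 0;           /* real work resets the run counter */
    return true;
  }
  if (++st->spurious > 500)     /* same threshold the code below uses */
  {
    escalate();
    st->spurious = 0;
  }
  return false;
}
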
- */ - if (!((intstat = aic_inb(p, INTSTAT)) & INT_PEND)) - { -#ifdef CONFIG_PCI - if ( (p->chip & AHC_PCI) && (p->spurious_int > 500) && - !(p->flags & AHC_HANDLING_REQINITS) ) - { - if ( aic_inb(p, ERROR) & PCIERRSTAT ) - { - aic7xxx_pci_intr(p); - } - p->spurious_int = 0; - } - else if ( !(p->flags & AHC_HANDLING_REQINITS) ) - { - p->spurious_int++; - } -#endif - return; - } - - p->spurious_int = 0; - - /* - * Keep track of interrupts for /proc/scsi - */ - p->isr_count++; - -#ifdef AIC7XXX_VERBOSE_DEBUGGING - if ( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) && - (aic7xxx_panic_on_abort) && (p->flags & AHC_PAGESCBS) ) - aic7xxx_check_scbs(p, "Bogus settings at start of interrupt."); -#endif - - /* - * Handle all the interrupt sources - especially for SCSI - * interrupts, we won't get a second chance at them. - */ - if (intstat & CMDCMPLT) - { - aic7xxx_handle_command_completion_intr(p); - } - - if (intstat & BRKADRINT) - { - int i; - unsigned char errno = aic_inb(p, ERROR); - - printk(KERN_ERR "(scsi%d) BRKADRINT error(0x%x):\n", p->host_no, errno); - for (i = 0; i < ARRAY_SIZE(hard_error); i++) - { - if (errno & hard_error[i].errno) - { - printk(KERN_ERR " %s\n", hard_error[i].errmesg); - } - } - printk(KERN_ERR "(scsi%d) SEQADDR=0x%x\n", p->host_no, - (((aic_inb(p, SEQADDR1) << 8) & 0x100) | aic_inb(p, SEQADDR0))); - if (aic7xxx_panic_on_abort) - aic7xxx_panic_abort(p, NULL); -#ifdef CONFIG_PCI - if (errno & PCIERRSTAT) - aic7xxx_pci_intr(p); -#endif - if (errno & (SQPARERR | ILLOPCODE | ILLSADDR)) - { - panic("aic7xxx: unrecoverable BRKADRINT.\n"); - } - if (errno & ILLHADDR) - { - printk(KERN_ERR "(scsi%d) BUG! Driver accessed chip without first " - "pausing controller!\n", p->host_no); - } -#ifdef AIC7XXX_VERBOSE_DEBUGGING - if (errno & DPARERR) - { - if (aic_inb(p, DMAPARAMS) & DIRECTION) - printk("(scsi%d) while DMAing SCB from host to card.\n", p->host_no); - else - printk("(scsi%d) while DMAing SCB from card to host.\n", p->host_no); - } -#endif - aic_outb(p, CLRPARERR | CLRBRKADRINT, CLRINT); - unpause_sequencer(p, FALSE); - } - - if (intstat & SEQINT) - { - /* - * Read the CCSCBCTL register to work around a bug in the Ultra2 cards - */ - if(p->features & AHC_ULTRA2) - { - aic_inb(p, CCSCBCTL); - } - aic7xxx_handle_seqint(p, intstat); - } - - if (intstat & SCSIINT) - { - aic7xxx_handle_scsiint(p, intstat); - } - -#ifdef AIC7XXX_VERBOSE_DEBUGGING - if ( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) && - (aic7xxx_panic_on_abort) && (p->flags & AHC_PAGESCBS) ) - aic7xxx_check_scbs(p, "Bogus settings at end of interrupt."); -#endif - -} - -/*+F************************************************************************* - * Function: - * do_aic7xxx_isr - * - * Description: - * This is a gross hack to solve a problem in linux kernels 2.1.85 and - * above. 
Please, children, do not try this at home, and if you ever see - * anything like it, please inform the Gross Hack Police immediately - *-F*************************************************************************/ -static irqreturn_t -do_aic7xxx_isr(int irq, void *dev_id) -{ - unsigned long cpu_flags; - struct aic7xxx_host *p; - - p = dev_id; - if(!p) - return IRQ_NONE; - spin_lock_irqsave(p->host->host_lock, cpu_flags); - p->flags |= AHC_IN_ISR; - do - { - aic7xxx_isr(dev_id); - } while ( (aic_inb(p, INTSTAT) & INT_PEND) ); - aic7xxx_done_cmds_complete(p); - aic7xxx_run_waiting_queues(p); - p->flags &= ~AHC_IN_ISR; - spin_unlock_irqrestore(p->host->host_lock, cpu_flags); - - return IRQ_HANDLED; -} - -/*+F************************************************************************* - * Function: - * aic7xxx_init_transinfo - * - * Description: - * Set up the initial aic_dev values from the BIOS settings and from - * INQUIRY results - *-F*************************************************************************/ -static void -aic7xxx_init_transinfo(struct aic7xxx_host *p, struct aic_dev_data *aic_dev) -{ - struct scsi_device *sdpnt = aic_dev->SDptr; - unsigned char tindex; - - tindex = sdpnt->id | (sdpnt->channel << 3); - if (!(aic_dev->flags & DEVICE_DTR_SCANNED)) - { - aic_dev->flags |= DEVICE_DTR_SCANNED; - - if ( sdpnt->wdtr && (p->features & AHC_WIDE) ) - { - aic_dev->needwdtr = aic_dev->needwdtr_copy = 1; - aic_dev->goal.width = p->user[tindex].width; - } - else - { - aic_dev->needwdtr = aic_dev->needwdtr_copy = 0; - pause_sequencer(p); - aic7xxx_set_width(p, sdpnt->id, sdpnt->channel, sdpnt->lun, - MSG_EXT_WDTR_BUS_8_BIT, (AHC_TRANS_ACTIVE | - AHC_TRANS_GOAL | - AHC_TRANS_CUR), aic_dev ); - unpause_sequencer(p, FALSE); - } - if ( sdpnt->sdtr && p->user[tindex].offset ) - { - aic_dev->goal.period = p->user[tindex].period; - aic_dev->goal.options = p->user[tindex].options; - if (p->features & AHC_ULTRA2) - aic_dev->goal.offset = MAX_OFFSET_ULTRA2; - else if (aic_dev->goal.width == MSG_EXT_WDTR_BUS_16_BIT) - aic_dev->goal.offset = MAX_OFFSET_16BIT; - else - aic_dev->goal.offset = MAX_OFFSET_8BIT; - if ( sdpnt->ppr && p->user[tindex].period <= 9 && - p->user[tindex].options ) - { - aic_dev->needppr = aic_dev->needppr_copy = 1; - aic_dev->needsdtr = aic_dev->needsdtr_copy = 0; - aic_dev->needwdtr = aic_dev->needwdtr_copy = 0; - aic_dev->flags |= DEVICE_SCSI_3; - } - else - { - aic_dev->needsdtr = aic_dev->needsdtr_copy = 1; - aic_dev->goal.period = max_t(unsigned char, 10, aic_dev->goal.period); - aic_dev->goal.options = 0; - } - } - else - { - aic_dev->needsdtr = aic_dev->needsdtr_copy = 0; - aic_dev->goal.period = 255; - aic_dev->goal.offset = 0; - aic_dev->goal.options = 0; - } - aic_dev->flags |= DEVICE_PRINT_DTR; - } -} - -/*+F************************************************************************* - * Function: - * aic7xxx_slave_alloc - * - * Description: - * Set up the initial aic_dev struct pointers - *-F*************************************************************************/ -static int -aic7xxx_slave_alloc(struct scsi_device *SDptr) -{ - struct aic7xxx_host *p = (struct aic7xxx_host *)SDptr->host->hostdata; - struct aic_dev_data *aic_dev; - - aic_dev = kmalloc(sizeof(struct aic_dev_data), GFP_KERNEL); - if(!aic_dev) - return 1; - /* - * Check to see if channel was scanned. 
- */ - - if (!(p->flags & AHC_A_SCANNED) && (SDptr->channel == 0)) - { - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(INFO_LEAD "Scanning channel for devices.\n", - p->host_no, 0, -1, -1); - p->flags |= AHC_A_SCANNED; - } - else - { - if (!(p->flags & AHC_B_SCANNED) && (SDptr->channel == 1)) - { - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(INFO_LEAD "Scanning channel for devices.\n", - p->host_no, 1, -1, -1); - p->flags |= AHC_B_SCANNED; - } - } - - memset(aic_dev, 0, sizeof(struct aic_dev_data)); - SDptr->hostdata = aic_dev; - aic_dev->SDptr = SDptr; - aic_dev->max_q_depth = 1; - aic_dev->temp_q_depth = 1; - scbq_init(&aic_dev->delayed_scbs); - INIT_LIST_HEAD(&aic_dev->list); - list_add_tail(&aic_dev->list, &p->aic_devs); - return 0; -} - -/*+F************************************************************************* - * Function: - * aic7xxx_device_queue_depth - * - * Description: - * Determines the queue depth for a given device. There are two ways - * a queue depth can be obtained for a tagged queueing device. One - * way is the default queue depth which is determined by whether - * aic7xxx_default_queue_depth. The other is by the aic7xxx_tag_info - * array. - * - * If tagged queueing isn't supported on the device, then we set the - * depth to p->host->hostt->cmd_per_lun for internal driver queueing. - * as the default queue depth. Otherwise, we use either 4 or 8 as the - * default queue depth (dependent on the number of hardware SCBs). - * The other way we determine queue depth is through the use of the - * aic7xxx_tag_info array which is enabled by defining - * AIC7XXX_TAGGED_QUEUEING_BY_DEVICE. This array can be initialized - * with queue depths for individual devices. It also allows tagged - * queueing to be [en|dis]abled for a specific adapter. 
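
The table lookup described above collapses to two sentinel values per target: 255 disables tagged queueing outright and 0 selects the global default depth; anything else is the depth itself. In isolation, with an invented result struct but the same sentinels:

struct depth_choice { int tagged; int depth; };

struct depth_choice pick_queue_depth(unsigned char tag_cmds,
                                     int default_depth, int cmd_per_lun)
{
  struct depth_choice c;

  if (tag_cmds == 255)          /* sentinel: tagging disabled */
  {
    c.tagged = 0;
    c.depth = cmd_per_lun;      /* untagged devices queue per LUN */
  }
  else
  {
    c.tagged = 1;
    c.depth = tag_cmds ? tag_cmds : default_depth;  /* 0 = default */
  }
  return c;
}
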
- *-F*************************************************************************/ -static void -aic7xxx_device_queue_depth(struct aic7xxx_host *p, struct scsi_device *device) -{ - int tag_enabled = FALSE; - struct aic_dev_data *aic_dev = device->hostdata; - unsigned char tindex; - - tindex = device->id | (device->channel << 3); - - if (device->simple_tags) - return; // We've already enabled this device - - if (device->tagged_supported) - { - tag_enabled = TRUE; - - if (!(p->discenable & (1 << tindex))) - { - if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) - printk(INFO_LEAD "Disconnection disabled, unable to " - "enable tagged queueing.\n", - p->host_no, device->channel, device->id, device->lun); - tag_enabled = FALSE; - } - else - { - if (p->instance >= ARRAY_SIZE(aic7xxx_tag_info)) - { - static int print_warning = TRUE; - if(print_warning) - { - printk(KERN_INFO "aic7xxx: WARNING, insufficient tag_info instances for" - " installed controllers.\n"); - printk(KERN_INFO "aic7xxx: Please update the aic7xxx_tag_info array in" - " the aic7xxx.c source file.\n"); - print_warning = FALSE; - } - aic_dev->max_q_depth = aic_dev->temp_q_depth = - aic7xxx_default_queue_depth; - } - else - { - - if (aic7xxx_tag_info[p->instance].tag_commands[tindex] == 255) - { - tag_enabled = FALSE; - } - else if (aic7xxx_tag_info[p->instance].tag_commands[tindex] == 0) - { - aic_dev->max_q_depth = aic_dev->temp_q_depth = - aic7xxx_default_queue_depth; - } - else - { - aic_dev->max_q_depth = aic_dev->temp_q_depth = - aic7xxx_tag_info[p->instance].tag_commands[tindex]; - } - } - } - } - if (tag_enabled) - { - if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) - { - printk(INFO_LEAD "Tagged queuing enabled, queue depth %d.\n", - p->host_no, device->channel, device->id, - device->lun, aic_dev->max_q_depth); - } - scsi_adjust_queue_depth(device, MSG_ORDERED_TAG, aic_dev->max_q_depth); - } - else - { - if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) - { - printk(INFO_LEAD "Tagged queuing disabled, queue depth %d.\n", - p->host_no, device->channel, device->id, - device->lun, device->host->cmd_per_lun); - } - scsi_adjust_queue_depth(device, 0, device->host->cmd_per_lun); - } - return; -} - -/*+F************************************************************************* - * Function: - * aic7xxx_slave_destroy - * - * Description: - * prepare for this device to go away - *-F*************************************************************************/ -static void -aic7xxx_slave_destroy(struct scsi_device *SDptr) -{ - struct aic_dev_data *aic_dev = SDptr->hostdata; - - list_del(&aic_dev->list); - SDptr->hostdata = NULL; - kfree(aic_dev); - return; -} - -/*+F************************************************************************* - * Function: - * aic7xxx_slave_configure - * - * Description: - * Configure the device we are attaching to the controller. This is - * where we get to do things like scan the INQUIRY data, set queue - * depths, allocate command structs, etc. 
- *-F*************************************************************************/ -static int -aic7xxx_slave_configure(struct scsi_device *SDptr) -{ - struct aic7xxx_host *p = (struct aic7xxx_host *) SDptr->host->hostdata; - struct aic_dev_data *aic_dev; - int scbnum; - - aic_dev = (struct aic_dev_data *)SDptr->hostdata; - - aic7xxx_init_transinfo(p, aic_dev); - aic7xxx_device_queue_depth(p, SDptr); - if(list_empty(&aic_dev->list)) - list_add_tail(&aic_dev->list, &p->aic_devs); - - scbnum = 0; - list_for_each_entry(aic_dev, &p->aic_devs, list) { - scbnum += aic_dev->max_q_depth; - } - while (scbnum > p->scb_data->numscbs) - { - /* - * Pre-allocate the needed SCBs to get around the possibility of having - * to allocate some when memory is more or less exhausted and we need - * the SCB in order to perform a swap operation (possible deadlock) - */ - if ( aic7xxx_allocate_scb(p) == 0 ) - break; - } - - - return(0); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_probe - * - * Description: - * Probing for EISA boards: it looks like the first two bytes - * are a manufacturer code - three characters, five bits each: - * - * BYTE 0 BYTE 1 BYTE 2 BYTE 3 - * ?1111122 22233333 PPPPPPPP RRRRRRRR - * - * The characters are baselined off ASCII '@', so add that value - * to each to get the real ASCII code for it. The next two bytes - * appear to be a product and revision number, probably vendor- - * specific. This is what is being searched for at each port, - * and what should probably correspond to the ID= field in the - * ECU's .cfg file for the card - if your card is not detected, - * make sure your signature is listed in the array. - * - * The fourth byte's lowest bit seems to be an enabled/disabled - * flag (rest of the bits are reserved?). - * - * NOTE: This function is only needed on Intel and Alpha platforms, - * the other platforms we support don't have EISA/VLB busses. So, - * we #ifdef this entire function to avoid compiler warnings about - * an unused function. - *-F*************************************************************************/ -#if defined(__i386__) || defined(__alpha__) -static int -aic7xxx_probe(int slot, int base, ahc_flag_type *flags) -{ - int i; - unsigned char buf[4]; - - static struct { - int n; - unsigned char signature[sizeof(buf)]; - ahc_chip type; - int bios_disabled; - } AIC7xxx[] = { - { 4, { 0x04, 0x90, 0x77, 0x70 }, - AHC_AIC7770|AHC_EISA, FALSE }, /* mb 7770 */ - { 4, { 0x04, 0x90, 0x77, 0x71 }, - AHC_AIC7770|AHC_EISA, FALSE }, /* host adapter 274x */ - { 4, { 0x04, 0x90, 0x77, 0x56 }, - AHC_AIC7770|AHC_VL, FALSE }, /* 284x BIOS enabled */ - { 4, { 0x04, 0x90, 0x77, 0x57 }, - AHC_AIC7770|AHC_VL, TRUE } /* 284x BIOS disabled */ - }; - - /* - * The VL-bus cards need to be primed by - * writing before a signature check. - */ - for (i = 0; i < sizeof(buf); i++) - { - outb(0x80 + i, base); - buf[i] = inb(base + i); - } - - for (i = 0; i < ARRAY_SIZE(AIC7xxx); i++) - { - /* - * Signature match on enabled card? 
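
The two signature bytes checked below encode the manufacturer ID exactly as the layout diagram above describes: three 5-bit characters baselined at ASCII '@'. A decoder for that packing; feeding it the table's 0x04 0x90 yields "ADP":

#include <stdio.h>

void eisa_vendor_id(unsigned char b0, unsigned char b1, char out[4])
{
  out[0] = (char)('@' + ((b0 >> 2) & 0x1f));
  out[1] = (char)('@' + (((b0 & 0x03) << 3) | ((b1 >> 5) & 0x07)));
  out[2] = (char)('@' + (b1 & 0x1f));
  out[3] = '\0';
}

int main(void)
{
  char id[4];

  eisa_vendor_id(0x04, 0x90, id);
  printf("%s\n", id);           /* prints ADP */
  return 0;
}
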
- */ - if (!memcmp(buf, AIC7xxx[i].signature, AIC7xxx[i].n)) - { - if (inb(base + 4) & 1) - { - if (AIC7xxx[i].bios_disabled) - { - *flags |= AHC_USEDEFAULTS; - } - else - { - *flags |= AHC_BIOS_ENABLED; - } - return (i); - } - - printk("aic7xxx: <Adaptec 7770 SCSI Host Adapter> " - "disabled at slot %d, ignored.\n", slot); - } - } - - return (-1); -} -#endif /* (__i386__) || (__alpha__) */ - - -/*+F************************************************************************* - * Function: - * read_2840_seeprom - * - * Description: - * Reads the 2840 serial EEPROM and returns 1 if successful and 0 if - * not successful. - * - * See read_seeprom (for the 2940) for the instruction set of the 93C46 - * chip. - * - * The 2840 interface to the 93C46 serial EEPROM is through the - * STATUS_2840 and SEECTL_2840 registers. The CS_2840, CK_2840, and - * DO_2840 bits of the SEECTL_2840 register are connected to the chip - * select, clock, and data out lines respectively of the serial EEPROM. - * The DI_2840 bit of the STATUS_2840 is connected to the data in line - * of the serial EEPROM. The EEPROM_TF bit of STATUS_2840 register is - * useful in that it gives us an 800 nsec timer. After a read from the - * SEECTL_2840 register the timing flag is cleared and goes high 800 nsec - * later. - *-F*************************************************************************/ -static int -read_284x_seeprom(struct aic7xxx_host *p, struct seeprom_config *sc) -{ - int i = 0, k = 0; - unsigned char temp; - unsigned short checksum = 0; - unsigned short *seeprom = (unsigned short *) sc; - struct seeprom_cmd { - unsigned char len; - unsigned char bits[3]; - }; - struct seeprom_cmd seeprom_read = {3, {1, 1, 0}}; - -#define CLOCK_PULSE(p) \ - while ((aic_inb(p, STATUS_2840) & EEPROM_TF) == 0) \ - { \ - ; /* Do nothing */ \ - } \ - (void) aic_inb(p, SEECTL_2840); - - /* - * Read the first 32 registers of the seeprom. For the 2840, - * the 93C46 SEEPROM is a 1024-bit device with 64 16-bit registers - * but only the first 32 are used by Adaptec BIOS. The loop - * will range from 0 to 31. - */ - for (k = 0; k < (sizeof(*sc) / 2); k++) - { - /* - * Send chip select for one clock cycle. - */ - aic_outb(p, CK_2840 | CS_2840, SEECTL_2840); - CLOCK_PULSE(p); - - /* - * Now we're ready to send the read command followed by the - * address of the 16-bit register we want to read. - */ - for (i = 0; i < seeprom_read.len; i++) - { - temp = CS_2840 | seeprom_read.bits[i]; - aic_outb(p, temp, SEECTL_2840); - CLOCK_PULSE(p); - temp = temp ^ CK_2840; - aic_outb(p, temp, SEECTL_2840); - CLOCK_PULSE(p); - } - /* - * Send the 6 bit address (MSB first, LSB last). - */ - for (i = 5; i >= 0; i--) - { - temp = k; - temp = (temp >> i) & 1; /* Mask out all but lower bit. */ - temp = CS_2840 | temp; - aic_outb(p, temp, SEECTL_2840); - CLOCK_PULSE(p); - temp = temp ^ CK_2840; - aic_outb(p, temp, SEECTL_2840); - CLOCK_PULSE(p); - } - - /* - * Now read the 16 bit register. An initial 0 precedes the - * register contents which begins with bit 15 (MSB) and ends - * with bit 0 (LSB). The initial 0 will be shifted off the - * top of our word as we let the loop run from 0 to 16. - */ - for (i = 0; i <= 16; i++) - { - temp = CS_2840; - aic_outb(p, temp, SEECTL_2840); - CLOCK_PULSE(p); - temp = temp ^ CK_2840; - seeprom[k] = (seeprom[k] << 1) | (aic_inb(p, STATUS_2840) & DI_2840); - aic_outb(p, temp, SEECTL_2840); - CLOCK_PULSE(p); - } - /* - * The serial EEPROM has a checksum in the last word. 
Keep a - * running checksum for all words read except for the last - * word. We'll verify the checksum after all words have been - * read. - */ - if (k < (sizeof(*sc) / 2) - 1) - { - checksum = checksum + seeprom[k]; - } - - /* - * Reset the chip select for the next command cycle. - */ - aic_outb(p, 0, SEECTL_2840); - CLOCK_PULSE(p); - aic_outb(p, CK_2840, SEECTL_2840); - CLOCK_PULSE(p); - aic_outb(p, 0, SEECTL_2840); - CLOCK_PULSE(p); - } - -#if 0 - printk("Computed checksum 0x%x, checksum read 0x%x\n", checksum, sc->checksum); - printk("Serial EEPROM:"); - for (k = 0; k < (sizeof(*sc) / 2); k++) - { - if (((k % 8) == 0) && (k != 0)) - { - printk("\n "); - } - printk(" 0x%x", seeprom[k]); - } - printk("\n"); -#endif - - if (checksum != sc->checksum) - { - printk("aic7xxx: SEEPROM checksum error, ignoring SEEPROM settings.\n"); - return (0); - } - - return (1); -#undef CLOCK_PULSE -} - -#define CLOCK_PULSE(p) \ - do { \ - int limit = 0; \ - do { \ - mb(); \ - pause_sequencer(p); /* This is just to generate some PCI */ \ - /* traffic so the PCI read is flushed */ \ - /* it shouldn't be needed, but some */ \ - /* chipsets do indeed appear to need */ \ - /* something to force PCI reads to get */ \ - /* flushed */ \ - udelay(1); /* Do nothing */ \ - } while (((aic_inb(p, SEECTL) & SEERDY) == 0) && (++limit < 1000)); \ - } while(0) - -/*+F************************************************************************* - * Function: - * acquire_seeprom - * - * Description: - * Acquires access to the memory port on PCI controllers. - *-F*************************************************************************/ -static int -acquire_seeprom(struct aic7xxx_host *p) -{ - - /* - * Request access of the memory port. When access is - * granted, SEERDY will go high. We use a 1 second - * timeout which should be near 1 second more than - * is needed. Reason: after the 7870 chip reset, there - * should be no contention. - */ - aic_outb(p, SEEMS, SEECTL); - CLOCK_PULSE(p); - if ((aic_inb(p, SEECTL) & SEERDY) == 0) - { - aic_outb(p, 0, SEECTL); - return (0); - } - return (1); -} - -/*+F************************************************************************* - * Function: - * release_seeprom - * - * Description: - * Releases access to the memory port on PCI controllers. - *-F*************************************************************************/ -static void -release_seeprom(struct aic7xxx_host *p) -{ - /* - * Make sure the SEEPROM is ready before we release it. - */ - CLOCK_PULSE(p); - aic_outb(p, 0, SEECTL); -} - -/*+F************************************************************************* - * Function: - * read_seeprom - * - * Description: - * Reads the serial EEPROM and returns 1 if successful and 0 if - * not successful. - * - * The instruction set of the 93C46/56/66 chips is as follows: - * - * Start OP - * Function Bit Code Address Data Description - * ------------------------------------------------------------------- - * READ 1 10 A5 - A0 Reads data stored in memory, - * starting at specified address - * EWEN 1 00 11XXXX Write enable must precede - * all programming modes - * ERASE 1 11 A5 - A0 Erase register A5A4A3A2A1A0 - * WRITE 1 01 A5 - A0 D15 - D0 Writes register - * ERAL 1 00 10XXXX Erase all registers - * WRAL 1 00 01XXXX D15 - D0 Writes to all registers - * EWDS 1 00 00XXXX Disables all programming - * instructions - * *Note: A value of X for address is a don't care condition. - * *Note: The 93C56 and 93C66 have 8 address bits. 
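 *
 * A worked example (editorial, not part of the original comment): a
 * READ of word 5 from a 93C46 therefore clocks out, MSB first,
 *
 *     1   1 0   0 0 0 1 0 1      (start bit, READ opcode, address 5)
 *
 * which is exactly what the driver's seeprom_read = {3, {1, 1, 0}}
 * command table plus the 6-bit (8-bit on the 93C56/66) address loop
 * below shift onto the data-in line of the chip.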
- * - * - * The 93C46 has a four wire interface: clock, chip select, data in, and - * data out. In order to perform one of the above functions, you need - * to enable the chip select for a clock period (typically a minimum of - * 1 usec, with the clock high and low a minimum of 750 and 250 nsec - * respectively. While the chip select remains high, you can clock in - * the instructions (above) starting with the start bit, followed by the - * OP code, Address, and Data (if needed). For the READ instruction, the - * requested 16-bit register contents is read from the data out line but - * is preceded by an initial zero (leading 0, followed by 16-bits, MSB - * first). The clock cycling from low to high initiates the next data - * bit to be sent from the chip. - * - * The 78xx interface to the 93C46 serial EEPROM is through the SEECTL - * register. After successful arbitration for the memory port, the - * SEECS bit of the SEECTL register is connected to the chip select. - * The SEECK, SEEDO, and SEEDI are connected to the clock, data out, - * and data in lines respectively. The SEERDY bit of SEECTL is useful - * in that it gives us an 800 nsec timer. After a write to the SEECTL - * register, the SEERDY goes high 800 nsec later. The one exception - * to this is when we first request access to the memory port. The - * SEERDY goes high to signify that access has been granted and, for - * this case, has no implied timing. - *-F*************************************************************************/ -static int -read_seeprom(struct aic7xxx_host *p, int offset, - unsigned short *scarray, unsigned int len, seeprom_chip_type chip) -{ - int i = 0, k; - unsigned char temp; - unsigned short checksum = 0; - struct seeprom_cmd { - unsigned char len; - unsigned char bits[3]; - }; - struct seeprom_cmd seeprom_read = {3, {1, 1, 0}}; - - /* - * Request access of the memory port. - */ - if (acquire_seeprom(p) == 0) - { - return (0); - } - - /* - * Read 'len' registers of the seeprom. For the 7870, the 93C46 - * SEEPROM is a 1024-bit device with 64 16-bit registers but only - * the first 32 are used by Adaptec BIOS. Some adapters use the - * 93C56 SEEPROM which is a 2048-bit device. The loop will range - * from 0 to 'len' - 1. - */ - for (k = 0; k < len; k++) - { - /* - * Send chip select for one clock cycle. - */ - aic_outb(p, SEEMS | SEECK | SEECS, SEECTL); - CLOCK_PULSE(p); - - /* - * Now we're ready to send the read command followed by the - * address of the 16-bit register we want to read. - */ - for (i = 0; i < seeprom_read.len; i++) - { - temp = SEEMS | SEECS | (seeprom_read.bits[i] << 1); - aic_outb(p, temp, SEECTL); - CLOCK_PULSE(p); - temp = temp ^ SEECK; - aic_outb(p, temp, SEECTL); - CLOCK_PULSE(p); - } - /* - * Send the 6 or 8 bit address (MSB first, LSB last). - */ - for (i = ((int) chip - 1); i >= 0; i--) - { - temp = k + offset; - temp = (temp >> i) & 1; /* Mask out all but lower bit. */ - temp = SEEMS | SEECS | (temp << 1); - aic_outb(p, temp, SEECTL); - CLOCK_PULSE(p); - temp = temp ^ SEECK; - aic_outb(p, temp, SEECTL); - CLOCK_PULSE(p); - } - - /* - * Now read the 16 bit register. An initial 0 precedes the - * register contents which begins with bit 15 (MSB) and ends - * with bit 0 (LSB). The initial 0 will be shifted off the - * top of our word as we let the loop run from 0 to 16. 
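 *
 * Editorial note: this dummy zero is why the loop that follows runs
 * seventeen times (i = 0 through 16) rather than sixteen; a sketch of
 * the effect, with data_in as the sampled bit:
 *
 *     word = (word << 1) | data_in;   /* 17 shifts: dummy 0 + D15..D0 */
 *
 * After the last shift the leading dummy bit has been pushed off the
 * top of the 16-bit word, leaving just D15..D0.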
- */ - for (i = 0; i <= 16; i++) - { - temp = SEEMS | SEECS; - aic_outb(p, temp, SEECTL); - CLOCK_PULSE(p); - temp = temp ^ SEECK; - scarray[k] = (scarray[k] << 1) | (aic_inb(p, SEECTL) & SEEDI); - aic_outb(p, temp, SEECTL); - CLOCK_PULSE(p); - } - - /* - * The serial EEPROM should have a checksum in the last word. - * Keep a running checksum for all words read except for the - * last word. We'll verify the checksum after all words have - * been read. - */ - if (k < (len - 1)) - { - checksum = checksum + scarray[k]; - } - - /* - * Reset the chip select for the next command cycle. - */ - aic_outb(p, SEEMS, SEECTL); - CLOCK_PULSE(p); - aic_outb(p, SEEMS | SEECK, SEECTL); - CLOCK_PULSE(p); - aic_outb(p, SEEMS, SEECTL); - CLOCK_PULSE(p); - } - - /* - * Release access to the memory port and the serial EEPROM. - */ - release_seeprom(p); - -#if 0 - printk("Computed checksum 0x%x, checksum read 0x%x\n", - checksum, scarray[len - 1]); - printk("Serial EEPROM:"); - for (k = 0; k < len; k++) - { - if (((k % 8) == 0) && (k != 0)) - { - printk("\n "); - } - printk(" 0x%x", scarray[k]); - } - printk("\n"); -#endif - if ( (checksum != scarray[len - 1]) || (checksum == 0) ) - { - return (0); - } - - return (1); -} - -/*+F************************************************************************* - * Function: - * read_brdctl - * - * Description: - * Reads the BRDCTL register. - *-F*************************************************************************/ -static unsigned char -read_brdctl(struct aic7xxx_host *p) -{ - unsigned char brdctl, value; - - /* - * Make sure the SEEPROM is ready before we access it - */ - CLOCK_PULSE(p); - if (p->features & AHC_ULTRA2) - { - brdctl = BRDRW_ULTRA2; - aic_outb(p, brdctl, BRDCTL); - CLOCK_PULSE(p); - value = aic_inb(p, BRDCTL); - CLOCK_PULSE(p); - return(value); - } - brdctl = BRDRW; - if ( !((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895) || - (p->flags & AHC_CHNLB) ) - { - brdctl |= BRDCS; - } - aic_outb(p, brdctl, BRDCTL); - CLOCK_PULSE(p); - value = aic_inb(p, BRDCTL); - CLOCK_PULSE(p); - aic_outb(p, 0, BRDCTL); - CLOCK_PULSE(p); - return (value); -} - -/*+F************************************************************************* - * Function: - * write_brdctl - * - * Description: - * Writes a value to the BRDCTL register. 
- *-F*************************************************************************/
-static void
-write_brdctl(struct aic7xxx_host *p, unsigned char value)
-{
-  unsigned char brdctl;
-
-  /*
-   * Make sure the SEEPROM is ready before we access it
-   */
-  CLOCK_PULSE(p);
-  if (p->features & AHC_ULTRA2)
-  {
-    brdctl = value;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-    brdctl |= BRDSTB_ULTRA2;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-    brdctl &= ~BRDSTB_ULTRA2;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-    read_brdctl(p);
-    CLOCK_PULSE(p);
-  }
-  else
-  {
-    brdctl = BRDSTB;
-    if ( !((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895) ||
-          (p->flags & AHC_CHNLB) )
-    {
-      brdctl |= BRDCS;
-    }
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-    brdctl |= value;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-    brdctl &= ~BRDSTB;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-    brdctl &= ~BRDCS;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-  }
-}
-
-/*+F*************************************************************************
- * Function:
- *   aic785x_cable_detect
- *
- * Description:
- *   Detect the cables that are present on aic785x class controller chips
- *-F*************************************************************************/
-static void
-aic785x_cable_detect(struct aic7xxx_host *p, int *int_50,
-    int *ext_present, int *eeprom)
-{
-  unsigned char brdctl;
-
-  aic_outb(p, BRDRW | BRDCS, BRDCTL);
-  CLOCK_PULSE(p);
-  aic_outb(p, 0, BRDCTL);
-  CLOCK_PULSE(p);
-  brdctl = aic_inb(p, BRDCTL);
-  CLOCK_PULSE(p);
-  *int_50 = !(brdctl & BRDDAT5);
-  *ext_present = !(brdctl & BRDDAT6);
-  *eeprom = (aic_inb(p, SPIOCAP) & EEPROM);
-}
-
-#undef CLOCK_PULSE
-
-/*+F*************************************************************************
- * Function:
- *   aic2940_uwpro_wide_cable_detect
- *
- * Description:
- *   Detect the cables that are present on the 2940-UWPro cards
- *
- * NOTE: This function assumes the SEEPROM will have already been acquired
- *       prior to invocation of this function.
- *-F*************************************************************************/
-static void
-aic2940_uwpro_wide_cable_detect(struct aic7xxx_host *p, int *int_68,
-    int *ext_68, int *eeprom)
-{
-  unsigned char brdctl;
-
-  /*
-   * First read the status of our cables.  Set the rom bank to
-   * 0 since the bank setting serves as a multiplexor for the
-   * cable detection logic.  BRDDAT5 controls the bank switch.
-   */
-  write_brdctl(p, 0);
-
-  /*
-   * Now we read the state of the internal 68 connector.  BRDDAT6
-   * is don't care, BRDDAT7 is internal 68.  The cable is
-   * present if the bit is 0.
-   */
-  brdctl = read_brdctl(p);
-  *int_68 = !(brdctl & BRDDAT7);
-
-  /*
-   * Set the bank bit in brdctl and then read the external cable state
-   * and the EEPROM status.
-   */
-  write_brdctl(p, BRDDAT5);
-  brdctl = read_brdctl(p);
-
-  *ext_68 = !(brdctl & BRDDAT6);
-  *eeprom = !(brdctl & BRDDAT7);
-
-  /*
-   * We're done, the calling function will release the SEEPROM for us.
-   */
-}
-
-/*+F*************************************************************************
- * Function:
- *   aic787x_cable_detect
- *
- * Description:
- *   Detect the cables that are present on aic787x class controller chips
- *
- * NOTE: This function assumes the SEEPROM will have already been acquired
- *       prior to invocation of this function.
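 *
 * Editorial summary of the readout below: BRDDAT5 acts as a bank
 * select, and all status bits read back active-low:
 *
 *     bank 0 (write_brdctl(p, 0)):       BRDDAT6 = internal 50-pin,
 *                                        BRDDAT7 = internal 68-pin
 *     bank 1 (write_brdctl(p, BRDDAT5)): BRDDAT6 = external cable,
 *                                        BRDDAT7 = EEPROM present
 *
 * A cleared bit means the cable (or EEPROM) is present.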
- *-F*************************************************************************/ -static void -aic787x_cable_detect(struct aic7xxx_host *p, int *int_50, int *int_68, - int *ext_present, int *eeprom) -{ - unsigned char brdctl; - - /* - * First read the status of our cables. Set the rom bank to - * 0 since the bank setting serves as a multiplexor for the - * cable detection logic. BRDDAT5 controls the bank switch. - */ - write_brdctl(p, 0); - - /* - * Now we read the state of the two internal connectors. BRDDAT6 - * is internal 50, BRDDAT7 is internal 68. For each, the cable is - * present if the bit is 0 - */ - brdctl = read_brdctl(p); - *int_50 = !(brdctl & BRDDAT6); - *int_68 = !(brdctl & BRDDAT7); - - /* - * Set the bank bit in brdctl and then read the external cable state - * and the EEPROM status - */ - write_brdctl(p, BRDDAT5); - brdctl = read_brdctl(p); - - *ext_present = !(brdctl & BRDDAT6); - *eeprom = !(brdctl & BRDDAT7); - - /* - * We're done, the calling function will release the SEEPROM for us - */ -} - -/*+F************************************************************************* - * Function: - * aic787x_ultra2_term_detect - * - * Description: - * Detect the termination settings present on ultra2 class controllers - * - * NOTE: This function assumes the SEEPROM will have already been acquired - * prior to invocation of this function. - *-F*************************************************************************/ -static void -aic7xxx_ultra2_term_detect(struct aic7xxx_host *p, int *enableSE_low, - int *enableSE_high, int *enableLVD_low, - int *enableLVD_high, int *eprom_present) -{ - unsigned char brdctl; - - brdctl = read_brdctl(p); - - *eprom_present = (brdctl & BRDDAT7); - *enableSE_high = (brdctl & BRDDAT6); - *enableSE_low = (brdctl & BRDDAT5); - *enableLVD_high = (brdctl & BRDDAT4); - *enableLVD_low = (brdctl & BRDDAT3); -} - -/*+F************************************************************************* - * Function: - * configure_termination - * - * Description: - * Configures the termination settings on PCI adapters that have - * SEEPROMs available. - *-F*************************************************************************/ -static void -configure_termination(struct aic7xxx_host *p) -{ - int internal50_present = 0; - int internal68_present = 0; - int external_present = 0; - int eprom_present = 0; - int enableSE_low = 0; - int enableSE_high = 0; - int enableLVD_low = 0; - int enableLVD_high = 0; - unsigned char brddat = 0; - unsigned char max_target = 0; - unsigned char sxfrctl1 = aic_inb(p, SXFRCTL1); - - if (acquire_seeprom(p)) - { - if (p->features & (AHC_WIDE|AHC_TWIN)) - max_target = 16; - else - max_target = 8; - aic_outb(p, SEEMS | SEECS, SEECTL); - sxfrctl1 &= ~STPWEN; - /* - * The termination/cable detection logic is split into three distinct - * groups. Ultra2 and later controllers, 2940UW-Pro controllers, and - * older 7850, 7860, 7870, 7880, and 7895 controllers. Each has its - * own unique way of detecting their cables and writing the results - * back to the card. - */ - if (p->features & AHC_ULTRA2) - { - /* - * As long as user hasn't overridden term settings, always check the - * cable detection logic - */ - if (aic7xxx_override_term == -1) - { - aic7xxx_ultra2_term_detect(p, &enableSE_low, &enableSE_high, - &enableLVD_low, &enableLVD_high, - &eprom_present); - } - - /* - * If the user is overriding settings, then they have been preserved - * to here as fake adapter_control entries. 
Parse them and allow - * them to override the detected settings (if we even did detection). - */ - if (!(p->adapter_control & CFSEAUTOTERM)) - { - enableSE_low = (p->adapter_control & CFSTERM); - enableSE_high = (p->adapter_control & CFWSTERM); - } - if (!(p->adapter_control & CFAUTOTERM)) - { - enableLVD_low = enableLVD_high = (p->adapter_control & CFLVDSTERM); - } - - /* - * Now take those settings that we have and translate them into the - * values that must be written into the registers. - * - * Flash Enable = BRDDAT7 - * Secondary High Term Enable = BRDDAT6 - * Secondary Low Term Enable = BRDDAT5 - * LVD/Primary High Term Enable = BRDDAT4 - * LVD/Primary Low Term Enable = STPWEN bit in SXFRCTL1 - */ - if (enableLVD_low != 0) - { - sxfrctl1 |= STPWEN; - p->flags |= AHC_TERM_ENB_LVD; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(KERN_INFO "(scsi%d) LVD/Primary Low byte termination " - "Enabled\n", p->host_no); - } - - if (enableLVD_high != 0) - { - brddat |= BRDDAT4; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(KERN_INFO "(scsi%d) LVD/Primary High byte termination " - "Enabled\n", p->host_no); - } - - if (enableSE_low != 0) - { - brddat |= BRDDAT5; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(KERN_INFO "(scsi%d) Secondary Low byte termination " - "Enabled\n", p->host_no); - } - - if (enableSE_high != 0) - { - brddat |= BRDDAT6; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(KERN_INFO "(scsi%d) Secondary High byte termination " - "Enabled\n", p->host_no); - } - } - else if (p->features & AHC_NEW_AUTOTERM) - { - /* - * The 50 pin connector termination is controlled by STPWEN in the - * SXFRCTL1 register. Since the Adaptec docs typically say the - * controller is not allowed to be in the middle of a cable and - * this is the only connection on that stub of the bus, there is - * no need to even check for narrow termination, it's simply - * always on. - */ - sxfrctl1 |= STPWEN; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(KERN_INFO "(scsi%d) Narrow channel termination Enabled\n", - p->host_no); - - if (p->adapter_control & CFAUTOTERM) - { - aic2940_uwpro_wide_cable_detect(p, &internal68_present, - &external_present, - &eprom_present); - printk(KERN_INFO "(scsi%d) Cables present (Int-50 %s, Int-68 %s, " - "Ext-68 %s)\n", p->host_no, - "Don't Care", - internal68_present ? "YES" : "NO", - external_present ? "YES" : "NO"); - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(KERN_INFO "(scsi%d) EEPROM %s present.\n", p->host_no, - eprom_present ? 
"is" : "is not"); - if (internal68_present && external_present) - { - brddat = 0; - p->flags &= ~AHC_TERM_ENB_SE_HIGH; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(KERN_INFO "(scsi%d) Wide channel termination Disabled\n", - p->host_no); - } - else - { - brddat = BRDDAT6; - p->flags |= AHC_TERM_ENB_SE_HIGH; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(KERN_INFO "(scsi%d) Wide channel termination Enabled\n", - p->host_no); - } - } - else - { - /* - * The termination of the Wide channel is done more like normal - * though, and the setting of this termination is done by writing - * either a 0 or 1 to BRDDAT6 of the BRDDAT register - */ - if (p->adapter_control & CFWSTERM) - { - brddat = BRDDAT6; - p->flags |= AHC_TERM_ENB_SE_HIGH; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(KERN_INFO "(scsi%d) Wide channel termination Enabled\n", - p->host_no); - } - else - { - brddat = 0; - } - } - } - else - { - if (p->adapter_control & CFAUTOTERM) - { - if (p->flags & AHC_MOTHERBOARD) - { - printk(KERN_INFO "(scsi%d) Warning - detected auto-termination\n", - p->host_no); - printk(KERN_INFO "(scsi%d) Please verify driver detected settings " - "are correct.\n", p->host_no); - printk(KERN_INFO "(scsi%d) If not, then please properly set the " - "device termination\n", p->host_no); - printk(KERN_INFO "(scsi%d) in the Adaptec SCSI BIOS by hitting " - "CTRL-A when prompted\n", p->host_no); - printk(KERN_INFO "(scsi%d) during machine bootup.\n", p->host_no); - } - /* Configure auto termination. */ - - if ( (p->chip & AHC_CHIPID_MASK) >= AHC_AIC7870 ) - { - aic787x_cable_detect(p, &internal50_present, &internal68_present, - &external_present, &eprom_present); - } - else - { - aic785x_cable_detect(p, &internal50_present, &external_present, - &eprom_present); - } - - if (max_target <= 8) - internal68_present = 0; - - if (max_target > 8) - { - printk(KERN_INFO "(scsi%d) Cables present (Int-50 %s, Int-68 %s, " - "Ext-68 %s)\n", p->host_no, - internal50_present ? "YES" : "NO", - internal68_present ? "YES" : "NO", - external_present ? "YES" : "NO"); - } - else - { - printk(KERN_INFO "(scsi%d) Cables present (Int-50 %s, Ext-50 %s)\n", - p->host_no, - internal50_present ? "YES" : "NO", - external_present ? "YES" : "NO"); - } - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(KERN_INFO "(scsi%d) EEPROM %s present.\n", p->host_no, - eprom_present ? "is" : "is not"); - - /* - * Now set the termination based on what we found. BRDDAT6 - * controls wide termination enable. - * Flash Enable = BRDDAT7 - * SE High Term Enable = BRDDAT6 - */ - if (internal50_present && internal68_present && external_present) - { - printk(KERN_INFO "(scsi%d) Illegal cable configuration!! Only two\n", - p->host_no); - printk(KERN_INFO "(scsi%d) connectors on the SCSI controller may be " - "in use at a time!\n", p->host_no); - /* - * Force termination (low and high byte) on. This is safer than - * leaving it completely off, especially since this message comes - * most often from motherboard controllers that don't even have 3 - * connectors, but instead are failing the cable detection. - */ - internal50_present = external_present = 0; - enableSE_high = enableSE_low = 1; - } - - if ((max_target > 8) && - ((external_present == 0) || (internal68_present == 0)) ) - { - brddat |= BRDDAT6; - p->flags |= AHC_TERM_ENB_SE_HIGH; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(KERN_INFO "(scsi%d) SE High byte termination Enabled\n", - p->host_no); - } - - if ( ((internal50_present ? 1 : 0) + - (internal68_present ? 1 : 0) + - (external_present ? 
1 : 0)) <= 1 )
-        {
-          sxfrctl1 |= STPWEN;
-          p->flags |= AHC_TERM_ENB_SE_LOW;
-          if (aic7xxx_verbose & VERBOSE_PROBE2)
-            printk(KERN_INFO "(scsi%d) SE Low byte termination Enabled\n",
-                   p->host_no);
-        }
-      }
-      else /* p->adapter_control & CFAUTOTERM */
-      {
-        if (p->adapter_control & CFSTERM)
-        {
-          sxfrctl1 |= STPWEN;
-          if (aic7xxx_verbose & VERBOSE_PROBE2)
-            printk(KERN_INFO "(scsi%d) SE Low byte termination Enabled\n",
-                   p->host_no);
-        }
-
-        if (p->adapter_control & CFWSTERM)
-        {
-          brddat |= BRDDAT6;
-          if (aic7xxx_verbose & VERBOSE_PROBE2)
-            printk(KERN_INFO "(scsi%d) SE High byte termination Enabled\n",
-                   p->host_no);
-        }
-      }
-    }
-
-    aic_outb(p, sxfrctl1, SXFRCTL1);
-    write_brdctl(p, brddat);
-    release_seeprom(p);
-  }
-}
-
-/*+F*************************************************************************
- * Function:
- *   detect_maxscb
- *
- * Description:
- *   Detects the maximum number of hardware SCBs for the controller and
- *   records the count in p->scb_data->maxhscbs.
- *-F*************************************************************************/
-static void
-detect_maxscb(struct aic7xxx_host *p)
-{
-  int i;
-
-  /*
-   * It's possible that we've already done this for multichannel
-   * adapters.
-   */
-  if (p->scb_data->maxhscbs == 0)
-  {
-    /*
-     * We haven't initialized the SCB settings yet.  Walk the SCBs to
-     * determine how many there are.
-     */
-    aic_outb(p, 0, FREE_SCBH);
-
-    for (i = 0; i < AIC7XXX_MAXSCB; i++)
-    {
-      aic_outb(p, i, SCBPTR);
-      aic_outb(p, i, SCB_CONTROL);
-      if (aic_inb(p, SCB_CONTROL) != i)
-        break;
-      aic_outb(p, 0, SCBPTR);
-      if (aic_inb(p, SCB_CONTROL) != 0)
-        break;
-
-      aic_outb(p, i, SCBPTR);
-      aic_outb(p, 0, SCB_CONTROL); /* Clear the control byte. */
-      aic_outb(p, i + 1, SCB_NEXT); /* Set the next pointer. */
-      aic_outb(p, SCB_LIST_NULL, SCB_TAG); /* Make the tag invalid. */
-      aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS); /* no busy untagged */
-      aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+1);/* targets active yet */
-      aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+2);
-      aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+3);
-    }
-
-    /* Make sure the last SCB terminates the free list. */
-    aic_outb(p, i - 1, SCBPTR);
-    aic_outb(p, SCB_LIST_NULL, SCB_NEXT);
-
-    /* Ensure we clear the first (0) SCB's control byte. */
-    aic_outb(p, 0, SCBPTR);
-    aic_outb(p, 0, SCB_CONTROL);
-
-    p->scb_data->maxhscbs = i;
-    /*
-     * If the chip holds its full complement of SCBs, disable SCB paging
-     * and use direct indexing instead for speed.
-     */
-    if ( i == AIC7XXX_MAXSCB )
-      p->flags &= ~AHC_PAGESCBS;
-  }
-
-}
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_register
- *
- * Description:
- *   Register an Adaptec aic7xxx chip SCSI controller with the kernel.
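 *
 * Editorial roadmap of the function below, for orientation:
 *
 *   1. Fill in the struct Scsi_Host limits (can_queue, cmd_per_lun,
 *      sg_tablesize, max_id/max_channel) and announce the adapter.
 *   2. Size the on-chip SCB array (detect_maxscb) and, on PCI parts,
 *      apply any STPWLEVEL and termination overrides.
 *   3. Program SCSIID, SXFRCTL0/SXFRCTL1 and SIMODE1 for each channel.
 *   4. Allocate the DMA-coherent hardware SCB array and the three
 *      256-entry FIFOs and write their bus addresses to the chip.
 *   5. Load the sequencer, optionally reset the SCSI bus(es), then
 *      register the IRQ handler and unpause the sequencer.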
- *-F*************************************************************************/ -static int -aic7xxx_register(struct scsi_host_template *template, struct aic7xxx_host *p, - int reset_delay) -{ - int i, result; - int max_targets; - int found = 1; - unsigned char term, scsi_conf; - struct Scsi_Host *host; - - host = p->host; - - p->scb_data->maxscbs = AIC7XXX_MAXSCB; - host->can_queue = AIC7XXX_MAXSCB; - host->cmd_per_lun = 3; - host->sg_tablesize = AIC7XXX_MAX_SG; - host->this_id = p->scsi_id; - host->io_port = p->base; - host->n_io_port = 0xFF; - host->base = p->mbase; - host->irq = p->irq; - if (p->features & AHC_WIDE) - { - host->max_id = 16; - } - if (p->features & AHC_TWIN) - { - host->max_channel = 1; - } - - p->host = host; - p->host_no = host->host_no; - host->unique_id = p->instance; - p->isr_count = 0; - p->next = NULL; - p->completeq.head = NULL; - p->completeq.tail = NULL; - scbq_init(&p->scb_data->free_scbs); - scbq_init(&p->waiting_scbs); - INIT_LIST_HEAD(&p->aic_devs); - - /* - * We currently have no commands of any type - */ - p->qinfifonext = 0; - p->qoutfifonext = 0; - - printk(KERN_INFO "(scsi%d) <%s> found at ", p->host_no, - board_names[p->board_name_index]); - switch(p->chip) - { - case (AHC_AIC7770|AHC_EISA): - printk("EISA slot %d\n", p->pci_device_fn); - break; - case (AHC_AIC7770|AHC_VL): - printk("VLB slot %d\n", p->pci_device_fn); - break; - default: - printk("PCI %d/%d/%d\n", p->pci_bus, PCI_SLOT(p->pci_device_fn), - PCI_FUNC(p->pci_device_fn)); - break; - } - if (p->features & AHC_TWIN) - { - printk(KERN_INFO "(scsi%d) Twin Channel, A SCSI ID %d, B SCSI ID %d, ", - p->host_no, p->scsi_id, p->scsi_id_b); - } - else - { - char *channel; - - channel = ""; - - if ((p->flags & AHC_MULTI_CHANNEL) != 0) - { - channel = " A"; - - if ( (p->flags & (AHC_CHNLB|AHC_CHNLC)) != 0 ) - { - channel = (p->flags & AHC_CHNLB) ? " B" : " C"; - } - } - if (p->features & AHC_WIDE) - { - printk(KERN_INFO "(scsi%d) Wide ", p->host_no); - } - else - { - printk(KERN_INFO "(scsi%d) Narrow ", p->host_no); - } - printk("Channel%s, SCSI ID=%d, ", channel, p->scsi_id); - } - aic_outb(p, 0, SEQ_FLAGS); - - detect_maxscb(p); - - printk("%d/%d SCBs\n", p->scb_data->maxhscbs, p->scb_data->maxscbs); - if (aic7xxx_verbose & VERBOSE_PROBE2) - { - printk(KERN_INFO "(scsi%d) BIOS %sabled, IO Port 0x%lx, IRQ %d\n", - p->host_no, (p->flags & AHC_BIOS_ENABLED) ? "en" : "dis", - p->base, p->irq); - printk(KERN_INFO "(scsi%d) IO Memory at 0x%lx, MMAP Memory at %p\n", - p->host_no, p->mbase, p->maddr); - } - -#ifdef CONFIG_PCI - /* - * Now that we know our instance number, we can set the flags we need to - * force termination if need be. - */ - if (aic7xxx_stpwlev != -1) - { - /* - * This option only applies to PCI controllers. - */ - if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI) - { - unsigned char devconfig; - - pci_read_config_byte(p->pdev, DEVCONFIG, &devconfig); - if ( (aic7xxx_stpwlev >> p->instance) & 0x01 ) - { - devconfig |= STPWLEVEL; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk("(scsi%d) Force setting STPWLEVEL bit\n", p->host_no); - } - else - { - devconfig &= ~STPWLEVEL; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk("(scsi%d) Force clearing STPWLEVEL bit\n", p->host_no); - } - pci_write_config_byte(p->pdev, DEVCONFIG, devconfig); - } - } -#endif - - /* - * That took care of devconfig and stpwlev, now for the actual termination - * settings. - */ - if (aic7xxx_override_term != -1) - { - /* - * Again, this only applies to PCI controllers. 
We don't have problems - * with the termination on 274x controllers to the best of my knowledge. - */ - if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI) - { - unsigned char term_override; - - term_override = ( (aic7xxx_override_term >> (p->instance * 4)) & 0x0f); - p->adapter_control &= - ~(CFSTERM|CFWSTERM|CFLVDSTERM|CFAUTOTERM|CFSEAUTOTERM); - if ( (p->features & AHC_ULTRA2) && (term_override & 0x0c) ) - { - p->adapter_control |= CFLVDSTERM; - } - if (term_override & 0x02) - { - p->adapter_control |= CFWSTERM; - } - if (term_override & 0x01) - { - p->adapter_control |= CFSTERM; - } - } - } - - if ( (p->flags & AHC_SEEPROM_FOUND) || (aic7xxx_override_term != -1) ) - { - if (p->features & AHC_SPIOCAP) - { - if ( aic_inb(p, SPIOCAP) & SSPIOCPS ) - /* - * Update the settings in sxfrctl1 to match the termination - * settings. - */ - configure_termination(p); - } - else if ((p->chip & AHC_CHIPID_MASK) >= AHC_AIC7870) - { - configure_termination(p); - } - } - - /* - * Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels - */ - if (p->features & AHC_TWIN) - { - /* Select channel B */ - aic_outb(p, aic_inb(p, SBLKCTL) | SELBUSB, SBLKCTL); - - if ((p->flags & AHC_SEEPROM_FOUND) || (aic7xxx_override_term != -1)) - term = (aic_inb(p, SXFRCTL1) & STPWEN); - else - term = ((p->flags & AHC_TERM_ENB_B) ? STPWEN : 0); - - aic_outb(p, p->scsi_id_b, SCSIID); - scsi_conf = aic_inb(p, SCSICONF + 1); - aic_outb(p, DFON | SPIOEN, SXFRCTL0); - aic_outb(p, (scsi_conf & ENSPCHK) | aic7xxx_seltime | term | - ENSTIMER | ACTNEGEN, SXFRCTL1); - aic_outb(p, 0, SIMODE0); - aic_outb(p, ENSELTIMO | ENSCSIRST | ENSCSIPERR, SIMODE1); - aic_outb(p, 0, SCSIRATE); - - /* Select channel A */ - aic_outb(p, aic_inb(p, SBLKCTL) & ~SELBUSB, SBLKCTL); - } - - if (p->features & AHC_ULTRA2) - { - aic_outb(p, p->scsi_id, SCSIID_ULTRA2); - } - else - { - aic_outb(p, p->scsi_id, SCSIID); - } - if ((p->flags & AHC_SEEPROM_FOUND) || (aic7xxx_override_term != -1)) - term = (aic_inb(p, SXFRCTL1) & STPWEN); - else - term = ((p->flags & (AHC_TERM_ENB_A|AHC_TERM_ENB_LVD)) ? STPWEN : 0); - scsi_conf = aic_inb(p, SCSICONF); - aic_outb(p, DFON | SPIOEN, SXFRCTL0); - aic_outb(p, (scsi_conf & ENSPCHK) | aic7xxx_seltime | term | - ENSTIMER | ACTNEGEN, SXFRCTL1); - aic_outb(p, 0, SIMODE0); - /* - * If we are a cardbus adapter then don't enable SCSI reset detection. - * We shouldn't likely be sharing SCSI busses with someone else, and - * if we don't have a cable currently plugged into the controller then - * we won't have a power source for the SCSI termination, which means - * we'll see infinite incoming bus resets. - */ - if(p->flags & AHC_NO_STPWEN) - aic_outb(p, ENSELTIMO | ENSCSIPERR, SIMODE1); - else - aic_outb(p, ENSELTIMO | ENSCSIRST | ENSCSIPERR, SIMODE1); - aic_outb(p, 0, SCSIRATE); - if ( p->features & AHC_ULTRA2) - aic_outb(p, 0, SCSIOFFSET); - - /* - * Look at the information that board initialization or the board - * BIOS has left us. In the lower four bits of each target's - * scratch space any value other than 0 indicates that we should - * initiate synchronous transfers. If it's zero, the user or the - * BIOS has decided to disable synchronous negotiation to that - * target so we don't activate the needsdtr flag. - */ - if ((p->features & (AHC_TWIN|AHC_WIDE)) == 0) - { - max_targets = 8; - } - else - { - max_targets = 16; - } - - if (!(aic7xxx_no_reset)) - { - /* - * If we reset the bus, then clear the transfer settings, else leave - * them be. 
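 *
 * Editorial note: ULTRA_ENB is a 16-bit per-target Ultra enable mask
 * stored in two consecutive 8-bit registers, which is why both
 * ULTRA_ENB and ULTRA_ENB + 1 are written below and mirrored in the
 * driver's own p->ultraenb.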
- */ - aic_outb(p, 0, ULTRA_ENB); - aic_outb(p, 0, ULTRA_ENB + 1); - p->ultraenb = 0; - } - - /* - * Allocate enough hardware scbs to handle the maximum number of - * concurrent transactions we can have. We have to make sure that - * the allocated memory is contiguous memory. The Linux kmalloc - * routine should only allocate contiguous memory, but note that - * this could be a problem if kmalloc() is changed. - */ - { - size_t array_size; - unsigned int hscb_physaddr; - - array_size = p->scb_data->maxscbs * sizeof(struct aic7xxx_hwscb); - if (p->scb_data->hscbs == NULL) - { - /* pci_alloc_consistent enforces the alignment already and - * clears the area as well. - */ - p->scb_data->hscbs = pci_alloc_consistent(p->pdev, array_size, - &p->scb_data->hscbs_dma); - /* We have to use pci_free_consistent, not kfree */ - p->scb_data->hscb_kmalloc_ptr = NULL; - p->scb_data->hscbs_dma_len = array_size; - } - if (p->scb_data->hscbs == NULL) - { - printk("(scsi%d) Unable to allocate hardware SCB array; " - "failing detection.\n", p->host_no); - aic_outb(p, 0, SIMODE1); - p->irq = 0; - return(0); - } - - hscb_physaddr = p->scb_data->hscbs_dma; - aic_outb(p, hscb_physaddr & 0xFF, HSCB_ADDR); - aic_outb(p, (hscb_physaddr >> 8) & 0xFF, HSCB_ADDR + 1); - aic_outb(p, (hscb_physaddr >> 16) & 0xFF, HSCB_ADDR + 2); - aic_outb(p, (hscb_physaddr >> 24) & 0xFF, HSCB_ADDR + 3); - - /* Set up the fifo areas at the same time */ - p->untagged_scbs = pci_alloc_consistent(p->pdev, 3*256, &p->fifo_dma); - if (p->untagged_scbs == NULL) - { - printk("(scsi%d) Unable to allocate hardware FIFO arrays; " - "failing detection.\n", p->host_no); - p->irq = 0; - return(0); - } - - p->qoutfifo = p->untagged_scbs + 256; - p->qinfifo = p->qoutfifo + 256; - for (i = 0; i < 256; i++) - { - p->untagged_scbs[i] = SCB_LIST_NULL; - p->qinfifo[i] = SCB_LIST_NULL; - p->qoutfifo[i] = SCB_LIST_NULL; - } - - hscb_physaddr = p->fifo_dma; - aic_outb(p, hscb_physaddr & 0xFF, SCBID_ADDR); - aic_outb(p, (hscb_physaddr >> 8) & 0xFF, SCBID_ADDR + 1); - aic_outb(p, (hscb_physaddr >> 16) & 0xFF, SCBID_ADDR + 2); - aic_outb(p, (hscb_physaddr >> 24) & 0xFF, SCBID_ADDR + 3); - } - - /* The Q-FIFOs we just set up are all empty */ - aic_outb(p, 0, QINPOS); - aic_outb(p, 0, KERNEL_QINPOS); - aic_outb(p, 0, QOUTPOS); - - if(p->features & AHC_QUEUE_REGS) - { - aic_outb(p, SCB_QSIZE_256, QOFF_CTLSTA); - aic_outb(p, 0, SDSCB_QOFF); - aic_outb(p, 0, SNSCB_QOFF); - aic_outb(p, 0, HNSCB_QOFF); - } - - /* - * We don't have any waiting selections or disconnected SCBs. - */ - aic_outb(p, SCB_LIST_NULL, WAITING_SCBH); - aic_outb(p, SCB_LIST_NULL, DISCONNECTED_SCBH); - - /* - * Message out buffer starts empty - */ - aic_outb(p, MSG_NOOP, MSG_OUT); - aic_outb(p, MSG_NOOP, LAST_MSG); - - /* - * Set all the other asundry items that haven't been set yet. - * This includes just dumping init values to a lot of registers simply - * to make sure they've been touched and are ready for use parity wise - * speaking. - */ - aic_outb(p, 0, TMODE_CMDADDR); - aic_outb(p, 0, TMODE_CMDADDR + 1); - aic_outb(p, 0, TMODE_CMDADDR + 2); - aic_outb(p, 0, TMODE_CMDADDR + 3); - aic_outb(p, 0, TMODE_CMDADDR_NEXT); - - /* - * Link us into the list of valid hosts - */ - p->next = first_aic7xxx; - first_aic7xxx = p; - - /* - * Allocate the first set of scbs for this controller. This is to stream- - * line code elsewhere in the driver. If we have to check for the existence - * of scbs in certain code sections, it slows things down. 
However, as - * soon as we register the IRQ for this card, we could get an interrupt that - * includes possibly the SCSI_RSTI interrupt. If we catch that interrupt - * then we are likely to segfault if we don't have at least one chunk of - * SCBs allocated or add checks all through the reset code to make sure - * that the SCBs have been allocated which is an invalid running condition - * and therefore I think it's preferable to simply pre-allocate the first - * chunk of SCBs. - */ - aic7xxx_allocate_scb(p); - - /* - * Load the sequencer program, then re-enable the board - - * resetting the AIC-7770 disables it, leaving the lights - * on with nobody home. - */ - aic7xxx_loadseq(p); - - /* - * Make sure the AUTOFLUSHDIS bit is *not* set in the SBLKCTL register - */ - aic_outb(p, aic_inb(p, SBLKCTL) & ~AUTOFLUSHDIS, SBLKCTL); - - if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 ) - { - aic_outb(p, ENABLE, BCTL); /* Enable the boards BUS drivers. */ - } - - if ( !(aic7xxx_no_reset) ) - { - if (p->features & AHC_TWIN) - { - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk(KERN_INFO "(scsi%d) Resetting channel B\n", p->host_no); - aic_outb(p, aic_inb(p, SBLKCTL) | SELBUSB, SBLKCTL); - aic7xxx_reset_current_bus(p); - aic_outb(p, aic_inb(p, SBLKCTL) & ~SELBUSB, SBLKCTL); - } - /* Reset SCSI bus A. */ - if (aic7xxx_verbose & VERBOSE_PROBE2) - { /* In case we are a 3940, 3985, or 7895, print the right channel */ - char *channel = ""; - if (p->flags & AHC_MULTI_CHANNEL) - { - channel = " A"; - if (p->flags & (AHC_CHNLB|AHC_CHNLC)) - channel = (p->flags & AHC_CHNLB) ? " B" : " C"; - } - printk(KERN_INFO "(scsi%d) Resetting channel%s\n", p->host_no, channel); - } - - aic7xxx_reset_current_bus(p); - - } - else - { - if (!reset_delay) - { - printk(KERN_INFO "(scsi%d) Not resetting SCSI bus. Note: Don't use " - "the no_reset\n", p->host_no); - printk(KERN_INFO "(scsi%d) option unless you have a verifiable need " - "for it.\n", p->host_no); - } - } - - /* - * Register IRQ with the kernel. Only allow sharing IRQs with - * PCI devices. - */ - if (!(p->chip & AHC_PCI)) - { - result = (request_irq(p->irq, do_aic7xxx_isr, 0, "aic7xxx", p)); - } - else - { - result = (request_irq(p->irq, do_aic7xxx_isr, IRQF_SHARED, - "aic7xxx", p)); - if (result < 0) - { - result = (request_irq(p->irq, do_aic7xxx_isr, IRQF_DISABLED | IRQF_SHARED, - "aic7xxx", p)); - } - } - if (result < 0) - { - printk(KERN_WARNING "(scsi%d) Couldn't register IRQ %d, ignoring " - "controller.\n", p->host_no, p->irq); - aic_outb(p, 0, SIMODE1); - p->irq = 0; - return (0); - } - - if(aic_inb(p, INTSTAT) & INT_PEND) - printk(INFO_LEAD "spurious interrupt during configuration, cleared.\n", - p->host_no, -1, -1 , -1); - aic7xxx_clear_intstat(p); - - unpause_sequencer(p, /* unpause_always */ TRUE); - - return (found); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_chip_reset - * - * Description: - * Perform a chip reset on the aic7xxx SCSI controller. The controller - * is paused upon return. - *-F*************************************************************************/ -static int -aic7xxx_chip_reset(struct aic7xxx_host *p) -{ - unsigned char sblkctl; - int wait; - - /* - * For some 274x boards, we must clear the CHIPRST bit and pause - * the sequencer. For some reason, this makes the driver work. - */ - aic_outb(p, PAUSE | CHIPRST, HCNTRL); - - /* - * In the future, we may call this function as a last resort for - * error handling. Let's be nice and not do any unnecessary delays. 
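 *
 * Editorial note: the wait loop below is a bounded poll of up to 1000
 * iterations of udelay(1), i.e. roughly 1 ms in total (the original
 * inline comment's "1000 * 1 msec" should read "1000 * 1 usec"),
 * spinning until the chip acknowledges the reset via CHIPRSTACK.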
- */ - wait = 1000; /* 1 msec (1000 * 1 msec) */ - while (--wait && !(aic_inb(p, HCNTRL) & CHIPRSTACK)) - { - udelay(1); /* 1 usec */ - } - - pause_sequencer(p); - - sblkctl = aic_inb(p, SBLKCTL) & (SELBUSB|SELWIDE); - if (p->chip & AHC_PCI) - sblkctl &= ~SELBUSB; - switch( sblkctl ) - { - case 0: /* normal narrow card */ - break; - case 2: /* Wide card */ - p->features |= AHC_WIDE; - break; - case 8: /* Twin card */ - p->features |= AHC_TWIN; - p->flags |= AHC_MULTI_CHANNEL; - break; - default: /* hmmm...we don't know what this is */ - printk(KERN_WARNING "aic7xxx: Unsupported adapter type %d, ignoring.\n", - aic_inb(p, SBLKCTL) & 0x0a); - return(-1); - } - return(0); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_alloc - * - * Description: - * Allocate and initialize a host structure. Returns NULL upon error - * and a pointer to a aic7xxx_host struct upon success. - *-F*************************************************************************/ -static struct aic7xxx_host * -aic7xxx_alloc(struct scsi_host_template *sht, struct aic7xxx_host *temp) -{ - struct aic7xxx_host *p = NULL; - struct Scsi_Host *host; - - /* - * Allocate a storage area by registering us with the mid-level - * SCSI layer. - */ - host = scsi_register(sht, sizeof(struct aic7xxx_host)); - - if (host != NULL) - { - p = (struct aic7xxx_host *) host->hostdata; - memset(p, 0, sizeof(struct aic7xxx_host)); - *p = *temp; - p->host = host; - - p->scb_data = kzalloc(sizeof(scb_data_type), GFP_ATOMIC); - if (p->scb_data) - { - scbq_init (&p->scb_data->free_scbs); - } - else - { - /* - * For some reason we don't have enough memory. Free the - * allocated memory for the aic7xxx_host struct, and return NULL. - */ - release_region(p->base, MAXREG - MINREG); - scsi_unregister(host); - return(NULL); - } - p->host_no = host->host_no; - } - return (p); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_free - * - * Description: - * Frees and releases all resources associated with an instance of - * the driver (struct aic7xxx_host *). - *-F*************************************************************************/ -static void -aic7xxx_free(struct aic7xxx_host *p) -{ - int i; - - /* - * Free the allocated hardware SCB space. - */ - if (p->scb_data != NULL) - { - struct aic7xxx_scb_dma *scb_dma = NULL; - if (p->scb_data->hscbs != NULL) - { - pci_free_consistent(p->pdev, p->scb_data->hscbs_dma_len, - p->scb_data->hscbs, p->scb_data->hscbs_dma); - p->scb_data->hscbs = p->scb_data->hscb_kmalloc_ptr = NULL; - } - /* - * Free the driver SCBs. These were allocated on an as-need - * basis. We allocated these in groups depending on how many - * we could fit into a given amount of RAM. The tail SCB for - * these allocations has a pointer to the alloced area. - */ - for (i = 0; i < p->scb_data->numscbs; i++) - { - if (p->scb_data->scb_array[i]->scb_dma != scb_dma) - { - scb_dma = p->scb_data->scb_array[i]->scb_dma; - pci_free_consistent(p->pdev, scb_dma->dma_len, - (void *)((unsigned long)scb_dma->dma_address - - scb_dma->dma_offset), - scb_dma->dma_address); - } - kfree(p->scb_data->scb_array[i]->kmalloc_ptr); - p->scb_data->scb_array[i] = NULL; - } - - /* - * Free the SCB data area. 
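 *
 * Editorial note on the loop above: driver SCBs are allocated in
 * groups that share a single scb_dma descriptor, so the DMA-coherent
 * block is released only when scb_array[i]->scb_dma differs from the
 * previous entry's, while each SCB's kmalloc_ptr is freed
 * individually.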
- */ - kfree(p->scb_data); - } - - pci_free_consistent(p->pdev, 3*256, (void *)p->untagged_scbs, p->fifo_dma); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_load_seeprom - * - * Description: - * Load the seeprom and configure adapter and target settings. - * Returns 1 if the load was successful and 0 otherwise. - *-F*************************************************************************/ -static void -aic7xxx_load_seeprom(struct aic7xxx_host *p, unsigned char *sxfrctl1) -{ - int have_seeprom = 0; - int i, max_targets, mask; - unsigned char scsirate, scsi_conf; - unsigned short scarray[128]; - struct seeprom_config *sc = (struct seeprom_config *) scarray; - - if (aic7xxx_verbose & VERBOSE_PROBE2) - { - printk(KERN_INFO "aic7xxx: Loading serial EEPROM..."); - } - switch (p->chip) - { - case (AHC_AIC7770|AHC_EISA): /* None of these adapters have seeproms. */ - if (aic_inb(p, SCSICONF) & TERM_ENB) - p->flags |= AHC_TERM_ENB_A; - if ( (p->features & AHC_TWIN) && (aic_inb(p, SCSICONF + 1) & TERM_ENB) ) - p->flags |= AHC_TERM_ENB_B; - break; - - case (AHC_AIC7770|AHC_VL): - have_seeprom = read_284x_seeprom(p, (struct seeprom_config *) scarray); - break; - - default: - have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)), - scarray, p->sc_size, p->sc_type); - if (!have_seeprom) - { - if(p->sc_type == C46) - have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)), - scarray, p->sc_size, C56_66); - else - have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)), - scarray, p->sc_size, C46); - } - if (!have_seeprom) - { - p->sc_size = 128; - have_seeprom = read_seeprom(p, 4*(p->flags & (AHC_CHNLB|AHC_CHNLC)), - scarray, p->sc_size, p->sc_type); - if (!have_seeprom) - { - if(p->sc_type == C46) - have_seeprom = read_seeprom(p, 4*(p->flags & (AHC_CHNLB|AHC_CHNLC)), - scarray, p->sc_size, C56_66); - else - have_seeprom = read_seeprom(p, 4*(p->flags & (AHC_CHNLB|AHC_CHNLC)), - scarray, p->sc_size, C46); - } - } - break; - } - - if (!have_seeprom) - { - if (aic7xxx_verbose & VERBOSE_PROBE2) - { - printk("\naic7xxx: No SEEPROM available.\n"); - } - p->flags |= AHC_NEWEEPROM_FMT; - if (aic_inb(p, SCSISEQ) == 0) - { - p->flags |= AHC_USEDEFAULTS; - p->flags &= ~AHC_BIOS_ENABLED; - p->scsi_id = p->scsi_id_b = 7; - *sxfrctl1 |= STPWEN; - if (aic7xxx_verbose & VERBOSE_PROBE2) - { - printk("aic7xxx: Using default values.\n"); - } - } - else if (aic7xxx_verbose & VERBOSE_PROBE2) - { - printk("aic7xxx: Using leftover BIOS values.\n"); - } - if ( ((p->chip & ~AHC_CHIPID_MASK) == AHC_PCI) && (*sxfrctl1 & STPWEN) ) - { - p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH; - sc->adapter_control &= ~CFAUTOTERM; - sc->adapter_control |= CFSTERM | CFWSTERM | CFLVDSTERM; - } - if (aic7xxx_extended) - p->flags |= (AHC_EXTEND_TRANS_A | AHC_EXTEND_TRANS_B); - else - p->flags &= ~(AHC_EXTEND_TRANS_A | AHC_EXTEND_TRANS_B); - } - else - { - if (aic7xxx_verbose & VERBOSE_PROBE2) - { - printk("done\n"); - } - - /* - * Note things in our flags - */ - p->flags |= AHC_SEEPROM_FOUND; - - /* - * Update the settings in sxfrctl1 to match the termination settings. - */ - *sxfrctl1 = 0; - - /* - * Get our SCSI ID from the SEEPROM setting... - */ - p->scsi_id = (sc->brtime_id & CFSCSIID); - - /* - * First process the settings that are different between the VLB - * and PCI adapter seeproms. 
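 *
 * Editorial summary of the branch below - the two layouts spell the
 * same decisions with different bits:
 *
 *     VLB (284x): CF284XEXTEND -> extended translation
 *                 CF284XSTERM  -> SE termination (STPWEN)
 *     PCI:        CFEXTEND     -> extended translation
 *                 CFBIOSEN     -> BIOS enabled
 *                 CFSTERM      -> SE termination (STPWEN)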
- */ - if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7770) - { - /* VLB adapter seeproms */ - if (sc->bios_control & CF284XEXTEND) - p->flags |= AHC_EXTEND_TRANS_A; - - if (sc->adapter_control & CF284XSTERM) - { - *sxfrctl1 |= STPWEN; - p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH; - } - } - else - { - /* PCI adapter seeproms */ - if (sc->bios_control & CFEXTEND) - p->flags |= AHC_EXTEND_TRANS_A; - if (sc->bios_control & CFBIOSEN) - p->flags |= AHC_BIOS_ENABLED; - else - p->flags &= ~AHC_BIOS_ENABLED; - - if (sc->adapter_control & CFSTERM) - { - *sxfrctl1 |= STPWEN; - p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH; - } - } - memcpy(&p->sc, sc, sizeof(struct seeprom_config)); - } - - p->discenable = 0; - - /* - * Limit to 16 targets just in case. The 2842 for one is known to - * blow the max_targets setting, future cards might also. - */ - max_targets = ((p->features & (AHC_TWIN | AHC_WIDE)) ? 16 : 8); - - if (have_seeprom) - { - for (i = 0; i < max_targets; i++) - { - if( ((p->features & AHC_ULTRA) && - !(sc->adapter_control & CFULTRAEN) && - (sc->device_flags[i] & CFSYNCHISULTRA)) || - (sc->device_flags[i] & CFNEWULTRAFORMAT) ) - { - p->flags |= AHC_NEWEEPROM_FMT; - break; - } - } - } - - for (i = 0; i < max_targets; i++) - { - mask = (0x01 << i); - if (!have_seeprom) - { - if (aic_inb(p, SCSISEQ) != 0) - { - /* - * OK...the BIOS set things up and left behind the settings we need. - * Just make our sc->device_flags[i] entry match what the card has - * set for this device. - */ - p->discenable = - ~(aic_inb(p, DISC_DSB) | (aic_inb(p, DISC_DSB + 1) << 8) ); - p->ultraenb = - (aic_inb(p, ULTRA_ENB) | (aic_inb(p, ULTRA_ENB + 1) << 8) ); - sc->device_flags[i] = (p->discenable & mask) ? CFDISC : 0; - if (aic_inb(p, TARG_SCSIRATE + i) & WIDEXFER) - sc->device_flags[i] |= CFWIDEB; - if (p->features & AHC_ULTRA2) - { - if (aic_inb(p, TARG_OFFSET + i)) - { - sc->device_flags[i] |= CFSYNCH; - sc->device_flags[i] |= (aic_inb(p, TARG_SCSIRATE + i) & 0x07); - if ( (aic_inb(p, TARG_SCSIRATE + i) & 0x18) == 0x18 ) - sc->device_flags[i] |= CFSYNCHISULTRA; - } - } - else - { - if (aic_inb(p, TARG_SCSIRATE + i) & ~WIDEXFER) - { - sc->device_flags[i] |= CFSYNCH; - if (p->features & AHC_ULTRA) - sc->device_flags[i] |= ((p->ultraenb & mask) ? - CFSYNCHISULTRA : 0); - } - } - } - else - { - /* - * Assume the BIOS has NOT been run on this card and nothing between - * the card and the devices is configured yet. - */ - sc->device_flags[i] = CFDISC; - if (p->features & AHC_WIDE) - sc->device_flags[i] |= CFWIDEB; - if (p->features & AHC_ULTRA3) - sc->device_flags[i] |= 2; - else if (p->features & AHC_ULTRA2) - sc->device_flags[i] |= 3; - else if (p->features & AHC_ULTRA) - sc->device_flags[i] |= CFSYNCHISULTRA; - sc->device_flags[i] |= CFSYNCH; - aic_outb(p, 0, TARG_SCSIRATE + i); - if (p->features & AHC_ULTRA2) - aic_outb(p, 0, TARG_OFFSET + i); - } - } - if (sc->device_flags[i] & CFDISC) - { - p->discenable |= mask; - } - if (p->flags & AHC_NEWEEPROM_FMT) - { - if ( !(p->features & AHC_ULTRA2) ) - { - /* - * I know of two different Ultra BIOSes that do this differently. - * One on the Gigabyte 6BXU mb that wants flags[i] & CFXFER to - * be == to 0x03 and SYNCHISULTRA to be true to mean 40MByte/s - * while on the IBM Netfinity 5000 they want the same thing - * to be something else, while flags[i] & CFXFER == 0x03 and - * SYNCHISULTRA false should be 40MByte/s. So, we set both to - * 40MByte/s and the lower speeds be damned. 
People will have - * to select around the conversely mapped lower speeds in order - * to select lower speeds on these boards. - */ - if ( (sc->device_flags[i] & CFNEWULTRAFORMAT) && - ((sc->device_flags[i] & CFXFER) == 0x03) ) - { - sc->device_flags[i] &= ~CFXFER; - sc->device_flags[i] |= CFSYNCHISULTRA; - } - if (sc->device_flags[i] & CFSYNCHISULTRA) - { - p->ultraenb |= mask; - } - } - else if ( !(sc->device_flags[i] & CFNEWULTRAFORMAT) && - (p->features & AHC_ULTRA2) && - (sc->device_flags[i] & CFSYNCHISULTRA) ) - { - p->ultraenb |= mask; - } - } - else if (sc->adapter_control & CFULTRAEN) - { - p->ultraenb |= mask; - } - if ( (sc->device_flags[i] & CFSYNCH) == 0) - { - sc->device_flags[i] &= ~CFXFER; - p->ultraenb &= ~mask; - p->user[i].offset = 0; - p->user[i].period = 0; - p->user[i].options = 0; - } - else - { - if (p->features & AHC_ULTRA3) - { - p->user[i].offset = MAX_OFFSET_ULTRA2; - if( (sc->device_flags[i] & CFXFER) < 0x03 ) - { - scsirate = (sc->device_flags[i] & CFXFER); - p->user[i].options = MSG_EXT_PPR_OPTION_DT_CRC; - } - else - { - scsirate = (sc->device_flags[i] & CFXFER) | - ((p->ultraenb & mask) ? 0x18 : 0x10); - p->user[i].options = 0; - } - p->user[i].period = aic7xxx_find_period(p, scsirate, - AHC_SYNCRATE_ULTRA3); - } - else if (p->features & AHC_ULTRA2) - { - p->user[i].offset = MAX_OFFSET_ULTRA2; - scsirate = (sc->device_flags[i] & CFXFER) | - ((p->ultraenb & mask) ? 0x18 : 0x10); - p->user[i].options = 0; - p->user[i].period = aic7xxx_find_period(p, scsirate, - AHC_SYNCRATE_ULTRA2); - } - else - { - scsirate = (sc->device_flags[i] & CFXFER) << 4; - p->user[i].options = 0; - p->user[i].offset = MAX_OFFSET_8BIT; - if (p->features & AHC_ULTRA) - { - short ultraenb; - ultraenb = aic_inb(p, ULTRA_ENB) | - (aic_inb(p, ULTRA_ENB + 1) << 8); - p->user[i].period = aic7xxx_find_period(p, scsirate, - (p->ultraenb & mask) ? - AHC_SYNCRATE_ULTRA : - AHC_SYNCRATE_FAST); - } - else - p->user[i].period = aic7xxx_find_period(p, scsirate, - AHC_SYNCRATE_FAST); - } - } - if ( (sc->device_flags[i] & CFWIDEB) && (p->features & AHC_WIDE) ) - { - p->user[i].width = MSG_EXT_WDTR_BUS_16_BIT; - } - else - { - p->user[i].width = MSG_EXT_WDTR_BUS_8_BIT; - } - } - aic_outb(p, ~(p->discenable & 0xFF), DISC_DSB); - aic_outb(p, ~((p->discenable >> 8) & 0xFF), DISC_DSB + 1); - - /* - * We set the p->ultraenb from the SEEPROM to begin with, but now we make - * it match what is already down in the card. If we are doing a reset - * on the card then this will get put back to a default state anyway. - * This allows us to not have to pre-emptively negotiate when using the - * no_reset option. - */ - if (p->features & AHC_ULTRA) - p->ultraenb = aic_inb(p, ULTRA_ENB) | (aic_inb(p, ULTRA_ENB + 1) << 8); - - - scsi_conf = (p->scsi_id & HSCSIID); - - if(have_seeprom) - { - p->adapter_control = sc->adapter_control; - p->bios_control = sc->bios_control; - - switch (p->chip & AHC_CHIPID_MASK) - { - case AHC_AIC7895: - case AHC_AIC7896: - case AHC_AIC7899: - if (p->adapter_control & CFBPRIMARY) - p->flags |= AHC_CHANNEL_B_PRIMARY; - default: - break; - } - - if (sc->adapter_control & CFSPARITY) - scsi_conf |= ENSPCHK; - } - else - { - scsi_conf |= ENSPCHK | RESET_SCSI; - } - - /* - * Only set the SCSICONF and SCSICONF + 1 registers if we are a PCI card. - * The 2842 and 2742 cards already have these registers set and we don't - * want to muck with them since we don't set all the bits they do. 
- */ - if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI ) - { - /* Set the host ID */ - aic_outb(p, scsi_conf, SCSICONF); - /* In case we are a wide card */ - aic_outb(p, p->scsi_id, SCSICONF + 1); - } -} - -/*+F************************************************************************* - * Function: - * aic7xxx_configure_bugs - * - * Description: - * Take the card passed in and set the appropriate bug flags based upon - * the card model. Also make any changes needed to device registers or - * PCI registers while we are here. - *-F*************************************************************************/ -static void -aic7xxx_configure_bugs(struct aic7xxx_host *p) -{ - unsigned short tmp_word; - - switch(p->chip & AHC_CHIPID_MASK) - { - case AHC_AIC7860: - p->bugs |= AHC_BUG_PCI_2_1_RETRY; - /* fall through */ - case AHC_AIC7850: - case AHC_AIC7870: - p->bugs |= AHC_BUG_TMODE_WIDEODD | AHC_BUG_CACHETHEN | AHC_BUG_PCI_MWI; - break; - case AHC_AIC7880: - p->bugs |= AHC_BUG_TMODE_WIDEODD | AHC_BUG_PCI_2_1_RETRY | - AHC_BUG_CACHETHEN | AHC_BUG_PCI_MWI; - break; - case AHC_AIC7890: - p->bugs |= AHC_BUG_AUTOFLUSH | AHC_BUG_CACHETHEN; - break; - case AHC_AIC7892: - p->bugs |= AHC_BUG_SCBCHAN_UPLOAD; - break; - case AHC_AIC7895: - p->bugs |= AHC_BUG_TMODE_WIDEODD | AHC_BUG_PCI_2_1_RETRY | - AHC_BUG_CACHETHEN | AHC_BUG_PCI_MWI; - break; - case AHC_AIC7896: - p->bugs |= AHC_BUG_CACHETHEN_DIS; - break; - case AHC_AIC7899: - p->bugs |= AHC_BUG_SCBCHAN_UPLOAD; - break; - default: - /* Nothing to do */ - break; - } - - /* - * Now handle the bugs that require PCI register or card register tweaks - */ - pci_read_config_word(p->pdev, PCI_COMMAND, &tmp_word); - if(p->bugs & AHC_BUG_PCI_MWI) - { - tmp_word &= ~PCI_COMMAND_INVALIDATE; - } - else - { - tmp_word |= PCI_COMMAND_INVALIDATE; - } - pci_write_config_word(p->pdev, PCI_COMMAND, tmp_word); - - if(p->bugs & AHC_BUG_CACHETHEN) - { - aic_outb(p, aic_inb(p, DSCOMMAND0) & ~CACHETHEN, DSCOMMAND0); - } - else if (p->bugs & AHC_BUG_CACHETHEN_DIS) - { - aic_outb(p, aic_inb(p, DSCOMMAND0) | CACHETHEN, DSCOMMAND0); - } - - return; -} - - -/*+F************************************************************************* - * Function: - * aic7xxx_detect - * - * Description: - * Try to detect and register an Adaptec 7770 or 7870 SCSI controller. - * - * XXX - This should really be called aic7xxx_probe(). A sequence of - * probe(), attach()/detach(), and init() makes more sense than - * one do-it-all function. This may be useful when (and if) the - * mid-level SCSI code is overhauled. - *-F*************************************************************************/ -static int -aic7xxx_detect(struct scsi_host_template *template) -{ - struct aic7xxx_host *temp_p = NULL; - struct aic7xxx_host *current_p = NULL; - struct aic7xxx_host *list_p = NULL; - int found = 0; -#if defined(__i386__) || defined(__alpha__) - ahc_flag_type flags = 0; - int type; -#endif - unsigned char sxfrctl1; -#if defined(__i386__) || defined(__alpha__) - unsigned char hcntrl, hostconf; - unsigned int slot, base; -#endif - -#ifdef MODULE - /* - * If we are called as a module, the aic7xxx pointer may not be null - * and it would point to our bootup string, just like on the lilo - * command line. IF not NULL, then process this config string with - * aic7xxx_setup - */ - if(aic7xxx) - aic7xxx_setup(aic7xxx); -#endif - - template->proc_name = "aic7xxx"; - template->sg_tablesize = AIC7XXX_MAX_SG; - - -#ifdef CONFIG_PCI - /* - * PCI-bus probe. 
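 *
 * Editorial roadmap: the aic_pdevs[] table that follows drives the
 * whole probe.  Each entry records a vendor/device ID pair, the chip
 * generation, its default flags and feature bits, an index into
 * board_names[], and the expected seeprom size and type.  The probe
 * walks the table in order, iterates every matching PCI device,
 * skips the unsupported 7810 RAID part, and discards duplicate
 * bus/devfn or address matches.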
- */ - { - static struct - { - unsigned short vendor_id; - unsigned short device_id; - ahc_chip chip; - ahc_flag_type flags; - ahc_feature features; - int board_name_index; - unsigned short seeprom_size; - unsigned short seeprom_type; - } const aic_pdevs[] = { - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7810, AHC_NONE, - AHC_FNONE, AHC_FENONE, 1, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7850, AHC_AIC7850, - AHC_PAGESCBS, AHC_AIC7850_FE, 5, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7855, AHC_AIC7850, - AHC_PAGESCBS, AHC_AIC7850_FE, 6, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7821, AHC_AIC7860, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, - AHC_AIC7860_FE, 7, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_3860, AHC_AIC7860, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, - AHC_AIC7860_FE, 7, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_38602, AHC_AIC7860, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, - AHC_AIC7860_FE, 7, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_38602, AHC_AIC7860, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, - AHC_AIC7860_FE, 7, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7860, AHC_AIC7860, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MOTHERBOARD, - AHC_AIC7860_FE, 7, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7861, AHC_AIC7860, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, - AHC_AIC7860_FE, 8, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7870, AHC_AIC7870, - AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MOTHERBOARD, - AHC_AIC7870_FE, 9, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7871, AHC_AIC7870, - AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7870_FE, 10, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7872, AHC_AIC7870, - AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, - AHC_AIC7870_FE, 11, - 32, C56_66 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7873, AHC_AIC7870, - AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, - AHC_AIC7870_FE, 12, - 32, C56_66 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7874, AHC_AIC7870, - AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7870_FE, 13, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7880, AHC_AIC7880, - AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MOTHERBOARD, - AHC_AIC7880_FE, 14, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7881, AHC_AIC7880, - AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 15, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7882, AHC_AIC7880, - AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, - AHC_AIC7880_FE, 16, - 32, C56_66 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7883, AHC_AIC7880, - AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, - AHC_AIC7880_FE, 17, - 32, C56_66 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7884, AHC_AIC7880, - AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7885, AHC_AIC7880, - AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7886, AHC_AIC7880, - AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7887, AHC_AIC7880, - AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE | AHC_NEW_AUTOTERM, 19, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7888, AHC_AIC7880, - AHC_PAGESCBS | 
AHC_BIOS_ENABLED, AHC_AIC7880_FE, 18, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7895, AHC_AIC7895, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, - AHC_AIC7895_FE, 20, - 32, C56_66 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7890, AHC_AIC7890, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, - AHC_AIC7890_FE, 21, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7890B, AHC_AIC7890, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, - AHC_AIC7890_FE, 21, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_2930U2, AHC_AIC7890, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, - AHC_AIC7890_FE, 22, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_2940U2, AHC_AIC7890, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, - AHC_AIC7890_FE, 23, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7896, AHC_AIC7896, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, - AHC_AIC7896_FE, 24, - 32, C56_66 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_3940U2, AHC_AIC7896, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, - AHC_AIC7896_FE, 25, - 32, C56_66 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_3950U2D, AHC_AIC7896, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, - AHC_AIC7896_FE, 26, - 32, C56_66 }, - {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_1480A, AHC_AIC7860, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_NO_STPWEN, - AHC_AIC7860_FE, 27, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892A, AHC_AIC7892, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, - AHC_AIC7892_FE, 28, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892B, AHC_AIC7892, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, - AHC_AIC7892_FE, 28, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892D, AHC_AIC7892, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, - AHC_AIC7892_FE, 28, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892P, AHC_AIC7892, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, - AHC_AIC7892_FE, 28, - 32, C46 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899A, AHC_AIC7899, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, - AHC_AIC7899_FE, 29, - 32, C56_66 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899B, AHC_AIC7899, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, - AHC_AIC7899_FE, 29, - 32, C56_66 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899D, AHC_AIC7899, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, - AHC_AIC7899_FE, 29, - 32, C56_66 }, - {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899P, AHC_AIC7899, - AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, - AHC_AIC7899_FE, 29, - 32, C56_66 }, - }; - - unsigned short command; - unsigned int devconfig, i, oldverbose; - struct pci_dev *pdev = NULL; - - for (i = 0; i < ARRAY_SIZE(aic_pdevs); i++) - { - pdev = NULL; - while ((pdev = pci_get_device(aic_pdevs[i].vendor_id, - aic_pdevs[i].device_id, - pdev))) { - if (pci_enable_device(pdev)) - continue; - if ( i == 0 ) /* We found one, but it's the 7810 RAID cont. 
*/ - { - if (aic7xxx_verbose & (VERBOSE_PROBE|VERBOSE_PROBE2)) - { - printk(KERN_INFO "aic7xxx: The 7810 RAID controller is not " - "supported by\n"); - printk(KERN_INFO " this driver, we are ignoring it.\n"); - } - } - else if ( (temp_p = kzalloc(sizeof(struct aic7xxx_host), - GFP_ATOMIC)) != NULL ) - { - temp_p->chip = aic_pdevs[i].chip | AHC_PCI; - temp_p->flags = aic_pdevs[i].flags; - temp_p->features = aic_pdevs[i].features; - temp_p->board_name_index = aic_pdevs[i].board_name_index; - temp_p->sc_size = aic_pdevs[i].seeprom_size; - temp_p->sc_type = aic_pdevs[i].seeprom_type; - - /* - * Read sundry information from PCI BIOS. - */ - temp_p->irq = pdev->irq; - temp_p->pdev = pdev; - temp_p->pci_bus = pdev->bus->number; - temp_p->pci_device_fn = pdev->devfn; - temp_p->base = pci_resource_start(pdev, 0); - temp_p->mbase = pci_resource_start(pdev, 1); - current_p = list_p; - while(current_p && temp_p) - { - if ( ((current_p->pci_bus == temp_p->pci_bus) && - (current_p->pci_device_fn == temp_p->pci_device_fn)) || - (temp_p->base && (current_p->base == temp_p->base)) || - (temp_p->mbase && (current_p->mbase == temp_p->mbase)) ) - { - /* duplicate PCI entry, skip it */ - kfree(temp_p); - temp_p = NULL; - continue; - } - current_p = current_p->next; - } - if(pci_request_regions(temp_p->pdev, "aic7xxx")) - { - printk("aic7xxx: <%s> at PCI %d/%d/%d\n", - board_names[aic_pdevs[i].board_name_index], - temp_p->pci_bus, - PCI_SLOT(temp_p->pci_device_fn), - PCI_FUNC(temp_p->pci_device_fn)); - printk("aic7xxx: I/O ports already in use, ignoring.\n"); - kfree(temp_p); - continue; - } - - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk("aic7xxx: <%s> at PCI %d/%d\n", - board_names[aic_pdevs[i].board_name_index], - PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn)); - pci_read_config_word(pdev, PCI_COMMAND, &command); - if (aic7xxx_verbose & VERBOSE_PROBE2) - { - printk("aic7xxx: Initial PCI_COMMAND value was 0x%x\n", - (int)command); - } -#ifdef AIC7XXX_STRICT_PCI_SETUP - command |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | - PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO; -#else - command |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO; -#endif - command &= ~PCI_COMMAND_INVALIDATE; - if (aic7xxx_pci_parity == 0) - command &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY); - pci_write_config_word(pdev, PCI_COMMAND, command); -#ifdef AIC7XXX_STRICT_PCI_SETUP - pci_read_config_dword(pdev, DEVCONFIG, &devconfig); - if (aic7xxx_verbose & VERBOSE_PROBE2) - { - printk("aic7xxx: Initial DEVCONFIG value was 0x%x\n", devconfig); - } - devconfig |= 0x80000040; - pci_write_config_dword(pdev, DEVCONFIG, devconfig); -#endif /* AIC7XXX_STRICT_PCI_SETUP */ - - temp_p->unpause = INTEN; - temp_p->pause = temp_p->unpause | PAUSE; - if ( ((temp_p->base == 0) && - (temp_p->mbase == 0)) || - (temp_p->irq == 0) ) - { - printk("aic7xxx: <%s> at PCI %d/%d/%d\n", - board_names[aic_pdevs[i].board_name_index], - temp_p->pci_bus, - PCI_SLOT(temp_p->pci_device_fn), - PCI_FUNC(temp_p->pci_device_fn)); - printk("aic7xxx: Controller disabled by BIOS, ignoring.\n"); - goto skip_pci_controller; - } - -#ifdef MMAPIO - if ( !(temp_p->base) || !(temp_p->flags & AHC_MULTI_CHANNEL) || - ((temp_p->chip != (AHC_AIC7870 | AHC_PCI)) && - (temp_p->chip != (AHC_AIC7880 | AHC_PCI))) ) - { - temp_p->maddr = ioremap_nocache(temp_p->mbase, 256); - if(temp_p->maddr) - { - /* - * We need to check the I/O with the MMAPed address. Some machines - * simply fail to work with MMAPed I/O and certain controllers. 
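 *
 * (Editor's aside, not part of the original source: the test that
 *  follows reads HCNTRL through the fresh mapping and treats an
 *  all-ones answer as "nobody home", because a read nothing claims
 *  typically floats to 0xff.  The shape of the check is:
 *
 *      void __iomem *maddr = ioremap(mbase, 256);  // 256-byte window
 *      if (maddr && readb(maddr + HCNTRL) == 0xff) {
 *              iounmap(maddr);        // MMIO unusable on this setup,
 *              maddr = NULL;          // fall back to port I/O below
 *      }
 *
 *  ioremap_nocache(), used in the code below, was the era-appropriate
 *  spelling of today's ioremap().)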
- */ - if(aic_inb(temp_p, HCNTRL) == 0xff) - { - /* - * OK.....we failed our test....go back to programmed I/O - */ - printk(KERN_INFO "aic7xxx: <%s> at PCI %d/%d/%d\n", - board_names[aic_pdevs[i].board_name_index], - temp_p->pci_bus, - PCI_SLOT(temp_p->pci_device_fn), - PCI_FUNC(temp_p->pci_device_fn)); - printk(KERN_INFO "aic7xxx: MMAPed I/O failed, reverting to " - "Programmed I/O.\n"); - iounmap(temp_p->maddr); - temp_p->maddr = NULL; - if(temp_p->base == 0) - { - printk("aic7xxx: <%s> at PCI %d/%d/%d\n", - board_names[aic_pdevs[i].board_name_index], - temp_p->pci_bus, - PCI_SLOT(temp_p->pci_device_fn), - PCI_FUNC(temp_p->pci_device_fn)); - printk("aic7xxx: Controller disabled by BIOS, ignoring.\n"); - goto skip_pci_controller; - } - } - } - } -#endif - - /* - * We HAVE to make sure the first pause_sequencer() and all other - * subsequent I/O that isn't PCI config space I/O takes place - * after the MMAPed I/O region is configured and tested. The - * problem is the PowerPC architecture that doesn't support - * programmed I/O at all, so we have to have the MMAP I/O set up - * for this pause to even work on those machines. - */ - pause_sequencer(temp_p); - - /* - * Clear out any pending PCI error status messages. Also set - * verbose to 0 so that we don't emit strange PCI error messages - * while cleaning out the current status bits. - */ - oldverbose = aic7xxx_verbose; - aic7xxx_verbose = 0; - aic7xxx_pci_intr(temp_p); - aic7xxx_verbose = oldverbose; - - temp_p->bios_address = 0; - - /* - * Remember how the card was setup in case there is no seeprom. - */ - if (temp_p->features & AHC_ULTRA2) - temp_p->scsi_id = aic_inb(temp_p, SCSIID_ULTRA2) & OID; - else - temp_p->scsi_id = aic_inb(temp_p, SCSIID) & OID; - /* - * Get current termination setting - */ - sxfrctl1 = aic_inb(temp_p, SXFRCTL1); - - if (aic7xxx_chip_reset(temp_p) == -1) - { - goto skip_pci_controller; - } - /* - * Very quickly put the term setting back into the register since - * the chip reset may cause odd things to happen. This is to keep - * LVD busses with lots of drives from draining the power out of - * the diffsense line before we get around to running the - * configure_termination() function. Also restore the STPWLEVEL - * bit of DEVCONFIG - */ - aic_outb(temp_p, sxfrctl1, SXFRCTL1); - pci_write_config_dword(temp_p->pdev, DEVCONFIG, devconfig); - sxfrctl1 &= STPWEN; - - /* - * We need to set the CHNL? assignments before loading the SEEPROM - * The 3940 and 3985 cards (original stuff, not any of the later - * stuff) are 7870 and 7880 class chips. The Ultra2 stuff falls - * under 7896 and 7897. The 7895 is in a class by itself :) - */ - switch (temp_p->chip & AHC_CHIPID_MASK) - { - case AHC_AIC7870: /* 3840 / 3985 */ - case AHC_AIC7880: /* 3840 UW / 3985 UW */ - if(temp_p->flags & AHC_MULTI_CHANNEL) - { - switch(PCI_SLOT(temp_p->pci_device_fn)) - { - case 5: - temp_p->flags |= AHC_CHNLB; - break; - case 8: - temp_p->flags |= AHC_CHNLB; - break; - case 12: - temp_p->flags |= AHC_CHNLC; - break; - default: - break; - } - } - break; - - case AHC_AIC7895: /* 7895 */ - case AHC_AIC7896: /* 7896/7 */ - case AHC_AIC7899: /* 7899 */ - if (PCI_FUNC(pdev->devfn) != 0) - { - temp_p->flags |= AHC_CHNLB; - } - /* - * The 7895 is the only chipset that sets the SCBSIZE32 param - * in the DEVCONFIG register. The Ultra2 chipsets use - * the DSCOMMAND0 register instead. 
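 *
 * (Editor's aside, not part of the original source: the PCI_SLOT()
 *  and PCI_FUNC() tests used for the CHNLB/CHNLC assignments above
 *  just split the 8-bit devfn encoding, five bits of slot and three
 *  of function:
 *
 *      #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
 *      #define PCI_FUNC(devfn) ((devfn) & 0x07)
 *
 *  so on a multi-channel package each channel shows up as its own
 *  PCI function in the same slot.)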
- */
-          if ((temp_p->chip & AHC_CHIPID_MASK) == AHC_AIC7895)
-          {
-            pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
-            devconfig |= SCBSIZE32;
-            pci_write_config_dword(pdev, DEVCONFIG, devconfig);
-          }
-          break;
-        default:
-          break;
-      }
-
-      /*
-       * Loading of the SEEPROM needs to come after we've set the flags
-       * to indicate possible CHNLB and CHNLC assignments.  Otherwise,
-       * on 394x and 398x cards we'll end up reading the wrong settings
-       * for channels B and C.
-       */
-      switch (temp_p->chip & AHC_CHIPID_MASK)
-      {
-        case AHC_AIC7892:
-        case AHC_AIC7899:
-          aic_outb(temp_p, 0, SCAMCTL);
-          /*
-           * Switch to the alt mode of the chip...
-           */
-          aic_outb(temp_p, aic_inb(temp_p, SFUNCT) | ALT_MODE, SFUNCT);
-          /*
-           * Set our options...the last two items set our CRC after x byte
-           * count in target mode...
-           */
-          aic_outb(temp_p, AUTO_MSGOUT_DE | DIS_MSGIN_DUALEDGE, OPTIONMODE);
-          aic_outb(temp_p, 0x00, 0x0b);
-          aic_outb(temp_p, 0x10, 0x0a);
-          /*
-           * switch back to normal mode...
-           */
-          aic_outb(temp_p, aic_inb(temp_p, SFUNCT) & ~ALT_MODE, SFUNCT);
-          aic_outb(temp_p, CRCVALCHKEN | CRCENDCHKEN | CRCREQCHKEN |
-                           TARGCRCENDEN | TARGCRCCNTEN,
-                   CRCCONTROL1);
-          aic_outb(temp_p, ((aic_inb(temp_p, DSCOMMAND0) | USCBSIZE32 |
-                             MPARCKEN | CIOPARCKEN | CACHETHEN) &
-                            ~DPARCKEN), DSCOMMAND0);
-          aic7xxx_load_seeprom(temp_p, &sxfrctl1);
-          break;
-        case AHC_AIC7890:
-        case AHC_AIC7896:
-          aic_outb(temp_p, 0, SCAMCTL);
-          aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
-                            CACHETHEN | MPARCKEN | USCBSIZE32 |
-                            CIOPARCKEN) & ~DPARCKEN, DSCOMMAND0);
-          aic7xxx_load_seeprom(temp_p, &sxfrctl1);
-          break;
-        case AHC_AIC7850:
-        case AHC_AIC7860:
-          /*
-           * Set the DSCOMMAND0 register on these cards differently from
-           * on the 789x cards.  Also, read the SEEPROM as well.
-           */
-          aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
-                            CACHETHEN | MPARCKEN) & ~DPARCKEN,
-                   DSCOMMAND0);
-          /* FALLTHROUGH */
-        default:
-          aic7xxx_load_seeprom(temp_p, &sxfrctl1);
-          break;
-        case AHC_AIC7880:
-          /*
-           * Check the rev of the chipset before we change DSCOMMAND0
-           */
-          pci_read_config_dword(pdev, DEVCONFIG, &devconfig);
-          if ((devconfig & 0xff) >= 1)
-          {
-            aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) |
-                              CACHETHEN | MPARCKEN) & ~DPARCKEN,
-                     DSCOMMAND0);
-          }
-          aic7xxx_load_seeprom(temp_p, &sxfrctl1);
-          break;
-      }
-
-
-      /*
-       * and then we need another switch based on the type in order to
-       * make sure the channel B primary flag is set properly on 7895
-       * controllers....Arrrgggghhh!!!  We also have to catch the fact
-       * that when you disable the BIOS on the 7895 on the Intel DK440LX
-       * motherboard, and possibly others, it only sets the BIOS disabled
-       * bit on the A channel...I think I'm starting to lean towards
-       * going postal....
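 *
 * (Editor's aside, not part of the original source: the pass below
 *  pairs the two PCI functions of one physical card.  The match it
 *  performs, pulled out into a hypothetical helper, is nothing more
 *  than:
 *
 *      // same bus and same slot => same 7895/6/9 package
 *      static int aic7xxx_same_package(struct aic7xxx_host *a,
 *                                      struct aic7xxx_host *b)
 *      {
 *              return (a->pci_bus == b->pci_bus) &&
 *                     (PCI_SLOT(a->pci_device_fn) ==
 *                      PCI_SLOT(b->pci_device_fn));
 *      }
 *
 *  with function 0 treated as the authority for the BIOS-enabled and
 *  use-defaults flags.)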
- */ - switch(temp_p->chip & AHC_CHIPID_MASK) - { - case AHC_AIC7895: - case AHC_AIC7896: - case AHC_AIC7899: - current_p = list_p; - while(current_p != NULL) - { - if ( (current_p->pci_bus == temp_p->pci_bus) && - (PCI_SLOT(current_p->pci_device_fn) == - PCI_SLOT(temp_p->pci_device_fn)) ) - { - if ( PCI_FUNC(current_p->pci_device_fn) == 0 ) - { - temp_p->flags |= - (current_p->flags & AHC_CHANNEL_B_PRIMARY); - temp_p->flags &= ~(AHC_BIOS_ENABLED|AHC_USEDEFAULTS); - temp_p->flags |= - (current_p->flags & (AHC_BIOS_ENABLED|AHC_USEDEFAULTS)); - } - else - { - current_p->flags |= - (temp_p->flags & AHC_CHANNEL_B_PRIMARY); - current_p->flags &= ~(AHC_BIOS_ENABLED|AHC_USEDEFAULTS); - current_p->flags |= - (temp_p->flags & (AHC_BIOS_ENABLED|AHC_USEDEFAULTS)); - } - } - current_p = current_p->next; - } - break; - default: - break; - } - - /* - * We only support external SCB RAM on the 7895/6/7 chipsets. - * We could support it on the 7890/1 easy enough, but I don't - * know of any 7890/1 based cards that have it. I do know - * of 7895/6/7 cards that have it and they work properly. - */ - switch(temp_p->chip & AHC_CHIPID_MASK) - { - default: - break; - case AHC_AIC7895: - case AHC_AIC7896: - case AHC_AIC7899: - pci_read_config_dword(pdev, DEVCONFIG, &devconfig); - if (temp_p->features & AHC_ULTRA2) - { - if ( (aic_inb(temp_p, DSCOMMAND0) & RAMPSM_ULTRA2) && - (aic7xxx_scbram) ) - { - aic_outb(temp_p, - aic_inb(temp_p, DSCOMMAND0) & ~SCBRAMSEL_ULTRA2, - DSCOMMAND0); - temp_p->flags |= AHC_EXTERNAL_SRAM; - devconfig |= EXTSCBPEN; - } - else if (aic_inb(temp_p, DSCOMMAND0) & RAMPSM_ULTRA2) - { - printk(KERN_INFO "aic7xxx: <%s> at PCI %d/%d/%d\n", - board_names[aic_pdevs[i].board_name_index], - temp_p->pci_bus, - PCI_SLOT(temp_p->pci_device_fn), - PCI_FUNC(temp_p->pci_device_fn)); - printk("aic7xxx: external SCB RAM detected, " - "but not enabled\n"); - } - } - else - { - if ((devconfig & RAMPSM) && (aic7xxx_scbram)) - { - devconfig &= ~SCBRAMSEL; - devconfig |= EXTSCBPEN; - temp_p->flags |= AHC_EXTERNAL_SRAM; - } - else if (devconfig & RAMPSM) - { - printk(KERN_INFO "aic7xxx: <%s> at PCI %d/%d/%d\n", - board_names[aic_pdevs[i].board_name_index], - temp_p->pci_bus, - PCI_SLOT(temp_p->pci_device_fn), - PCI_FUNC(temp_p->pci_device_fn)); - printk("aic7xxx: external SCB RAM detected, " - "but not enabled\n"); - } - } - pci_write_config_dword(pdev, DEVCONFIG, devconfig); - if ( (temp_p->flags & AHC_EXTERNAL_SRAM) && - (temp_p->flags & AHC_CHNLB) ) - aic_outb(temp_p, 1, CCSCBBADDR); - break; - } - - /* - * Take the LED out of diagnostic mode - */ - aic_outb(temp_p, - (aic_inb(temp_p, SBLKCTL) & ~(DIAGLEDEN | DIAGLEDON)), - SBLKCTL); - - /* - * We don't know where this is set in the SEEPROM or by the - * BIOS, so we default to 100%. On Ultra2 controllers, use 75% - * instead. - */ - if (temp_p->features & AHC_ULTRA2) - { - aic_outb(temp_p, RD_DFTHRSH_MAX | WR_DFTHRSH_MAX, DFF_THRSH); - } - else - { - aic_outb(temp_p, DFTHRSH_100, DSPCISTATUS); - } - - /* - * Call our function to fixup any bugs that exist on this chipset. - * This may muck with PCI settings and other device settings, so - * make sure it's after all the other PCI and device register - * tweaks so it can back out bad settings on specific broken cards. 
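 *
 * (Editor's sketch, not part of the original source: the only PCI
 *  register aic7xxx_configure_bugs(), called just below, rewrites is
 *  PCI_COMMAND, a plain read-modify-write of the
 *  Memory-Write-and-Invalidate bit:
 *
 *      unsigned short cmd;
 *
 *      pci_read_config_word(p->pdev, PCI_COMMAND, &cmd);
 *      if (p->bugs & AHC_BUG_PCI_MWI)
 *              cmd &= ~PCI_COMMAND_INVALIDATE;  // MWI unsafe on this chip
 *      else
 *              cmd |= PCI_COMMAND_INVALIDATE;
 *      pci_write_config_word(p->pdev, PCI_COMMAND, cmd);
 *
 *  plus, on the affected chips, the CACHETHEN bit in DSCOMMAND0.)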
- */ - aic7xxx_configure_bugs(temp_p); - - /* Hold a pci device reference */ - pci_dev_get(temp_p->pdev); - - if ( list_p == NULL ) - { - list_p = current_p = temp_p; - } - else - { - current_p = list_p; - while(current_p->next != NULL) - current_p = current_p->next; - current_p->next = temp_p; - } - temp_p->next = NULL; - found++; - continue; -skip_pci_controller: -#ifdef CONFIG_PCI - pci_release_regions(temp_p->pdev); -#endif - kfree(temp_p); - } /* Found an Adaptec PCI device. */ - else /* Well, we found one, but we couldn't get any memory */ - { - printk("aic7xxx: Found <%s>\n", - board_names[aic_pdevs[i].board_name_index]); - printk(KERN_INFO "aic7xxx: Unable to allocate device memory, " - "skipping.\n"); - } - } /* while(pdev=....) */ - } /* for PCI_DEVICES */ - } -#endif /* CONFIG_PCI */ - -#if defined(__i386__) || defined(__alpha__) - /* - * EISA/VL-bus card signature probe. - */ - slot = MINSLOT; - while ( (slot <= MAXSLOT) && - !(aic7xxx_no_probe) ) - { - base = SLOTBASE(slot) + MINREG; - - if (!request_region(base, MAXREG - MINREG, "aic7xxx")) - { - /* - * Some other driver has staked a - * claim to this i/o region already. - */ - slot++; - continue; /* back to the beginning of the for loop */ - } - flags = 0; - type = aic7xxx_probe(slot, base + AHC_HID0, &flags); - if (type == -1) - { - release_region(base, MAXREG - MINREG); - slot++; - continue; - } - temp_p = kmalloc(sizeof(struct aic7xxx_host), GFP_ATOMIC); - if (temp_p == NULL) - { - printk(KERN_WARNING "aic7xxx: Unable to allocate device space.\n"); - release_region(base, MAXREG - MINREG); - slot++; - continue; /* back to the beginning of the while loop */ - } - - /* - * Pause the card preserving the IRQ type. Allow the operator - * to override the IRQ trigger. - */ - if (aic7xxx_irq_trigger == 1) - hcntrl = IRQMS; /* Level */ - else if (aic7xxx_irq_trigger == 0) - hcntrl = 0; /* Edge */ - else - hcntrl = inb(base + HCNTRL) & IRQMS; /* Default */ - memset(temp_p, 0, sizeof(struct aic7xxx_host)); - temp_p->unpause = hcntrl | INTEN; - temp_p->pause = hcntrl | PAUSE | INTEN; - temp_p->base = base; - temp_p->mbase = 0; - temp_p->maddr = NULL; - temp_p->pci_bus = 0; - temp_p->pci_device_fn = slot; - aic_outb(temp_p, hcntrl | PAUSE, HCNTRL); - while( (aic_inb(temp_p, HCNTRL) & PAUSE) == 0 ) ; - if (aic7xxx_chip_reset(temp_p) == -1) - temp_p->irq = 0; - else - temp_p->irq = aic_inb(temp_p, INTDEF) & 0x0F; - temp_p->flags |= AHC_PAGESCBS; - - switch (temp_p->irq) - { - case 9: - case 10: - case 11: - case 12: - case 14: - case 15: - break; - - default: - printk(KERN_WARNING "aic7xxx: Host adapter uses unsupported IRQ " - "level %d, ignoring.\n", temp_p->irq); - kfree(temp_p); - release_region(base, MAXREG - MINREG); - slot++; - continue; /* back to the beginning of the while loop */ - } - - /* - * We are committed now, everything has been checked and this card - * has been found, now we just set it up - */ - - /* - * Insert our new struct into the list at the end - */ - if (list_p == NULL) - { - list_p = current_p = temp_p; - } - else - { - current_p = list_p; - while (current_p->next != NULL) - current_p = current_p->next; - current_p->next = temp_p; - } - - switch (type) - { - case 0: - temp_p->board_name_index = 2; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk("aic7xxx: <%s> at EISA %d\n", - board_names[2], slot); - /* FALLTHROUGH */ - case 1: - { - temp_p->chip = AHC_AIC7770 | AHC_EISA; - temp_p->features |= AHC_AIC7770_FE; - temp_p->bios_control = aic_inb(temp_p, HA_274_BIOSCTRL); - - /* - * Get the primary channel 
information. Right now we don't - * do anything with this, but someday we will be able to inform - * the mid-level SCSI code which channel is primary. - */ - if (temp_p->board_name_index == 0) - { - temp_p->board_name_index = 3; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk("aic7xxx: <%s> at EISA %d\n", - board_names[3], slot); - } - if (temp_p->bios_control & CHANNEL_B_PRIMARY) - { - temp_p->flags |= AHC_CHANNEL_B_PRIMARY; - } - - if ((temp_p->bios_control & BIOSMODE) == BIOSDISABLED) - { - temp_p->flags &= ~AHC_BIOS_ENABLED; - } - else - { - temp_p->flags &= ~AHC_USEDEFAULTS; - temp_p->flags |= AHC_BIOS_ENABLED; - if ( (temp_p->bios_control & 0x20) == 0 ) - { - temp_p->bios_address = 0xcc000; - temp_p->bios_address += (0x4000 * (temp_p->bios_control & 0x07)); - } - else - { - temp_p->bios_address = 0xd0000; - temp_p->bios_address += (0x8000 * (temp_p->bios_control & 0x06)); - } - } - temp_p->adapter_control = aic_inb(temp_p, SCSICONF) << 8; - temp_p->adapter_control |= aic_inb(temp_p, SCSICONF + 1); - if (temp_p->features & AHC_WIDE) - { - temp_p->scsi_id = temp_p->adapter_control & HWSCSIID; - temp_p->scsi_id_b = temp_p->scsi_id; - } - else - { - temp_p->scsi_id = (temp_p->adapter_control >> 8) & HSCSIID; - temp_p->scsi_id_b = temp_p->adapter_control & HSCSIID; - } - aic7xxx_load_seeprom(temp_p, &sxfrctl1); - break; - } - - case 2: - case 3: - temp_p->chip = AHC_AIC7770 | AHC_VL; - temp_p->features |= AHC_AIC7770_FE; - if (type == 2) - temp_p->flags |= AHC_BIOS_ENABLED; - else - temp_p->flags &= ~AHC_BIOS_ENABLED; - if (aic_inb(temp_p, SCSICONF) & TERM_ENB) - sxfrctl1 = STPWEN; - aic7xxx_load_seeprom(temp_p, &sxfrctl1); - temp_p->board_name_index = 4; - if (aic7xxx_verbose & VERBOSE_PROBE2) - printk("aic7xxx: <%s> at VLB %d\n", - board_names[2], slot); - switch( aic_inb(temp_p, STATUS_2840) & BIOS_SEL ) - { - case 0x00: - temp_p->bios_address = 0xe0000; - break; - case 0x20: - temp_p->bios_address = 0xc8000; - break; - case 0x40: - temp_p->bios_address = 0xd0000; - break; - case 0x60: - temp_p->bios_address = 0xd8000; - break; - default: - break; /* can't get here */ - } - break; - - default: /* Won't get here. */ - break; - } - if (aic7xxx_verbose & VERBOSE_PROBE2) - { - printk(KERN_INFO "aic7xxx: BIOS %sabled, IO Port 0x%lx, IRQ %d (%s)\n", - (temp_p->flags & AHC_USEDEFAULTS) ? "dis" : "en", temp_p->base, - temp_p->irq, - (temp_p->pause & IRQMS) ? "level sensitive" : "edge triggered"); - printk(KERN_INFO "aic7xxx: Extended translation %sabled.\n", - (temp_p->flags & AHC_EXTEND_TRANS_A) ? "en" : "dis"); - } - - /* - * All the 7770 based chipsets have this bug - */ - temp_p->bugs |= AHC_BUG_TMODE_WIDEODD; - - /* - * Set the FIFO threshold and the bus off time. - */ - hostconf = aic_inb(temp_p, HOSTCONF); - aic_outb(temp_p, hostconf & DFTHRSH, BUSSPD); - aic_outb(temp_p, (hostconf << 2) & BOFF, BUSTIME); - slot++; - found++; - } - -#endif /* defined(__i386__) || defined(__alpha__) */ - - /* - * Now, we re-order the probed devices by BIOS address and BUS class. - * In general, we follow this algorithm to make the adapters show up - * in the same order under linux that the computer finds them. - * 1: All VLB/EISA cards with BIOS_ENABLED first, according to BIOS - * address, going from lowest to highest. - * 2: All PCI controllers with BIOS_ENABLED next, according to BIOS - * address, going from lowest to highest. - * 3: Remaining VLB/EISA controllers going in slot order. 
- * 4: Remaining PCI controllers, going in PCI device order (reversible) - */ - - { - struct aic7xxx_host *sort_list[4] = { NULL, NULL, NULL, NULL }; - struct aic7xxx_host *vlb, *pci; - struct aic7xxx_host *prev_p; - struct aic7xxx_host *p; - unsigned char left; - - prev_p = vlb = pci = NULL; - - temp_p = list_p; - while (temp_p != NULL) - { - switch(temp_p->chip & ~AHC_CHIPID_MASK) - { - case AHC_EISA: - case AHC_VL: - { - p = temp_p; - if (p->flags & AHC_BIOS_ENABLED) - vlb = sort_list[0]; - else - vlb = sort_list[2]; - - if (vlb == NULL) - { - vlb = temp_p; - temp_p = temp_p->next; - vlb->next = NULL; - } - else - { - current_p = vlb; - prev_p = NULL; - while ( (current_p != NULL) && - (current_p->bios_address < temp_p->bios_address)) - { - prev_p = current_p; - current_p = current_p->next; - } - if (prev_p != NULL) - { - prev_p->next = temp_p; - temp_p = temp_p->next; - prev_p->next->next = current_p; - } - else - { - vlb = temp_p; - temp_p = temp_p->next; - vlb->next = current_p; - } - } - - if (p->flags & AHC_BIOS_ENABLED) - sort_list[0] = vlb; - else - sort_list[2] = vlb; - - break; - } - default: /* All PCI controllers fall through to default */ - { - - p = temp_p; - if (p->flags & AHC_BIOS_ENABLED) - pci = sort_list[1]; - else - pci = sort_list[3]; - - if (pci == NULL) - { - pci = temp_p; - temp_p = temp_p->next; - pci->next = NULL; - } - else - { - current_p = pci; - prev_p = NULL; - if (!aic7xxx_reverse_scan) - { - while ( (current_p != NULL) && - ( (PCI_SLOT(current_p->pci_device_fn) | - (current_p->pci_bus << 8)) < - (PCI_SLOT(temp_p->pci_device_fn) | - (temp_p->pci_bus << 8)) ) ) - { - prev_p = current_p; - current_p = current_p->next; - } - } - else - { - while ( (current_p != NULL) && - ( (PCI_SLOT(current_p->pci_device_fn) | - (current_p->pci_bus << 8)) > - (PCI_SLOT(temp_p->pci_device_fn) | - (temp_p->pci_bus << 8)) ) ) - { - prev_p = current_p; - current_p = current_p->next; - } - } - /* - * Are we dealing with a 7895/6/7/9 where we need to sort the - * channels as well, if so, the bios_address values should - * be the same - */ - if ( (current_p) && (temp_p->flags & AHC_MULTI_CHANNEL) && - (temp_p->pci_bus == current_p->pci_bus) && - (PCI_SLOT(temp_p->pci_device_fn) == - PCI_SLOT(current_p->pci_device_fn)) ) - { - if (temp_p->flags & AHC_CHNLB) - { - if ( !(temp_p->flags & AHC_CHANNEL_B_PRIMARY) ) - { - prev_p = current_p; - current_p = current_p->next; - } - } - else - { - if (temp_p->flags & AHC_CHANNEL_B_PRIMARY) - { - prev_p = current_p; - current_p = current_p->next; - } - } - } - if (prev_p != NULL) - { - prev_p->next = temp_p; - temp_p = temp_p->next; - prev_p->next->next = current_p; - } - else - { - pci = temp_p; - temp_p = temp_p->next; - pci->next = current_p; - } - } - - if (p->flags & AHC_BIOS_ENABLED) - sort_list[1] = pci; - else - sort_list[3] = pci; - - break; - } - } /* End of switch(temp_p->type) */ - } /* End of while (temp_p != NULL) */ - /* - * At this point, the cards have been broken into 4 sorted lists, now - * we run through the lists in order and register each controller - */ - { - int i; - - left = found; - for (i=0; i<ARRAY_SIZE(sort_list); i++) - { - temp_p = sort_list[i]; - while(temp_p != NULL) - { - template->name = board_names[temp_p->board_name_index]; - p = aic7xxx_alloc(template, temp_p); - if (p != NULL) - { - p->instance = found - left; - if (aic7xxx_register(template, p, (--left)) == 0) - { - found--; - aic7xxx_release(p->host); - scsi_unregister(p->host); - } - else if (aic7xxx_dump_card) - { - pause_sequencer(p); - 
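          /*
           * (Editor's note, not part of the original source: the
           *  sequencer is paused here so the dump routines below read
           *  stable register state; unpause_sequencer() restarts it
           *  once the card registers and scratch RAM have been
           *  printed.)
           */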
aic7xxx_print_card(p); - aic7xxx_print_scratch_ram(p); - unpause_sequencer(p, TRUE); - } - } - current_p = temp_p; - temp_p = (struct aic7xxx_host *)temp_p->next; - kfree(current_p); - } - } - } - } - return (found); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_buildscb - * - * Description: - * Build a SCB. - *-F*************************************************************************/ -static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd, - struct aic7xxx_scb *scb) -{ - unsigned short mask; - struct aic7xxx_hwscb *hscb; - struct aic_dev_data *aic_dev = cmd->device->hostdata; - struct scsi_device *sdptr = cmd->device; - unsigned char tindex = TARGET_INDEX(cmd); - int use_sg; - - mask = (0x01 << tindex); - hscb = scb->hscb; - - /* - * Setup the control byte if we need negotiation and have not - * already requested it. - */ - hscb->control = 0; - scb->tag_action = 0; - - if (p->discenable & mask) - { - hscb->control |= DISCENB; - /* We always force TEST_UNIT_READY to untagged */ - if (cmd->cmnd[0] != TEST_UNIT_READY && sdptr->simple_tags) - { - hscb->control |= MSG_SIMPLE_Q_TAG; - scb->tag_action = MSG_SIMPLE_Q_TAG; - } - } - if ( !(aic_dev->dtr_pending) && - (aic_dev->needppr || aic_dev->needwdtr || aic_dev->needsdtr) && - (aic_dev->flags & DEVICE_DTR_SCANNED) ) - { - aic_dev->dtr_pending = 1; - scb->tag_action = 0; - hscb->control &= DISCENB; - hscb->control |= MK_MESSAGE; - if(aic_dev->needppr) - { - scb->flags |= SCB_MSGOUT_PPR; - } - else if(aic_dev->needwdtr) - { - scb->flags |= SCB_MSGOUT_WDTR; - } - else if(aic_dev->needsdtr) - { - scb->flags |= SCB_MSGOUT_SDTR; - } - scb->flags |= SCB_DTR_SCB; - } - hscb->target_channel_lun = ((cmd->device->id << 4) & 0xF0) | - ((cmd->device->channel & 0x01) << 3) | (cmd->device->lun & 0x07); - - /* - * The interpretation of request_buffer and request_bufflen - * changes depending on whether or not use_sg is zero; a - * non-zero use_sg indicates the number of elements in the - * scatter-gather array. - */ - - /* - * XXX - this relies on the host data being stored in a - * little-endian format. - */ - hscb->SCSI_cmd_length = cmd->cmd_len; - memcpy(scb->cmnd, cmd->cmnd, cmd->cmd_len); - hscb->SCSI_cmd_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, scb->cmnd)); - - use_sg = scsi_dma_map(cmd); - BUG_ON(use_sg < 0); - - if (use_sg) { - struct scatterlist *sg; /* Must be mid-level SCSI code scatterlist */ - - /* - * We must build an SG list in adapter format, as the kernel's SG list - * cannot be used directly because of data field size (__alpha__) - * differences and the kernel SG list uses virtual addresses where - * we need physical addresses. - */ - int i; - - scb->sg_length = 0; - - - /* - * Copy the segments into the SG array. NOTE!!! - We used to - * have the first entry both in the data_pointer area and the first - * SG element. That has changed somewhat. We still have the first - * entry in both places, but now we download the address of - * scb->sg_list[1] instead of 0 to the sg pointer in the hscb. - */ - scsi_for_each_sg(cmd, sg, use_sg, i) { - unsigned int len = sg_dma_len(sg); - scb->sg_list[i].address = cpu_to_le32(sg_dma_address(sg)); - scb->sg_list[i].length = cpu_to_le32(len); - scb->sg_length += len; - } - /* Copy the first SG into the data pointer area. 
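     *
     * (Editor's note, not part of the original source: every pointer
     *  and count stored in the hardware SCB goes through cpu_to_le32()
     *  because the sequencer consumes these fields in little-endian
     *  byte order regardless of host endianness; the first segment is
     *  mirrored into data_pointer/data_count so the sequencer can
     *  start the transfer before fetching the rest of the list from
     *  sg_list[1] onward.)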
*/ - hscb->data_pointer = scb->sg_list[0].address; - hscb->data_count = scb->sg_list[0].length; - scb->sg_count = i; - hscb->SG_segment_count = i; - hscb->SG_list_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, &scb->sg_list[1])); - } else { - scb->sg_count = 0; - scb->sg_length = 0; - hscb->SG_segment_count = 0; - hscb->SG_list_pointer = 0; - hscb->data_count = 0; - hscb->data_pointer = 0; - } -} - -/*+F************************************************************************* - * Function: - * aic7xxx_queue - * - * Description: - * Queue a SCB to the controller. - *-F*************************************************************************/ -static int aic7xxx_queue_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) -{ - struct aic7xxx_host *p; - struct aic7xxx_scb *scb; - struct aic_dev_data *aic_dev; - - p = (struct aic7xxx_host *) cmd->device->host->hostdata; - - aic_dev = cmd->device->hostdata; -#ifdef AIC7XXX_VERBOSE_DEBUGGING - if (aic_dev->active_cmds > aic_dev->max_q_depth) - { - printk(WARN_LEAD "Commands queued exceeds queue " - "depth, active=%d\n", - p->host_no, CTL_OF_CMD(cmd), - aic_dev->active_cmds); - } -#endif - - scb = scbq_remove_head(&p->scb_data->free_scbs); - if (scb == NULL) - { - aic7xxx_allocate_scb(p); - scb = scbq_remove_head(&p->scb_data->free_scbs); - if(scb == NULL) - { - printk(WARN_LEAD "Couldn't get a free SCB.\n", p->host_no, - CTL_OF_CMD(cmd)); - return 1; - } - } - scb->cmd = cmd; - - /* - * Make sure the scsi_cmnd pointer is saved, the struct it points to - * is set up properly, and the parity error flag is reset, then send - * the SCB to the sequencer and watch the fun begin. - */ - aic7xxx_position(cmd) = scb->hscb->tag; - cmd->scsi_done = fn; - cmd->result = DID_OK; - aic7xxx_error(cmd) = DID_OK; - aic7xxx_status(cmd) = 0; - cmd->host_scribble = NULL; - - /* - * Construct the SCB beforehand, so the sequencer is - * paused a minimal amount of time. - */ - aic7xxx_buildscb(p, cmd, scb); - - scb->flags |= SCB_ACTIVE | SCB_WAITINGQ; - - scbq_insert_tail(&p->waiting_scbs, scb); - aic7xxx_run_waiting_queues(p); - return (0); -} - -static DEF_SCSI_QCMD(aic7xxx_queue) - -/*+F************************************************************************* - * Function: - * aic7xxx_bus_device_reset - * - * Description: - * Abort or reset the current SCSI command(s). If the scb has not - * previously been aborted, then we attempt to send a BUS_DEVICE_RESET - * message to the target. If the scb has previously been unsuccessfully - * aborted, then we will reset the channel and have all devices renegotiate. - * Returns an enumerated type that indicates the status of the operation. 
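 *
 *   (Editor's note, not part of the original source: the enumerated
 *    values in question are the midlayer's error-handler codes from
 *    <scsi/scsi.h> - SUCCESS (0x2002) and FAILED (0x2003) - which is
 *    all the eh_device_reset_handler contract allows here.)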
- *-F*************************************************************************/ -static int __aic7xxx_bus_device_reset(struct scsi_cmnd *cmd) -{ - struct aic7xxx_host *p; - struct aic7xxx_scb *scb; - struct aic7xxx_hwscb *hscb; - int channel; - unsigned char saved_scbptr, lastphase; - unsigned char hscb_index; - int disconnected; - struct aic_dev_data *aic_dev; - - if(cmd == NULL) - { - printk(KERN_ERR "aic7xxx_bus_device_reset: called with NULL cmd!\n"); - return FAILED; - } - p = (struct aic7xxx_host *)cmd->device->host->hostdata; - aic_dev = AIC_DEV(cmd); - if(aic7xxx_position(cmd) < p->scb_data->numscbs) - scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]); - else - return FAILED; - - hscb = scb->hscb; - - aic7xxx_isr(p); - aic7xxx_done_cmds_complete(p); - /* If the command was already complete or just completed, then we didn't - * do a reset, return FAILED */ - if(!(scb->flags & SCB_ACTIVE)) - return FAILED; - - pause_sequencer(p); - lastphase = aic_inb(p, LASTPHASE); - if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) - { - printk(INFO_LEAD "Bus Device reset, scb flags 0x%x, ", - p->host_no, CTL_OF_SCB(scb), scb->flags); - switch (lastphase) - { - case P_DATAOUT: - printk("Data-Out phase\n"); - break; - case P_DATAIN: - printk("Data-In phase\n"); - break; - case P_COMMAND: - printk("Command phase\n"); - break; - case P_MESGOUT: - printk("Message-Out phase\n"); - break; - case P_STATUS: - printk("Status phase\n"); - break; - case P_MESGIN: - printk("Message-In phase\n"); - break; - default: - /* - * We're not in a valid phase, so assume we're idle. - */ - printk("while idle, LASTPHASE = 0x%x\n", lastphase); - break; - } - printk(INFO_LEAD "SCSISIGI 0x%x, SEQADDR 0x%x, SSTAT0 0x%x, SSTAT1 " - "0x%x\n", p->host_no, CTL_OF_SCB(scb), - aic_inb(p, SCSISIGI), - aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8), - aic_inb(p, SSTAT0), aic_inb(p, SSTAT1)); - printk(INFO_LEAD "SG_CACHEPTR 0x%x, SSTAT2 0x%x, STCNT 0x%x\n", p->host_no, - CTL_OF_SCB(scb), - (p->features & AHC_ULTRA2) ? aic_inb(p, SG_CACHEPTR) : 0, - aic_inb(p, SSTAT2), - aic_inb(p, STCNT + 2) << 16 | aic_inb(p, STCNT + 1) << 8 | - aic_inb(p, STCNT)); - } - - channel = cmd->device->channel; - - /* - * Send a Device Reset Message: - * The target that is holding up the bus may not be the same as - * the one that triggered this timeout (different commands have - * different timeout lengths). Our strategy here is to queue an - * abort message to the timed out target if it is disconnected. - * Otherwise, if we have an active target we stuff the message buffer - * with an abort message and assert ATN in the hopes that the target - * will let go of the bus and go to the mesgout phase. If this - * fails, we'll get another timeout a few seconds later which will - * attempt a bus reset. 
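 *
 * (Editor's aside, not part of the original source: because this path
 *  sleeps while it waits for the target to act on the message, the
 *  code below must drop the host lock around the wait:
 *
 *      spin_unlock_irq(p->host->host_lock);
 *      ssleep(1);                     // give the target a second
 *      spin_lock_irq(p->host->host_lock);
 *
 *  sleeping with the lock held, or with interrupts off, would starve
 *  the very interrupt handler this recovery depends on.)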
- */ - saved_scbptr = aic_inb(p, SCBPTR); - disconnected = FALSE; - - if (lastphase != P_BUSFREE) - { - if (aic_inb(p, SCB_TAG) >= p->scb_data->numscbs) - { - printk(WARN_LEAD "Invalid SCB ID %d is active, " - "SCB flags = 0x%x.\n", p->host_no, - CTL_OF_CMD(cmd), scb->hscb->tag, scb->flags); - unpause_sequencer(p, FALSE); - return FAILED; - } - if (scb->hscb->tag == aic_inb(p, SCB_TAG)) - { - if ( (lastphase == P_MESGOUT) || (lastphase == P_MESGIN) ) - { - printk(WARN_LEAD "Device reset, Message buffer " - "in use\n", p->host_no, CTL_OF_SCB(scb)); - unpause_sequencer(p, FALSE); - return FAILED; - } - - if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) - printk(INFO_LEAD "Device reset message in " - "message buffer\n", p->host_no, CTL_OF_SCB(scb)); - scb->flags |= SCB_RESET | SCB_DEVICE_RESET; - aic7xxx_error(cmd) = DID_RESET; - aic_dev->flags |= BUS_DEVICE_RESET_PENDING; - /* Send the abort message to the active SCB. */ - aic_outb(p, HOST_MSG, MSG_OUT); - aic_outb(p, lastphase | ATNO, SCSISIGO); - unpause_sequencer(p, FALSE); - spin_unlock_irq(p->host->host_lock); - ssleep(1); - spin_lock_irq(p->host->host_lock); - if(aic_dev->flags & BUS_DEVICE_RESET_PENDING) - return FAILED; - else - return SUCCESS; - } - } /* if (last_phase != P_BUSFREE).....indicates we are idle and can work */ - /* - * Simply set the MK_MESSAGE flag and the SEQINT handler will do - * the rest on a reconnect/connect. - */ - scb->hscb->control |= MK_MESSAGE; - scb->flags |= SCB_RESET | SCB_DEVICE_RESET; - aic_dev->flags |= BUS_DEVICE_RESET_PENDING; - /* - * Check to see if the command is on the qinfifo. If it is, then we will - * not need to queue the command again since the card should start it soon - */ - if (aic7xxx_search_qinfifo(p, cmd->device->channel, cmd->device->id, cmd->device->lun, hscb->tag, - 0, TRUE, NULL) == 0) - { - disconnected = TRUE; - if ((hscb_index = aic7xxx_find_scb(p, scb)) != SCB_LIST_NULL) - { - unsigned char scb_control; - - aic_outb(p, hscb_index, SCBPTR); - scb_control = aic_inb(p, SCB_CONTROL); - /* - * If the DISCONNECTED bit is not set in SCB_CONTROL, then we are - * actually on the waiting list, not disconnected, and we don't - * need to requeue the command. - */ - disconnected = (scb_control & DISCONNECTED); - aic_outb(p, scb_control | MK_MESSAGE, SCB_CONTROL); - } - if (disconnected) - { - /* - * Actually requeue this SCB in case we can select the - * device before it reconnects. This can result in the command - * being on the qinfifo twice, but we don't care because it will - * all get cleaned up if/when the reset takes place. 
- */ - if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) - printk(INFO_LEAD "Queueing device reset command.\n", p->host_no, - CTL_OF_SCB(scb)); - p->qinfifo[p->qinfifonext++] = scb->hscb->tag; - if (p->features & AHC_QUEUE_REGS) - aic_outb(p, p->qinfifonext, HNSCB_QOFF); - else - aic_outb(p, p->qinfifonext, KERNEL_QINPOS); - scb->flags |= SCB_QUEUED_ABORT; - } - } - aic_outb(p, saved_scbptr, SCBPTR); - unpause_sequencer(p, FALSE); - spin_unlock_irq(p->host->host_lock); - msleep(1000/4); - spin_lock_irq(p->host->host_lock); - if(aic_dev->flags & BUS_DEVICE_RESET_PENDING) - return FAILED; - else - return SUCCESS; -} - -static int aic7xxx_bus_device_reset(struct scsi_cmnd *cmd) -{ - int rc; - - spin_lock_irq(cmd->device->host->host_lock); - rc = __aic7xxx_bus_device_reset(cmd); - spin_unlock_irq(cmd->device->host->host_lock); - - return rc; -} - - -/*+F************************************************************************* - * Function: - * aic7xxx_panic_abort - * - * Description: - * Abort the current SCSI command(s). - *-F*************************************************************************/ -static void aic7xxx_panic_abort(struct aic7xxx_host *p, struct scsi_cmnd *cmd) -{ - - printk("aic7xxx driver version %s\n", AIC7XXX_C_VERSION); - printk("Controller type:\n %s\n", board_names[p->board_name_index]); - printk("p->flags=0x%lx, p->chip=0x%x, p->features=0x%x, " - "sequencer %s paused\n", - p->flags, p->chip, p->features, - (aic_inb(p, HCNTRL) & PAUSE) ? "is" : "isn't" ); - pause_sequencer(p); - disable_irq(p->irq); - aic7xxx_print_card(p); - aic7xxx_print_scratch_ram(p); - spin_unlock_irq(p->host->host_lock); - for(;;) barrier(); -} - -/*+F************************************************************************* - * Function: - * aic7xxx_abort - * - * Description: - * Abort the current SCSI command(s). - *-F*************************************************************************/ -static int __aic7xxx_abort(struct scsi_cmnd *cmd) -{ - struct aic7xxx_scb *scb = NULL; - struct aic7xxx_host *p; - int found=0, disconnected; - unsigned char saved_hscbptr, hscbptr, scb_control; - struct aic_dev_data *aic_dev; - - if(cmd == NULL) - { - printk(KERN_ERR "aic7xxx_abort: called with NULL cmd!\n"); - return FAILED; - } - p = (struct aic7xxx_host *)cmd->device->host->hostdata; - aic_dev = AIC_DEV(cmd); - if(aic7xxx_position(cmd) < p->scb_data->numscbs) - scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]); - else - return FAILED; - - aic7xxx_isr(p); - aic7xxx_done_cmds_complete(p); - /* If the command was already complete or just completed, then we didn't - * do a reset, return FAILED */ - if(!(scb->flags & SCB_ACTIVE)) - return FAILED; - - pause_sequencer(p); - - /* - * I added a new config option to the driver: "panic_on_abort" that will - * cause the driver to panic and the machine to stop on the first abort - * or reset call into the driver. At that point, it prints out a lot of - * useful information for me which I can then use to try and debug the - * problem. Simply enable the boot time prompt in order to activate this - * code. - */ - if (aic7xxx_panic_on_abort) - aic7xxx_panic_abort(p, cmd); - - if (aic7xxx_verbose & VERBOSE_ABORT) - { - printk(INFO_LEAD "Aborting scb %d, flags 0x%x, SEQADDR 0x%x, LASTPHASE " - "0x%x\n", - p->host_no, CTL_OF_SCB(scb), scb->hscb->tag, scb->flags, - aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8), - aic_inb(p, LASTPHASE)); - printk(INFO_LEAD "SG_CACHEPTR 0x%x, SG_COUNT %d, SCSISIGI 0x%x\n", - p->host_no, CTL_OF_SCB(scb), (p->features & AHC_ULTRA2) ? 
- aic_inb(p, SG_CACHEPTR) : 0, aic_inb(p, SG_COUNT), - aic_inb(p, SCSISIGI)); - printk(INFO_LEAD "SSTAT0 0x%x, SSTAT1 0x%x, SSTAT2 0x%x\n", - p->host_no, CTL_OF_SCB(scb), aic_inb(p, SSTAT0), - aic_inb(p, SSTAT1), aic_inb(p, SSTAT2)); - } - - if (scb->flags & SCB_WAITINGQ) - { - if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS) - printk(INFO_LEAD "SCB found on waiting list and " - "aborted.\n", p->host_no, CTL_OF_SCB(scb)); - scbq_remove(&p->waiting_scbs, scb); - scbq_remove(&aic_dev->delayed_scbs, scb); - aic_dev->active_cmds++; - p->activescbs++; - scb->flags &= ~(SCB_WAITINGQ | SCB_ACTIVE); - scb->flags |= SCB_ABORT | SCB_QUEUED_FOR_DONE; - goto success; - } - -/* - * We just checked the waiting_q, now for the QINFIFO - */ - if ( ((found = aic7xxx_search_qinfifo(p, cmd->device->id, cmd->device->channel, - cmd->device->lun, scb->hscb->tag, SCB_ABORT | SCB_QUEUED_FOR_DONE, - FALSE, NULL)) != 0) && - (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)) - { - printk(INFO_LEAD "SCB found in QINFIFO and aborted.\n", p->host_no, - CTL_OF_SCB(scb)); - goto success; - } - -/* - * QINFIFO, waitingq, completeq done. Next, check WAITING_SCB list in card - */ - - saved_hscbptr = aic_inb(p, SCBPTR); - if ((hscbptr = aic7xxx_find_scb(p, scb)) != SCB_LIST_NULL) - { - aic_outb(p, hscbptr, SCBPTR); - scb_control = aic_inb(p, SCB_CONTROL); - disconnected = scb_control & DISCONNECTED; - /* - * If the DISCONNECTED bit is not set in SCB_CONTROL, then we are - * either currently active or on the waiting list. - */ - if(!disconnected && aic_inb(p, LASTPHASE) == P_BUSFREE) { - if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS) - printk(INFO_LEAD "SCB found on hardware waiting" - " list and aborted.\n", p->host_no, CTL_OF_SCB(scb)); - /* If we are the only waiting command, stop the selection engine */ - if (aic_inb(p, WAITING_SCBH) == hscbptr && aic_inb(p, SCB_NEXT) == - SCB_LIST_NULL) - { - aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ); - aic_outb(p, CLRSELTIMEO, CLRSINT1); - aic_outb(p, SCB_LIST_NULL, WAITING_SCBH); - } - else - { - unsigned char prev, next; - prev = SCB_LIST_NULL; - next = aic_inb(p, WAITING_SCBH); - while(next != SCB_LIST_NULL) - { - aic_outb(p, next, SCBPTR); - if (next == hscbptr) - { - next = aic_inb(p, SCB_NEXT); - if (prev != SCB_LIST_NULL) - { - aic_outb(p, prev, SCBPTR); - aic_outb(p, next, SCB_NEXT); - } - else - aic_outb(p, next, WAITING_SCBH); - aic_outb(p, hscbptr, SCBPTR); - next = SCB_LIST_NULL; - } - else - { - prev = next; - next = aic_inb(p, SCB_NEXT); - } - } - } - aic_outb(p, SCB_LIST_NULL, SCB_TAG); - aic_outb(p, 0, SCB_CONTROL); - aic7xxx_add_curscb_to_free_list(p); - scb->flags = SCB_ABORT | SCB_QUEUED_FOR_DONE; - goto success; - } - else if (!disconnected) - { - /* - * We are the currently active command - */ - if((aic_inb(p, LASTPHASE) == P_MESGIN) || - (aic_inb(p, LASTPHASE) == P_MESGOUT)) - { - /* - * Message buffer busy, unable to abort - */ - printk(INFO_LEAD "message buffer busy, unable to abort.\n", - p->host_no, CTL_OF_SCB(scb)); - unpause_sequencer(p, FALSE); - return FAILED; - } - /* Fallthrough to below, set ATNO after we set SCB_CONTROL */ - } - aic_outb(p, scb_control | MK_MESSAGE, SCB_CONTROL); - if(!disconnected) - { - aic_outb(p, HOST_MSG, MSG_OUT); - aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO); - } - aic_outb(p, saved_hscbptr, SCBPTR); - } - else - { - /* - * The scb isn't in the card at all and it is active and it isn't in - * any of the queues, so it must be disconnected and paged out. Fall - * through to the code below. 
- */
-    disconnected = 1;
-  }
-
-  p->flags |= AHC_ABORT_PENDING;
-  scb->flags |= SCB_QUEUED_ABORT | SCB_ABORT | SCB_RECOVERY_SCB;
-  scb->hscb->control |= MK_MESSAGE;
-  if(disconnected)
-  {
-    if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
-      printk(INFO_LEAD "SCB disconnected. Queueing Abort"
-        " SCB.\n", p->host_no, CTL_OF_SCB(scb));
-    p->qinfifo[p->qinfifonext++] = scb->hscb->tag;
-    if (p->features & AHC_QUEUE_REGS)
-      aic_outb(p, p->qinfifonext, HNSCB_QOFF);
-    else
-      aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
-  }
-  unpause_sequencer(p, FALSE);
-  spin_unlock_irq(p->host->host_lock);
-  msleep(1000/4);
-  spin_lock_irq(p->host->host_lock);
-  if (p->flags & AHC_ABORT_PENDING)
-  {
-    if (aic7xxx_verbose & VERBOSE_ABORT_RETURN)
-      printk(INFO_LEAD "Abort never delivered, returning FAILED\n", p->host_no,
-        CTL_OF_CMD(cmd));
-    p->flags &= ~AHC_ABORT_PENDING;
-    return FAILED;
-  }
-  if (aic7xxx_verbose & VERBOSE_ABORT_RETURN)
-    printk(INFO_LEAD "Abort successful.\n", p->host_no, CTL_OF_CMD(cmd));
-  return SUCCESS;
-
-success:
-  if (aic7xxx_verbose & VERBOSE_ABORT_RETURN)
-    printk(INFO_LEAD "Abort successful.\n", p->host_no, CTL_OF_CMD(cmd));
-  aic7xxx_run_done_queue(p, TRUE);
-  unpause_sequencer(p, FALSE);
-  return SUCCESS;
-}
-
-static int aic7xxx_abort(struct scsi_cmnd *cmd)
-{
-  int rc;
-
-  spin_lock_irq(cmd->device->host->host_lock);
-  rc = __aic7xxx_abort(cmd);
-  spin_unlock_irq(cmd->device->host->host_lock);
-
-  return rc;
-}
-
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_reset
- *
- * Description:
- *   Resetting the bus always succeeds - it has to, otherwise the
- *   kernel will panic!  Try a surgical technique - sending a BUS
- *   DEVICE RESET message - on the offending target before pulling
- *   the SCSI bus reset line.
- *-F*************************************************************************/
-static int aic7xxx_reset(struct scsi_cmnd *cmd)
-{
-  struct aic7xxx_scb *scb;
-  struct aic7xxx_host *p;
-  struct aic_dev_data *aic_dev;
-
-  p = (struct aic7xxx_host *) cmd->device->host->hostdata;
-  spin_lock_irq(p->host->host_lock);
-
-  aic_dev = AIC_DEV(cmd);
-  if(aic7xxx_position(cmd) < p->scb_data->numscbs)
-  {
-    scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]);
-    if (scb->cmd != cmd)
-      scb = NULL;
-  }
-  else
-  {
-    scb = NULL;
-  }
-
-  /*
-   * I added a new config option to the driver: "panic_on_abort" that will
-   * cause the driver to panic and the machine to stop on the first abort
-   * or reset call into the driver.  At that point, it prints out a lot of
-   * useful information for me which I can then use to try and debug the
-   * problem.  Simply enable the boot time prompt in order to activate this
-   * code.
-   */
-  if (aic7xxx_panic_on_abort)
-    aic7xxx_panic_abort(p, cmd);
-
-  pause_sequencer(p);
-
-  while((aic_inb(p, INTSTAT) & INT_PEND) && !(p->flags & AHC_IN_ISR))
-  {
-    aic7xxx_isr(p);
-    pause_sequencer(p);
-  }
-  aic7xxx_done_cmds_complete(p);
-
-  if(scb && (scb->cmd == NULL))
-  {
-    /*
-     * We just completed the command when we ran the isr stuff, so we no
-     * longer have it.
-     */
-    unpause_sequencer(p, FALSE);
-    spin_unlock_irq(p->host->host_lock);
-    return SUCCESS;
-  }
-
-/*
- * By this point, we want to already know what we are going to do and
- * only have the following code implement our course of action.
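 *
 * (Editor's note, not part of the original source: on AHC_TWIN
 *  adapters, a single chip driving two buses, the code below also
 *  resets the sibling channel (cmd->device->channel ^ 0x01) and then
 *  restarts the sequencer, since the one sequencer serves both
 *  channels.)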
- */
-  aic7xxx_reset_channel(p, cmd->device->channel, TRUE);
-  if (p->features & AHC_TWIN)
-  {
-    aic7xxx_reset_channel(p, cmd->device->channel ^ 0x01, TRUE);
-    restart_sequencer(p);
-  }
-  aic_outb(p, aic_inb(p, SIMODE1) & ~(ENREQINIT|ENBUSFREE), SIMODE1);
-  aic7xxx_clear_intstat(p);
-  p->flags &= ~AHC_HANDLING_REQINITS;
-  p->msg_type = MSG_TYPE_NONE;
-  p->msg_index = 0;
-  p->msg_len = 0;
-  aic7xxx_run_done_queue(p, TRUE);
-  unpause_sequencer(p, FALSE);
-  spin_unlock_irq(p->host->host_lock);
-  ssleep(2);
-  return SUCCESS;
-}
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_biosparam
- *
- * Description:
- *   Return the disk geometry for the given SCSI device.
- *
- * Note:
- *   This function is broken for today's really large drives and needs
- *   to be fixed.
- *-F*************************************************************************/
-static int
-aic7xxx_biosparam(struct scsi_device *sdev, struct block_device *bdev,
-    sector_t capacity, int geom[])
-{
-  sector_t heads, sectors, cylinders;
-  int ret;
-  struct aic7xxx_host *p;
-  unsigned char *buf;
-
-  p = (struct aic7xxx_host *) sdev->host->hostdata;
-  buf = scsi_bios_ptable(bdev);
-
-  if ( buf )
-  {
-    ret = scsi_partsize(buf, capacity, &geom[2], &geom[0], &geom[1]);
-    kfree(buf);
-    if ( ret != -1 )
-      return(ret);
-  }
-
-  heads = 64;
-  sectors = 32;
-  cylinders = capacity >> 11;
-
-  if ((p->flags & AHC_EXTEND_TRANS_A) && (cylinders > 1024))
-  {
-    heads = 255;
-    sectors = 63;
-    cylinders = capacity >> 14;
-    if(capacity > (65535 * heads * sectors))
-      cylinders = 65535;
-    else
-      cylinders = ((unsigned int)capacity) / (unsigned int)(heads * sectors);
-  }
-
-  geom[0] = (int)heads;
-  geom[1] = (int)sectors;
-  geom[2] = (int)cylinders;
-
-  return (0);
-}
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_release
- *
- * Description:
- *   Free the passed in Scsi_Host memory structures prior to unloading the
- *   module.
- *-F*************************************************************************/
-static int
-aic7xxx_release(struct Scsi_Host *host)
-{
-  struct aic7xxx_host *p = (struct aic7xxx_host *) host->hostdata;
-  struct aic7xxx_host *next, *prev;
-
-  if(p->irq)
-    free_irq(p->irq, p);
-#ifdef MMAPIO
-  if(p->maddr)
-  {
-    iounmap(p->maddr);
-  }
-#endif /* MMAPIO */
-  if(!p->pdev)
-    release_region(p->base, MAXREG - MINREG);
-#ifdef CONFIG_PCI
-  else {
-    pci_release_regions(p->pdev);
-    pci_dev_put(p->pdev);
-  }
-#endif
-  prev = NULL;
-  next = first_aic7xxx;
-  while(next != NULL)
-  {
-    if(next == p)
-    {
-      if(prev == NULL)
-        first_aic7xxx = next->next;
-      else
-        prev->next = next->next;
-    }
-    else
-    {
-      prev = next;
-    }
-    next = next->next;
-  }
-  aic7xxx_free(p);
-  return(0);
-}
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_print_card
- *
- * Description:
- *   Print out all of the control registers on the card
- *
- *   NOTE: This function is not yet safe for use on the VLB and EISA
- *   controllers, so it isn't used on those controllers at all.
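 *
 *   (Editor's worked example, not part of the original source, for
 *    the translation math in aic7xxx_biosparam() above: a 4 GB disk
 *    has 8388608 512-byte sectors.  The default 64 heads x 32 sectors
 *    geometry gives 8388608 >> 11 = 4096 cylinders; that exceeds the
 *    1024-cylinder limit, so with extended translation enabled the
 *    geometry becomes 255 heads x 63 sectors and
 *    8388608 / (255 * 63) = 522 cylinders.)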
- *-F*************************************************************************/ -static void -aic7xxx_print_card(struct aic7xxx_host *p) -{ - int i, j, k, chip; - static struct register_ranges { - int num_ranges; - int range_val[32]; - } cards_ds[] = { - { 0, {0,} }, /* none */ - {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1f, 0x1f, 0x60, 0x60, /*7771*/ - 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9b, 0x9f} }, - { 9, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7850*/ - 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} }, - { 9, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7860*/ - 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} }, - {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1c, 0x1f, 0x60, 0x60, /*7870*/ - 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} }, - {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1a, 0x1c, 0x1f, 0x60, 0x60, /*7880*/ - 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} }, - {16, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7890*/ - 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9f, 0x9f, - 0xe0, 0xf1, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf8, 0xfa, 0xfc, - 0xfe, 0xff} }, - {12, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1b, 0x1f, 0x60, 0x60, /*7895*/ - 0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, - 0x9f, 0x9f, 0xe0, 0xf1} }, - {16, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7896*/ - 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9f, 0x9f, - 0xe0, 0xf1, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf8, 0xfa, 0xfc, - 0xfe, 0xff} }, - {12, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7892*/ - 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9c, 0x9f, - 0xe0, 0xf1, 0xf4, 0xfc} }, - {12, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7899*/ - 0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9c, 0x9f, - 0xe0, 0xf1, 0xf4, 0xfc} }, - }; - chip = p->chip & AHC_CHIPID_MASK; - printk("%s at ", - board_names[p->board_name_index]); - switch(p->chip & ~AHC_CHIPID_MASK) - { - case AHC_VL: - printk("VLB Slot %d.\n", p->pci_device_fn); - break; - case AHC_EISA: - printk("EISA Slot %d.\n", p->pci_device_fn); - break; - case AHC_PCI: - default: - printk("PCI %d/%d/%d.\n", p->pci_bus, PCI_SLOT(p->pci_device_fn), - PCI_FUNC(p->pci_device_fn)); - break; - } - - /* - * the registers on the card.... - */ - printk("Card Dump:\n"); - k = 0; - for(i=0; i<cards_ds[chip].num_ranges; i++) - { - for(j = cards_ds[chip].range_val[ i * 2 ]; - j <= cards_ds[chip].range_val[ i * 2 + 1 ] ; - j++) - { - printk("%02x:%02x ", j, aic_inb(p, j)); - if(++k == 13) - { - printk("\n"); - k=0; - } - } - } - if(k != 0) - printk("\n"); - - /* - * If this was an Ultra2 controller, then we just hosed the card in terms - * of the QUEUE REGS. This function is only called at init time or by - * the panic_abort function, so it's safe to assume a generic init time - * setting here - */ - - if(p->features & AHC_QUEUE_REGS) - { - aic_outb(p, 0, SDSCB_QOFF); - aic_outb(p, 0, SNSCB_QOFF); - aic_outb(p, 0, HNSCB_QOFF); - } - -} - -/*+F************************************************************************* - * Function: - * aic7xxx_print_scratch_ram - * - * Description: - * Print out the scratch RAM values on the card. 
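 *
 *   (Editor's note, not part of the original source: in the
 *    register_ranges table above, num_ranges counts inclusive
 *    [start, end] pairs, so an entry of 10 consumes twenty values
 *    from range_val[]; the dump loop walks range_val[2*i] through
 *    range_val[2*i + 1] for each pair.)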
- *-F*************************************************************************/ -static void -aic7xxx_print_scratch_ram(struct aic7xxx_host *p) -{ - int i, k; - - k = 0; - printk("Scratch RAM:\n"); - for(i = SRAM_BASE; i < SEQCTL; i++) - { - printk("%02x:%02x ", i, aic_inb(p, i)); - if(++k == 13) - { - printk("\n"); - k=0; - } - } - if (p->features & AHC_MORE_SRAM) - { - for(i = TARG_OFFSET; i < 0x80; i++) - { - printk("%02x:%02x ", i, aic_inb(p, i)); - if(++k == 13) - { - printk("\n"); - k=0; - } - } - } - printk("\n"); -} - - -#include "aic7xxx_old/aic7xxx_proc.c" - -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_VERSION(AIC7XXX_H_VERSION); - - -static struct scsi_host_template driver_template = { - .proc_info = aic7xxx_proc_info, - .detect = aic7xxx_detect, - .release = aic7xxx_release, - .info = aic7xxx_info, - .queuecommand = aic7xxx_queue, - .slave_alloc = aic7xxx_slave_alloc, - .slave_configure = aic7xxx_slave_configure, - .slave_destroy = aic7xxx_slave_destroy, - .bios_param = aic7xxx_biosparam, - .eh_abort_handler = aic7xxx_abort, - .eh_device_reset_handler = aic7xxx_bus_device_reset, - .eh_host_reset_handler = aic7xxx_reset, - .can_queue = 255, - .this_id = -1, - .max_sectors = 2048, - .cmd_per_lun = 3, - .use_clustering = ENABLE_CLUSTERING, -}; - -#include "scsi_module.c" - -/* - * Overrides for Emacs so that we almost follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-indent-level: 2 - * c-brace-imaginary-offset: 0 - * c-brace-offset: -2 - * c-argdecl-indent: 2 - * c-label-offset: -2 - * c-continued-statement-offset: 2 - * c-continued-brace-offset: 0 - * indent-tabs-mode: nil - * tab-width: 8 - * End: - */ diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.h b/drivers/scsi/aic7xxx_old/aic7xxx.h deleted file mode 100644 index 0116c8128a6..00000000000 --- a/drivers/scsi/aic7xxx_old/aic7xxx.h +++ /dev/null @@ -1,28 +0,0 @@ -/*+M************************************************************************* - * Adaptec AIC7xxx device driver for Linux. - * - * Copyright (c) 1994 John Aycock - * The University of Calgary Department of Computer Science. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; see the file COPYING. If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
- * - * $Id: aic7xxx.h,v 3.2 1996/07/23 03:37:26 deang Exp $ - *-M*************************************************************************/ -#ifndef _aic7xxx_h -#define _aic7xxx_h - -#define AIC7XXX_H_VERSION "5.2.0" - -#endif /* _aic7xxx_h */ diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.reg b/drivers/scsi/aic7xxx_old/aic7xxx.reg deleted file mode 100644 index f67b4bced01..00000000000 --- a/drivers/scsi/aic7xxx_old/aic7xxx.reg +++ /dev/null @@ -1,1401 +0,0 @@ -/* - * Aic7xxx register and scratch ram definitions. - * - * Copyright (c) 1994-1998 Justin Gibbs. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions, and the following disclaimer, - * without modification, immediately at the beginning of the file. - * 2. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * Where this Software is combined with software released under the terms of - * the GNU General Public License ("GPL") and the terms of the GPL would require the - * combined work to also be released under the terms of the GPL, the terms - * and conditions of this License will apply in addition to those of the - * GPL with the exception of any terms or conditions of this License that - * conflict with, or are expressly prohibited by, the GPL. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: aic7xxx.reg,v 1.4 1997/06/27 19:38:39 gibbs Exp $ - */ - -/* - * This file is processed by the aic7xxx_asm utility for use in assembling - * firmware for the aic7xxx family of SCSI host adapters as well as to generate - * a C header file for use in the kernel portion of the Aic7xxx driver. - * - * All page numbers refer to the Adaptec AIC-7770 Data Book available from - * Adaptec's Technical Documents Department 1-800-934-2766 - */ - -/* - * SCSI Sequence Control (p. 3-11). - * Each bit, when set starts a specific SCSI sequence on the bus - */ -register SCSISEQ { - address 0x000 - access_mode RW - bit TEMODE 0x80 - bit ENSELO 0x40 - bit ENSELI 0x20 - bit ENRSELI 0x10 - bit ENAUTOATNO 0x08 - bit ENAUTOATNI 0x04 - bit ENAUTOATNP 0x02 - bit SCSIRSTO 0x01 -} - -/* - * SCSI Transfer Control 0 Register (pp. 3-13). - * Controls the SCSI module data path. - */ -register SXFRCTL0 { - address 0x001 - access_mode RW - bit DFON 0x80 - bit DFPEXP 0x40 - bit FAST20 0x20 - bit CLRSTCNT 0x10 - bit SPIOEN 0x08 - bit SCAMEN 0x04 - bit CLRCHN 0x02 -} - -/* - * SCSI Transfer Control 1 Register (pp. 3-14,15). - * Controls the SCSI module data path. 
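These register blocks are not C; they are input to the aic7xxx_asm tool, which assembles the sequencer firmware and also emits a C header for the kernel half of the driver. As a rough, assumed illustration of that generated output (the real header may differ in detail), a declaration such as SCSISEQ above becomes an address constant plus bit masks that the kernel uses through aic_inb()/aic_outb():

#define SCSISEQ   0x000  /* register address         */
#define ENSELO    0x40   /* bit masks within SCSISEQ */
#define ENRSELI   0x10
#define SCSIRSTO  0x01

/* typical kernel-side read-modify-write using the generated names */
aic_outb(p, aic_inb(p, SCSISEQ) | ENRSELI, SCSISEQ);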
- */ -register SXFRCTL1 { - address 0x002 - access_mode RW - bit BITBUCKET 0x80 - bit SWRAPEN 0x40 - bit ENSPCHK 0x20 - mask STIMESEL 0x18 - bit ENSTIMER 0x04 - bit ACTNEGEN 0x02 - bit STPWEN 0x01 /* Powered Termination */ -} - -/* - * SCSI Control Signal Read Register (p. 3-15). - * Reads the actual state of the SCSI bus pins - */ -register SCSISIGI { - address 0x003 - access_mode RO - bit CDI 0x80 - bit IOI 0x40 - bit MSGI 0x20 - bit ATNI 0x10 - bit SELI 0x08 - bit BSYI 0x04 - bit REQI 0x02 - bit ACKI 0x01 -/* - * Possible phases in SCSISIGI - */ - mask PHASE_MASK CDI|IOI|MSGI - mask P_DATAOUT 0x00 - mask P_DATAIN IOI - mask P_COMMAND CDI - mask P_MESGOUT CDI|MSGI - mask P_STATUS CDI|IOI - mask P_MESGIN CDI|IOI|MSGI -} - -/* - * SCSI Control Signal Write Register (p. 3-16). - * Writing to this register modifies the control signals on the bus. Only - * those signals that are allowed in the current mode (Initiator/Target) are - * asserted. - */ -register SCSISIGO { - address 0x003 - access_mode WO - bit CDO 0x80 - bit IOO 0x40 - bit MSGO 0x20 - bit ATNO 0x10 - bit SELO 0x08 - bit BSYO 0x04 - bit REQO 0x02 - bit ACKO 0x01 -/* - * Possible phases to write into SCSISIG0 - */ - mask PHASE_MASK CDI|IOI|MSGI - mask P_DATAOUT 0x00 - mask P_DATAIN IOI - mask P_COMMAND CDI - mask P_MESGOUT CDI|MSGI - mask P_STATUS CDI|IOI - mask P_MESGIN CDI|IOI|MSGI -} - -/* - * SCSI Rate Control (p. 3-17). - * Contents of this register determine the Synchronous SCSI data transfer - * rate and the maximum synchronous Req/Ack offset. An offset of 0 in the - * SOFS (3:0) bits disables synchronous data transfers. Any offset value - * greater than 0 enables synchronous transfers. - */ -register SCSIRATE { - address 0x004 - access_mode RW - bit WIDEXFER 0x80 /* Wide transfer control */ - mask SXFR 0x70 /* Sync transfer rate */ - mask SXFR_ULTRA2 0x7f /* Sync transfer rate */ - mask SOFS 0x0f /* Sync offset */ -} - -/* - * SCSI ID (p. 3-18). - * Contains the ID of the board and the current target on the - * selected channel. - */ -register SCSIID { - address 0x005 - access_mode RW - mask TID 0xf0 /* Target ID mask */ - mask OID 0x0f /* Our ID mask */ - /* - * SCSI Maximum Offset (p. 4-61 aic7890/91 Data Book) - * The aic7890/91 allow an offset of up to 127 transfers in both wide - * and narrow mode. - */ - alias SCSIOFFSET - mask SOFS_ULTRA2 0x7f /* Sync offset U2 chips */ -} - -/* - * SCSI Latched Data (p. 3-19). - * Read/Write latches used to transfer data on the SCSI bus during - * Automatic or Manual PIO mode. SCSIDATH can be used for the - * upper byte of a 16bit wide asynchronouse data phase transfer. - */ -register SCSIDATL { - address 0x006 - access_mode RW -} - -register SCSIDATH { - address 0x007 - access_mode RW -} - -/* - * SCSI Transfer Count (pp. 3-19,20) - * These registers count down the number of bytes transferred - * across the SCSI bus. The counter is decremented only once - * the data has been safely transferred. SDONE in SSTAT0 is - * set when STCNT goes to 0 - */ -register STCNT { - address 0x008 - size 3 - access_mode RW -} - -/* - * Option Mode Register (Alternate Mode) (p. 5-198) - * This register is used to set certain options on Ultra3 based chips. 
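The P_* masks above make the information-transfer phase a pure function of the CDI, IOI and MSGI signal bits. A small C sketch of the decode (phase_name is an illustrative helper):

static const char *phase_name(unsigned char scsisigi)
{
  switch (scsisigi & PHASE_MASK) {  /* CDI|IOI|MSGI */
  case P_DATAOUT: return "Data-Out";
  case P_DATAIN:  return "Data-In";
  case P_COMMAND: return "Command";
  case P_MESGOUT: return "Message-Out";
  case P_STATUS:  return "Status";
  case P_MESGIN:  return "Message-In";
  default:        return "Illegal phase";
  }
}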
- * The chip must be in alternate mode (bit ALT_MODE in SFUNCT must be set)
- */
-register OPTIONMODE {
-	address 0x008
-	access_mode RW
-	bit AUTORATEEN		0x80
-	bit AUTOACKEN		0x40
-	bit ATNMGMNTEN		0x20
-	bit BUSFREEREV		0x10
-	bit EXPPHASEDIS		0x08
-	bit SCSIDATL_IMGEN	0x04
-	bit AUTO_MSGOUT_DE	0x02
-	bit DIS_MSGIN_DUALEDGE	0x01
-}
-
-
-/*
- * Clear SCSI Interrupt 0 (p. 3-20)
- * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0.
- */
-register CLRSINT0 {
-	address 0x00b
-	access_mode WO
-	bit CLRSELDO	0x40
-	bit CLRSELDI	0x20
-	bit CLRSELINGO	0x10
-	bit CLRSWRAP	0x08
-	bit CLRSPIORDY	0x02
-}
-
-/*
- * SCSI Status 0 (p. 3-21)
- * Contains one set of SCSI Interrupt codes
- * These are most likely of interest to the sequencer
- */
-register SSTAT0 {
-	address 0x00b
-	access_mode RO
-	bit TARGET	0x80	/* Board acting as target */
-	bit SELDO	0x40	/* Selection Done */
-	bit SELDI	0x20	/* Board has been selected */
-	bit SELINGO	0x10	/* Selection In Progress */
-	bit SWRAP	0x08	/* 24bit counter wrap */
-	bit IOERR	0x08	/* LVD Transceiver mode changed */
-	bit SDONE	0x04	/* STCNT = 0x000000 */
-	bit SPIORDY	0x02	/* SCSI PIO Ready */
-	bit DMADONE	0x01	/* DMA transfer completed */
-}
-
-/*
- * Clear SCSI Interrupt 1 (p. 3-23)
- * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1.
- */
-register CLRSINT1 {
-	address 0x00c
-	access_mode WO
-	bit CLRSELTIMEO	0x80
-	bit CLRATNO	0x40
-	bit CLRSCSIRSTI	0x20
-	bit CLRBUSFREE	0x08
-	bit CLRSCSIPERR	0x04
-	bit CLRPHASECHG	0x02
-	bit CLRREQINIT	0x01
-}
-
-/*
- * SCSI Status 1 (p. 3-24)
- */
-register SSTAT1 {
-	address 0x00c
-	access_mode RO
-	bit SELTO	0x80
-	bit ATNTARG	0x40
-	bit SCSIRSTI	0x20
-	bit PHASEMIS	0x10
-	bit BUSFREE	0x08
-	bit SCSIPERR	0x04
-	bit PHASECHG	0x02
-	bit REQINIT	0x01
-}
-
-/*
- * SCSI Status 2 (pp. 3-25,26)
- */
-register SSTAT2 {
-	address 0x00d
-	access_mode RO
-	bit OVERRUN		0x80
-	bit SHVALID		0x40
-	bit WIDE_RES		0x20
-	bit EXP_ACTIVE		0x10	/* SCSI Expander Active */
-	bit CRCVALERR		0x08	/* CRC Value Error */
-	bit CRCENDERR		0x04	/* CRC End Error */
-	bit CRCREQERR		0x02	/* CRC REQ Error */
-	bit DUAL_EDGE_ERROR	0x01	/* Invalid pins for Dual Edge phase */
-	mask SFCNT		0x1f
-}
-
-/*
- * SCSI Status 3 (p. 3-26)
- */
-register SSTAT3 {
-	address 0x00e
-	access_mode RO
-	mask SCSICNT	0xf0
-	mask OFFCNT	0x0f
-}
-
-/*
- * SCSI ID for the aic7890/91 chips
- */
-register SCSIID_ULTRA2 {
-	address 0x00f
-	access_mode RW
-	mask TID	0xf0	/* Target ID mask */
-	mask OID	0x0f	/* Our ID mask */
-}
-
-/*
- * SCSI Interrupt Mode 0 (p. 3-28)
- * Setting any bit will enable the corresponding function
- * in SIMODE0 to interrupt via the IRQ pin.
- */
-register SIMODE0 {
-	address 0x010
-	access_mode RW
-	bit ENSELDO	0x40
-	bit ENSELDI	0x20
-	bit ENSELINGO	0x10
-	bit ENSWRAP	0x08
-	bit ENIOERR	0x08	/* LVD Transceiver mode changes */
-	bit ENSDONE	0x04
-	bit ENSPIORDY	0x02
-	bit ENDMADONE	0x01
-}
-
-/*
- * SCSI Interrupt Mode 1 (pp. 3-28,29)
- * Setting any bit will enable the corresponding function
- * in SIMODE1 to interrupt via the IRQ pin.
- */
-register SIMODE1 {
-	address 0x011
-	access_mode RW
-	bit ENSELTIMO	0x80
-	bit ENATNTARG	0x40
-	bit ENSCSIRST	0x20
-	bit ENPHASEMIS	0x10
-	bit ENBUSFREE	0x08
-	bit ENSCSIPERR	0x04
-	bit ENPHASECHG	0x02
-	bit ENREQINIT	0x01
-}
-
-/*
- * SCSI Data Bus (High) (p. 3-29)
- * This register reads data on the SCSI Data bus directly.
- */
-register SCSIBUSL {
-	address 0x012
-	access_mode RO
-}
-
-register SCSIBUSH {
-	address 0x013
-	access_mode RO
-}
-
-/*
- * SCSI/Host Address (p.
3-30)
- * These registers hold the host address for the byte about to be
- * transferred on the SCSI bus.  They are counted up in the same
- * manner as STCNT is counted down.  SHADDR should always be used
- * to determine the address of the last byte transferred since HADDR
- * can be skewed by write ahead.
- */
-register SHADDR {
-	address 0x014
-	size 4
-	access_mode RO
-}
-
-/*
- * Selection Timeout Timer (p. 3-30)
- */
-register SELTIMER {
-	address 0x018
-	access_mode RW
-	bit STAGE6	0x20
-	bit STAGE5	0x10
-	bit STAGE4	0x08
-	bit STAGE3	0x04
-	bit STAGE2	0x02
-	bit STAGE1	0x01
-}
-
-/*
- * Selection/Reselection ID (p. 3-31)
- * Upper four bits are the device id.  The ONEBIT is set when the re/selecting
- * device did not set its own ID.
- */
-register SELID {
-	address 0x019
-	access_mode RW
-	mask SELID_MASK	0xf0
-	bit ONEBIT	0x08
-}
-
-/*
- * Serial Port I/O Capability register (p. 4-95 aic7860 Data Book)
- * Indicates if external logic has been attached to the chip to
- * perform the tasks of accessing a serial eeprom, testing termination
- * strength, and performing cable detection.  On the aic7860, most of
- * these features are handled on chip, but on the aic7855 an attached
- * aic3800 does the grunt work.
- */
-register SPIOCAP {
-	address 0x01b
-	access_mode RW
-	bit SOFT1	0x80
-	bit SOFT0	0x40
-	bit SOFTCMDEN	0x20
-	bit HAS_BRDCTL	0x10	/* External Board control */
-	bit SEEPROM	0x08	/* External serial eeprom logic */
-	bit EEPROM	0x04	/* Writable external BIOS ROM */
-	bit ROM		0x02	/* Logic for accessing external ROM */
-	bit SSPIOCPS	0x01	/* Termination and cable detection */
-}
-
-/*
- * SCSI Block Control (p. 3-32)
- * Controls Bus type and channel selection.  In a twin channel configuration
- * addresses 0x00-0x1e are gated to the appropriate channel based on this
- * register.  SELWIDE allows for the coexistence of 8bit and 16bit devices
- * on a wide bus.
- */
-register SBLKCTL {
-	address 0x01f
-	access_mode RW
-	bit DIAGLEDEN	0x80	/* Aic78X0 only */
-	bit DIAGLEDON	0x40	/* Aic78X0 only */
-	bit AUTOFLUSHDIS 0x20
-	bit SELBUSB	0x08
-	bit ENAB40	0x08	/* LVD transceiver active */
-	bit ENAB20	0x04	/* SE/HVD transceiver active */
-	bit SELWIDE	0x02
-	bit XCVR	0x01	/* External transceiver active */
-}
-
-/*
- * Sequencer Control (p. 3-33)
- * Error detection mode and speed configuration
- */
-register SEQCTL {
-	address 0x060
-	access_mode RW
-	bit PERRORDIS	0x80
-	bit PAUSEDIS	0x40
-	bit FAILDIS	0x20
-	bit FASTMODE	0x10
-	bit BRKADRINTEN	0x08
-	bit STEP	0x04
-	bit SEQRESET	0x02
-	bit LOADRAM	0x01
-}
-
-/*
- * Sequencer RAM Data (p. 3-34)
- * Single byte window into the Scratch Ram area starting at the address
- * specified by SEQADDR0 and SEQADDR1.  To write a full word, simply write
- * four bytes in succession.  The SEQADDRs will increment after the most
- * significant byte is written.
- */
-register SEQRAM {
-	address 0x061
-	access_mode RW
-}
-
-/*
- * Sequencer Address Registers (p.
3-35) - * Only the first bit of SEQADDR1 holds addressing information - */ -register SEQADDR0 { - address 0x062 - access_mode RW -} - -register SEQADDR1 { - address 0x063 - access_mode RW - mask SEQADDR1_MASK 0x01 -} - -/* - * Accumulator - * We cheat by passing arguments in the Accumulator up to the kernel driver - */ -register ACCUM { - address 0x064 - access_mode RW - accumulator -} - -register SINDEX { - address 0x065 - access_mode RW - sindex -} - -register DINDEX { - address 0x066 - access_mode RW -} - -register ALLONES { - address 0x069 - access_mode RO - allones -} - -register ALLZEROS { - address 0x06a - access_mode RO - allzeros -} - -register NONE { - address 0x06a - access_mode WO - none -} - -register FLAGS { - address 0x06b - access_mode RO - bit ZERO 0x02 - bit CARRY 0x01 -} - -register SINDIR { - address 0x06c - access_mode RO -} - -register DINDIR { - address 0x06d - access_mode WO -} - -register FUNCTION1 { - address 0x06e - access_mode RW -} - -register STACK { - address 0x06f - access_mode RO -} - -/* - * Board Control (p. 3-43) - */ -register BCTL { - address 0x084 - access_mode RW - bit ACE 0x08 - bit ENABLE 0x01 -} - -register DSCOMMAND0 { - address 0x084 - access_mode RW - bit CACHETHEN 0x80 - bit DPARCKEN 0x40 - bit MPARCKEN 0x20 - bit EXTREQLCK 0x10 - bit INTSCBRAMSEL 0x08 - bit RAMPS 0x04 - bit USCBSIZE32 0x02 - bit CIOPARCKEN 0x01 -} - -/* - * On the aic78X0 chips, Board Control is replaced by the DSCommand - * register (p. 4-64) - */ -register DSCOMMAND { - address 0x084 - access_mode RW - bit CACHETHEN 0x80 /* Cache Threshold enable */ - bit DPARCKEN 0x40 /* Data Parity Check Enable */ - bit MPARCKEN 0x20 /* Memory Parity Check Enable */ - bit EXTREQLCK 0x10 /* External Request Lock */ -} - -/* - * Bus On/Off Time (p. 3-44) - */ -register BUSTIME { - address 0x085 - access_mode RW - mask BOFF 0xf0 - mask BON 0x0f -} - -/* - * Bus Speed (p. 3-45) - */ -register BUSSPD { - address 0x086 - access_mode RW - mask DFTHRSH 0xc0 - mask STBOFF 0x38 - mask STBON 0x07 - mask DFTHRSH_100 0xc0 -} - -/* - * Host Control (p. 3-47) R/W - * Overall host control of the device. - */ -register HCNTRL { - address 0x087 - access_mode RW - bit POWRDN 0x40 - bit SWINT 0x10 - bit IRQMS 0x08 - bit PAUSE 0x04 - bit INTEN 0x02 - bit CHIPRST 0x01 - bit CHIPRSTACK 0x01 -} - -/* - * Host Address (p. 3-48) - * This register contains the address of the byte about - * to be transferred across the host bus. - */ -register HADDR { - address 0x088 - size 4 - access_mode RW -} - -register HCNT { - address 0x08c - size 3 - access_mode RW -} - -/* - * SCB Pointer (p. 3-49) - * Gate one of the four SCBs into the SCBARRAY window. - */ -register SCBPTR { - address 0x090 - access_mode RW -} - -/* - * Interrupt Status (p. 
3-50)
- * Status for system interrupts
- */
-register INTSTAT {
-	address 0x091
-	access_mode RW
-	bit BRKADRINT	0x08
-	bit SCSIINT	0x04
-	bit CMDCMPLT	0x02
-	bit SEQINT	0x01
-	mask BAD_PHASE		SEQINT		/* unknown scsi bus phase */
-	mask SEND_REJECT	0x10|SEQINT	/* sending a message reject */
-	mask NO_IDENT		0x20|SEQINT	/* no IDENTIFY after reconnect*/
-	mask NO_MATCH		0x30|SEQINT	/* no cmd match for reconnect */
-	mask EXTENDED_MSG	0x40|SEQINT	/* Extended message received */
-	mask WIDE_RESIDUE	0x50|SEQINT	/* need kernel to back up */
-						/* the SG array for us */
-	mask REJECT_MSG		0x60|SEQINT	/* Reject message received */
-	mask BAD_STATUS		0x70|SEQINT	/* Bad status from target */
-	mask RESIDUAL		0x80|SEQINT	/* Residual byte count != 0 */
-	mask AWAITING_MSG	0xa0|SEQINT	/*
-						 * Kernel requested to specify
-						 * a message to this target
-						 * (command was null), so tell
-						 * it that it can fill the
-						 * message buffer.
-						 */
-	mask SEQ_SG_FIXUP	0xb0|SEQINT	/* need help with fixing up
-						 * the sg array pointer after
-						 * a phasemis with no valid
-						 * sg elements in the shadow
-						 * pipeline.
-						 */
-	mask TRACEPOINT2	0xc0|SEQINT
-	mask MSGIN_PHASEMIS	0xd0|SEQINT	/*
-						 * Target changed phase on us
-						 * when we were expecting
-						 * another msgin byte.
-						 */
-	mask DATA_OVERRUN	0xe0|SEQINT	/*
-						 * Target attempted to write
-						 * beyond the bounds of its
-						 * command.
-						 */
-
-	mask SEQINT_MASK	0xf0|SEQINT	/* SEQINT Status Codes */
-	mask INT_PEND	(BRKADRINT|SEQINT|SCSIINT|CMDCMPLT)
-}
-
-/*
- * Hard Error (p. 3-53)
- * Reporting of catastrophic errors.  You usually cannot recover from
- * these without a full board reset.
- */
-register ERROR {
-	address 0x092
-	access_mode RO
-	bit CIOPARERR	0x80	/* Ultra2 only */
-	bit PCIERRSTAT	0x40	/* PCI only */
-	bit MPARERR	0x20	/* PCI only */
-	bit DPARERR	0x10	/* PCI only */
-	bit SQPARERR	0x08
-	bit ILLOPCODE	0x04
-	bit ILLSADDR	0x02
-	bit DSCTMOUT	0x02	/* Ultra3 only */
-	bit ILLHADDR	0x01
-}
-
-/*
- * Clear Interrupt Status (p. 3-52)
- */
-register CLRINT {
-	address 0x092
-	access_mode WO
-	bit CLRPARERR	0x10	/* PCI only */
-	bit CLRBRKADRINT 0x08
-	bit CLRSCSIINT	0x04
-	bit CLRCMDINT	0x02
-	bit CLRSEQINT	0x01
-}
-
-register DFCNTRL {
-	address 0x093
-	access_mode RW
-	bit PRELOADEN	0x80	/* aic7890 only */
-	bit WIDEODD	0x40
-	bit SCSIEN	0x20
-	bit SDMAEN	0x10
-	bit SDMAENACK	0x10
-	bit HDMAEN	0x08
-	bit HDMAENACK	0x08
-	bit DIRECTION	0x04
-	bit FIFOFLUSH	0x02
-	bit FIFORESET	0x01
-}
-
-register DFSTATUS {
-	address 0x094
-	access_mode RO
-	bit PRELOAD_AVAIL	0x80
-	bit DWORDEMP		0x20
-	bit MREQPEND		0x10
-	bit HDONE		0x08
-	bit DFTHRESH		0x04
-	bit FIFOFULL		0x02
-	bit FIFOEMP		0x01
-}
-
-register DFDAT {
-	address 0x099
-	access_mode RW
-}
-
-/*
- * SCB Auto Increment (p. 3-59)
- * Byte offset into the SCB Array and an optional bit to allow auto
- * incrementing of the address during download and upload operations
- */
-register SCBCNT {
-	address 0x09a
-	access_mode RW
-	bit SCBAUTO	0x80
-	mask SCBCNT_MASK 0x1f
-}
-
-/*
- * Queue In FIFO (p. 3-60)
- * Input queue for queued SCBs (commands that the sequencer has yet to start)
- */
-register QINFIFO {
-	address 0x09b
-	access_mode RW
-}
-
-/*
- * Queue In Count (p. 3-60)
- * Number of queued SCBs
- */
-register QINCNT {
-	address 0x09c
-	access_mode RO
-}
-
-/*
- * SCSIDATL IMAGE Register (p. 5-104)
- * Writes to this register also go to SCSIDATL, but this register will
- * preserve the data for later reading as long as the SCSIDATL_IMGEN bit
- * in the OPTIONMODE register is set.
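The INTSTAT encoding above packs the interrupt class into the low bits and, for SEQINT, a status code into the upper nibble, so a single byte tells the kernel both that the sequencer stopped and why. A sketch of the resulting kernel-side decode (the handle_* helpers are hypothetical stand-ins):

static void decode_seqint(struct aic7xxx_host *p)
{
  unsigned char intstat = aic_inb(p, INTSTAT);

  if (!(intstat & SEQINT))
    return;
  switch (intstat & SEQINT_MASK) {  /* upper nibble carries the code */
  case SEND_REJECT:
    handle_send_reject(p);
    break;
  case BAD_STATUS:
    handle_bad_status(p);
    break;
  case DATA_OVERRUN:
    handle_data_overrun(p);
    break;
  }
  aic_outb(p, CLRSEQINT, CLRINT);   /* clear the SEQINT source */
}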
- */ -register SCSIDATL_IMG { - address 0x09c - access_mode RW -} - -/* - * Queue Out FIFO (p. 3-61) - * Queue of SCBs that have completed and await the host - */ -register QOUTFIFO { - address 0x09d - access_mode WO -} - -/* - * CRC Control 1 Register (p. 5-105) - * Control bits for the Ultra 160/m CRC facilities - */ -register CRCCONTROL1 { - address 0x09d - access_mode RW - bit CRCONSEEN 0x80 /* CRC ON Single Edge ENable */ - bit CRCVALCHKEN 0x40 /* CRC Value Check Enable */ - bit CRCENDCHKEN 0x20 /* CRC End Check Enable */ - bit CRCREQCHKEN 0x10 - bit TARGCRCENDEN 0x08 /* Enable End CRC transfer when target */ - bit TARGCRCCNTEN 0x04 /* Enable CRC transfer when target */ -} - -/* - * Queue Out Count (p. 3-61) - * Number of queued SCBs in the Out FIFO - */ -register QOUTCNT { - address 0x09e - access_mode RO -} - -/* - * SCSI Phase Register (p. 5-106) - * Current bus phase - */ -register SCSIPHASE { - address 0x09e - access_mode RO - bit SP_STATUS 0x20 - bit SP_COMMAND 0x10 - bit SP_MSG_IN 0x08 - bit SP_MSG_OUT 0x04 - bit SP_DATA_IN 0x02 - bit SP_DATA_OUT 0x01 -} - -/* - * Special Function - */ -register SFUNCT { - address 0x09f - access_mode RW - bit ALT_MODE 0x80 -} - -/* - * SCB Definition (p. 5-4) - */ -scb { - address 0x0a0 - SCB_CONTROL { - size 1 - bit MK_MESSAGE 0x80 - bit DISCENB 0x40 - bit TAG_ENB 0x20 - bit DISCONNECTED 0x04 - mask SCB_TAG_TYPE 0x03 - } - SCB_TCL { - size 1 - bit SELBUSB 0x08 - mask TID 0xf0 - mask LID 0x07 - } - SCB_TARGET_STATUS { - size 1 - } - SCB_SGCOUNT { - size 1 - } - SCB_SGPTR { - size 4 - } - SCB_RESID_SGCNT { - size 1 - } - SCB_RESID_DCNT { - size 3 - } - SCB_DATAPTR { - size 4 - } - SCB_DATACNT { - /* - * Really only 3 bytes, but padded to make - * the kernel's job easier. - */ - size 4 - } - SCB_CMDPTR { - size 4 - } - SCB_CMDLEN { - size 1 - } - SCB_TAG { - size 1 - } - SCB_NEXT { - size 1 - } - SCB_PREV { - size 1 - } - SCB_BUSYTARGETS { - size 4 - } -} - -const SG_SIZEOF 0x08 /* sizeof(struct ahc_dma) */ - -/* --------------------- AHA-2840-only definitions -------------------- */ - -register SEECTL_2840 { - address 0x0c0 - access_mode RW - bit CS_2840 0x04 - bit CK_2840 0x02 - bit DO_2840 0x01 -} - -register STATUS_2840 { - address 0x0c1 - access_mode RW - bit EEPROM_TF 0x80 - mask BIOS_SEL 0x60 - mask ADSEL 0x1e - bit DI_2840 0x01 -} - -/* --------------------- AIC-7870-only definitions -------------------- */ - -register DSPCISTATUS { - address 0x086 - mask DFTHRSH_100 0xc0 -} - -register CCHADDR { - address 0x0E0 - size 8 -} - -register CCHCNT { - address 0x0E8 -} - -register CCSGRAM { - address 0x0E9 -} - -register CCSGADDR { - address 0x0EA -} - -register CCSGCTL { - address 0x0EB - bit CCSGDONE 0x80 - bit CCSGEN 0x08 - bit FLAG 0x02 - bit CCSGRESET 0x01 -} - -register CCSCBCNT { - address 0xEF -} - -register CCSCBCTL { - address 0x0EE - bit CCSCBDONE 0x80 - bit ARRDONE 0x40 /* SCB Array prefetch done */ - bit CCARREN 0x10 - bit CCSCBEN 0x08 - bit CCSCBDIR 0x04 - bit CCSCBRESET 0x01 -} - -register CCSCBADDR { - address 0x0ED -} - -register CCSCBRAM { - address 0xEC -} - -register CCSCBPTR { - address 0x0F1 -} - -register HNSCB_QOFF { - address 0x0F4 -} - -register HESCB_QOFF { - address 0x0F5 -} - -register SNSCB_QOFF { - address 0x0F6 -} - -register SESCB_QOFF { - address 0x0F7 -} - -register SDSCB_QOFF { - address 0x0F8 -} - -register QOFF_CTLSTA { - address 0x0FA - bit ESTABLISH_SCB_AVAIL 0x80 - bit SCB_AVAIL 0x40 - bit SNSCB_ROLLOVER 0x20 - bit SDSCB_ROLLOVER 0x10 - bit SESCB_ROLLOVER 0x08 - mask SCB_QSIZE 0x07 - mask SCB_QSIZE_256 
0x06 -} - -register DFF_THRSH { - address 0x0FB - mask WR_DFTHRSH 0x70 - mask RD_DFTHRSH 0x07 - mask RD_DFTHRSH_MIN 0x00 - mask RD_DFTHRSH_25 0x01 - mask RD_DFTHRSH_50 0x02 - mask RD_DFTHRSH_63 0x03 - mask RD_DFTHRSH_75 0x04 - mask RD_DFTHRSH_85 0x05 - mask RD_DFTHRSH_90 0x06 - mask RD_DFTHRSH_MAX 0x07 - mask WR_DFTHRSH_MIN 0x00 - mask WR_DFTHRSH_25 0x10 - mask WR_DFTHRSH_50 0x20 - mask WR_DFTHRSH_63 0x30 - mask WR_DFTHRSH_75 0x40 - mask WR_DFTHRSH_85 0x50 - mask WR_DFTHRSH_90 0x60 - mask WR_DFTHRSH_MAX 0x70 -} - -register SG_CACHEPTR { - access_mode RW - address 0x0fc - mask SG_USER_DATA 0xfc - bit LAST_SEG 0x02 - bit LAST_SEG_DONE 0x01 -} - -register BRDCTL { - address 0x01d - bit BRDDAT7 0x80 - bit BRDDAT6 0x40 - bit BRDDAT5 0x20 - bit BRDSTB 0x10 - bit BRDCS 0x08 - bit BRDRW 0x04 - bit BRDCTL1 0x02 - bit BRDCTL0 0x01 - /* 7890 Definitions */ - bit BRDDAT4 0x10 - bit BRDDAT3 0x08 - bit BRDDAT2 0x04 - bit BRDRW_ULTRA2 0x02 - bit BRDSTB_ULTRA2 0x01 -} - -/* - * Serial EEPROM Control (p. 4-92 in 7870 Databook) - * Controls the reading and writing of an external serial 1-bit - * EEPROM Device. In order to access the serial EEPROM, you must - * first set the SEEMS bit that generates a request to the memory - * port for access to the serial EEPROM device. When the memory - * port is not busy servicing another request, it reconfigures - * to allow access to the serial EEPROM. When this happens, SEERDY - * gets set high to verify that the memory port access has been - * granted. - * - * After successful arbitration for the memory port, the SEECS bit of - * the SEECTL register is connected to the chip select. The SEECK, - * SEEDO, and SEEDI are connected to the clock, data out, and data in - * lines respectively. The SEERDY bit of SEECTL is useful in that it - * gives us an 800 nsec timer. After a write to the SEECTL register, - * the SEERDY goes high 800 nsec later. The one exception to this is - * when we first request access to the memory port. The SEERDY goes - * high to signify that access has been granted and, for this case, has - * no implied timing. - * - * See 93cx6.c for detailed information on the protocol necessary to - * read the serial EEPROM. - */ -register SEECTL { - address 0x01e - bit EXTARBACK 0x80 - bit EXTARBREQ 0x40 - bit SEEMS 0x20 - bit SEERDY 0x10 - bit SEECS 0x08 - bit SEECK 0x04 - bit SEEDO 0x02 - bit SEEDI 0x01 -} -/* ---------------------- Scratch RAM Offsets ------------------------- */ -/* These offsets are either to values that are initialized by the board's - * BIOS or are specified by the sequencer code. - * - * The host adapter card (at least the BIOS) uses 20-2f for SCSI - * device information, 32-33 and 5a-5f as well. As it turns out, the - * BIOS trashes 20-2f, writing the synchronous negotiation results - * on top of the BIOS values, so we re-use those for our per-target - * scratchspace (actually a value that can be copied directly into - * SCSIRATE). The kernel driver will enable synchronous negotiation - * for all targets that have a value other than 0 in the lower four - * bits of the target scratch space. This should work regardless of - * whether the bios has been installed. - */ - -scratch_ram { - address 0x020 - - /* - * 1 byte per target starting at this address for configuration values - */ - TARG_SCSIRATE { - size 16 - } - /* - * Bit vector of targets that have ULTRA enabled. - */ - ULTRA_ENB { - size 2 - } - /* - * Bit vector of targets that have disconnection disabled. 
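ULTRA_ENB above (and DISC_DSB just below) are two-byte bit vectors with one bit per target ID, the low byte covering targets 0-7. In C terms (a sketch; target_bit_set is an illustrative helper):

static int target_bit_set(const unsigned char vec[2], unsigned char tid)
{
  /* vec[0] holds targets 0-7, vec[1] holds targets 8-15 */
  return (vec[tid >> 3] >> (tid & 7)) & 1;
}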
- */
-	DISC_DSB {
-		size 2
-	}
-	/*
-	 * Single byte buffer used to designate the type of message
-	 * to send to a target.
-	 */
-	MSG_OUT {
-		size 1
-	}
-	/* Parameters for DMA Logic */
-	DMAPARAMS {
-		size 1
-		bit PRELOADEN	0x80
-		bit WIDEODD	0x40
-		bit SCSIEN	0x20
-		bit SDMAEN	0x10
-		bit SDMAENACK	0x10
-		bit HDMAEN	0x08
-		bit HDMAENACK	0x08
-		bit DIRECTION	0x04
-		bit FIFOFLUSH	0x02
-		bit FIFORESET	0x01
-	}
-	SEQ_FLAGS {
-		size 1
-		bit IDENTIFY_SEEN	0x80
-		bit SCBPTR_VALID	0x20
-		bit DPHASE		0x10
-		bit AMTARGET		0x08
-		bit WIDE_BUS		0x02
-		bit TWIN_BUS		0x01
-	}
-	/*
-	 * Temporary storage for the
-	 * target/channel/lun of a
-	 * reconnecting target
-	 */
-	SAVED_TCL {
-		size 1
-	}
-	/* Working value of the number of SG segments left */
-	SG_COUNT {
-		size 1
-	}
-	/* Working value of SG pointer */
-	SG_NEXT {
-		size 4
-	}
-	/*
-	 * The last bus phase as seen by the sequencer.
-	 */
-	LASTPHASE {
-		size 1
-		bit CDI		0x80
-		bit IOI		0x40
-		bit MSGI	0x20
-		mask PHASE_MASK	CDI|IOI|MSGI
-		mask P_DATAOUT	0x00
-		mask P_DATAIN	IOI
-		mask P_COMMAND	CDI
-		mask P_MESGOUT	CDI|MSGI
-		mask P_STATUS	CDI|IOI
-		mask P_MESGIN	CDI|IOI|MSGI
-		mask P_BUSFREE	0x01
-	}
-	/*
-	 * head of list of SCBs awaiting
-	 * selection
-	 */
-	WAITING_SCBH {
-		size 1
-	}
-	/*
-	 * head of list of SCBs that are
-	 * disconnected.  Used for SCB
-	 * paging.
-	 */
-	DISCONNECTED_SCBH {
-		size 1
-	}
-	/*
-	 * head of list of SCBs that are
-	 * not in use.  Used for SCB paging.
-	 */
-	FREE_SCBH {
-		size 1
-	}
-	/*
-	 * Address of the hardware scb array in the host.
-	 */
-	HSCB_ADDR {
-		size 4
-	}
-	/*
-	 * Address of the 256 byte array storing the SCBID of outstanding
-	 * untagged SCBs indexed by TCL.
-	 */
-	SCBID_ADDR {
-		size 4
-	}
-	/*
-	 * Address of the array of command descriptors used to store
-	 * information about incoming selections.
-	 */
-	TMODE_CMDADDR {
-		size 4
-	}
-	KERNEL_QINPOS {
-		size 1
-	}
-	QINPOS {
-		size 1
-	}
-	QOUTPOS {
-		size 1
-	}
-	/*
-	 * Offset into the command descriptor array for the next
-	 * available descriptor to use.
-	 */
-	TMODE_CMDADDR_NEXT {
-		size 1
-	}
-	ARG_1 {
-		size 1
-		mask SEND_MSG		0x80
-		mask SEND_SENSE		0x40
-		mask SEND_REJ		0x20
-		mask MSGOUT_PHASEMIS	0x10
-		alias RETURN_1
-	}
-	ARG_2 {
-		size 1
-		alias RETURN_2
-	}
-
-	/*
-	 * Snapshot of MSG_OUT taken after each message is sent.
-	 */
-	LAST_MSG {
-		size 1
-	}
-
-	/*
-	 * Number of times we have filled the CCSGRAM with prefetched
-	 * SG elements.
-	 */
-	PREFETCH_CNT {
-		size 1
-	}
-
-
-	/*
-	 * These are reserved registers in the card's scratch ram.  Some of
-	 * the values are specified in the AHA2742 technical reference manual
-	 * and are initialized by the BIOS at boot time.
-	 */
-	SCSICONF {
-		address 0x05a
-		size 1
-		bit TERM_ENB	0x80
-		bit RESET_SCSI	0x40
-		mask HSCSIID	0x07	/* our SCSI ID */
-		mask HWSCSIID	0x0f	/* our SCSI ID if Wide Bus */
-	}
-	HOSTCONF {
-		address 0x05d
-		size 1
-	}
-	HA_274_BIOSCTRL {
-		address 0x05f
-		size 1
-		mask BIOSMODE		0x30
-		mask BIOSDISABLED	0x30
-		bit CHANNEL_B_PRIMARY	0x08
-	}
-	/*
-	 * Per target SCSI offset values for Ultra2 controllers.
- */ - TARG_OFFSET { - address 0x070 - size 16 - } -} - -const SCB_LIST_NULL 0xff - -const CCSGADDR_MAX 0x80 -const CCSGRAM_MAXSEGS 16 - -/* Offsets into the SCBID array where different data is stored */ -const UNTAGGEDSCB_OFFSET 0 -const QOUTFIFO_OFFSET 1 -const QINFIFO_OFFSET 2 - -/* WDTR Message values */ -const BUS_8_BIT 0x00 -const BUS_16_BIT 0x01 -const BUS_32_BIT 0x02 - -/* Offset maximums */ -const MAX_OFFSET_8BIT 0x0f -const MAX_OFFSET_16BIT 0x08 -const MAX_OFFSET_ULTRA2 0x7f -const HOST_MSG 0xff - -/* Target mode command processing constants */ -const CMD_GROUP_CODE_SHIFT 0x05 -const CMD_GROUP0_BYTE_DELTA -4 -const CMD_GROUP2_BYTE_DELTA -6 -const CMD_GROUP4_BYTE_DELTA 4 -const CMD_GROUP5_BYTE_DELTA 11 - -/* - * Downloaded (kernel inserted) constants - */ - -/* - * Number of command descriptors in the command descriptor array. - */ -const TMODE_NUMCMDS download diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.seq b/drivers/scsi/aic7xxx_old/aic7xxx.seq deleted file mode 100644 index 823ff287322..00000000000 --- a/drivers/scsi/aic7xxx_old/aic7xxx.seq +++ /dev/null @@ -1,1539 +0,0 @@ -/* - * Adaptec 274x/284x/294x device driver firmware for Linux and FreeBSD. - * - * Copyright (c) 1994-1999 Justin Gibbs. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions, and the following disclaimer, - * without modification, immediately at the beginning of the file. - * 2. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * Where this Software is combined with software released under the terms of - * the GNU General Public License (GPL) and the terms of the GPL would require the - * combined work to also be released under the terms of the GPL, the terms - * and conditions of this License will apply in addition to those of the - * GPL with the exception of any terms or conditions of this License that - * conflict with, or are expressly prohibited by, the GPL. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: aic7xxx.seq,v 1.77 1998/06/28 02:58:57 gibbs Exp $ - */ - -#include "aic7xxx.reg" -#include "scsi_message.h" - -/* - * A few words on the waiting SCB list: - * After starting the selection hardware, we check for reconnecting targets - * as well as for our selection to complete just in case the reselection wins - * bus arbitration. 
The problem with this is that we must keep track of the - * SCB that we've already pulled from the QINFIFO and started the selection - * on just in case the reselection wins so that we can retry the selection at - * a later time. This problem cannot be resolved by holding a single entry - * in scratch ram since a reconnecting target can request sense and this will - * create yet another SCB waiting for selection. The solution used here is to - * use byte 27 of the SCB as a pseudo-next pointer and to thread a list - * of SCBs that are awaiting selection. Since 0-0xfe are valid SCB indexes, - * SCB_LIST_NULL is 0xff which is out of range. An entry is also added to - * this list every time a request sense occurs or after completing a non-tagged - * command for which a second SCB has been queued. The sequencer will - * automatically consume the entries. - */ - -reset: - clr SCSISIGO; /* De-assert BSY */ - and SXFRCTL1, ~BITBUCKET; - /* Always allow reselection */ - mvi SCSISEQ, ENRSELI|ENAUTOATNP; - - if ((p->features & AHC_CMD_CHAN) != 0) { - /* Ensure that no DMA operations are in progress */ - clr CCSGCTL; - clr CCSCBCTL; - } - - call clear_target_state; -poll_for_work: - and SXFRCTL0, ~SPIOEN; - if ((p->features & AHC_QUEUE_REGS) == 0) { - mov A, QINPOS; - } -poll_for_work_loop: - if ((p->features & AHC_QUEUE_REGS) == 0) { - and SEQCTL, ~PAUSEDIS; - } - test SSTAT0, SELDO|SELDI jnz selection; - test SCSISEQ, ENSELO jnz poll_for_work; - if ((p->features & AHC_TWIN) != 0) { - /* - * Twin channel devices cannot handle things like SELTO - * interrupts on the "background" channel. So, if we - * are selecting, keep polling the current channel util - * either a selection or reselection occurs. - */ - xor SBLKCTL,SELBUSB; /* Toggle to the other bus */ - test SSTAT0, SELDO|SELDI jnz selection; - test SCSISEQ, ENSELO jnz poll_for_work; - xor SBLKCTL,SELBUSB; /* Toggle back */ - } - cmp WAITING_SCBH,SCB_LIST_NULL jne start_waiting; -test_queue: - /* Has the driver posted any work for us? */ - if ((p->features & AHC_QUEUE_REGS) != 0) { - test QOFF_CTLSTA, SCB_AVAIL jz poll_for_work_loop; - mov NONE, SNSCB_QOFF; - inc QINPOS; - } else { - or SEQCTL, PAUSEDIS; - cmp KERNEL_QINPOS, A je poll_for_work_loop; - inc QINPOS; - and SEQCTL, ~PAUSEDIS; - } - -/* - * We have at least one queued SCB now and we don't have any - * SCBs in the list of SCBs awaiting selection. If we have - * any SCBs available for use, pull the tag from the QINFIFO - * and get to work on it. - */ - if ((p->flags & AHC_PAGESCBS) != 0) { - mov ALLZEROS call get_free_or_disc_scb; - } - -dequeue_scb: - add A, -1, QINPOS; - mvi QINFIFO_OFFSET call fetch_byte; - - if ((p->flags & AHC_PAGESCBS) == 0) { - /* In the non-paging case, the SCBID == hardware SCB index */ - mov SCBPTR, RETURN_2; - } -dma_queued_scb: -/* - * DMA the SCB from host ram into the current SCB location. - */ - mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; - mov RETURN_2 call dma_scb; - -/* - * Preset the residual fields in case we never go through a data phase. - * This isn't done by the host so we can avoid a DMA to clear these - * fields for the normal case of I/O that completes without underrun - * or overrun conditions. 
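The waiting-list scheme described at the top of this file is an intrusive singly linked list over one-byte SCB indices, with 0xff as the out-of-band terminator. Modeled in plain C (a sketch of the discipline, not driver code):

#define SCB_LIST_NULL 0xff  /* valid SCB indices are 0-0xfe */

static unsigned char scb_next[255];  /* byte 27 of each hardware SCB */
static unsigned char waiting_scbh = SCB_LIST_NULL;

static void push_waiting_scb(unsigned char scb)
{
  scb_next[scb] = waiting_scbh;  /* mov SCB_NEXT,WAITING_SCBH */
  waiting_scbh = scb;            /* mov WAITING_SCBH,SCBPTR   */
}

static unsigned char pop_waiting_scb(void)
{
  unsigned char scb = waiting_scbh;

  if (scb != SCB_LIST_NULL)
    waiting_scbh = scb_next[scb];
  return scb;
}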
- */ - if ((p->features & AHC_CMD_CHAN) != 0) { - bmov SCB_RESID_DCNT, SCB_DATACNT, 3; - } else { - mov SCB_RESID_DCNT[0],SCB_DATACNT[0]; - mov SCB_RESID_DCNT[1],SCB_DATACNT[1]; - mov SCB_RESID_DCNT[2],SCB_DATACNT[2]; - } - mov SCB_RESID_SGCNT, SCB_SGCOUNT; - -start_scb: - /* - * Place us on the waiting list in case our selection - * doesn't win during bus arbitration. - */ - mov SCB_NEXT,WAITING_SCBH; - mov WAITING_SCBH, SCBPTR; -start_waiting: - /* - * Pull the first entry off of the waiting SCB list. - */ - mov SCBPTR, WAITING_SCBH; - call start_selection; - jmp poll_for_work; - -start_selection: - if ((p->features & AHC_TWIN) != 0) { - and SINDEX,~SELBUSB,SBLKCTL;/* Clear channel select bit */ - and A,SELBUSB,SCB_TCL; /* Get new channel bit */ - or SINDEX,A; - mov SBLKCTL,SINDEX; /* select channel */ - } -initialize_scsiid: - if ((p->features & AHC_ULTRA2) != 0) { - and A, TID, SCB_TCL; /* Get target ID */ - and SCSIID_ULTRA2, OID; /* Clear old target */ - or SCSIID_ULTRA2, A; - } else { - and A, TID, SCB_TCL; /* Get target ID */ - and SCSIID, OID; /* Clear old target */ - or SCSIID, A; - } - mov SCSIDATL, ALLZEROS; /* clear out the latched */ - /* data register, this */ - /* fixes a bug on some */ - /* controllers where the */ - /* last byte written to */ - /* this register can leak */ - /* onto the data bus at */ - /* bad times, such as during */ - /* selection timeouts */ - mvi SCSISEQ, ENSELO|ENAUTOATNO|ENRSELI|ENAUTOATNP ret; - -/* - * Initialize Ultra mode setting and clear the SCSI channel. - * SINDEX should contain any additional bit's the client wants - * set in SXFRCTL0. - */ -initialize_channel: - or SXFRCTL0, CLRSTCNT|CLRCHN, SINDEX; - if ((p->features & AHC_ULTRA) != 0) { -ultra: - mvi SINDEX, ULTRA_ENB+1; - test SAVED_TCL, 0x80 jnz ultra_2; /* Target ID > 7 */ - dec SINDEX; -ultra_2: - mov FUNCTION1,SAVED_TCL; - mov A,FUNCTION1; - test SINDIR, A jz ndx_dtr; - or SXFRCTL0, FAST20; - } -/* - * Initialize SCSIRATE with the appropriate value for this target. - * The SCSIRATE settings for each target are stored in an array - * based at TARG_SCSIRATE. - */ -ndx_dtr: - shr A,4,SAVED_TCL; - if ((p->features & AHC_TWIN) != 0) { - test SBLKCTL,SELBUSB jz ndx_dtr_2; - or SAVED_TCL, SELBUSB; - or A,0x08; /* Channel B entries add 8 */ -ndx_dtr_2: - } - - if ((p->features & AHC_ULTRA2) != 0) { - add SINDEX, TARG_OFFSET, A; - mov SCSIOFFSET, SINDIR; - } - - add SINDEX,TARG_SCSIRATE,A; - mov SCSIRATE,SINDIR ret; - - -selection: - test SSTAT0,SELDO jnz select_out; -/* - * Reselection has been initiated by a target. Make a note that we've been - * reselected, but haven't seen an IDENTIFY message from the target yet. - */ -initiator_reselect: - mvi CLRSINT0, CLRSELDI; - /* XXX test for and handle ONE BIT condition */ - and SAVED_TCL, SELID_MASK, SELID; - mvi CLRSINT1,CLRBUSFREE; - or SIMODE1, ENBUSFREE; /* - * We aren't expecting a - * bus free, so interrupt - * the kernel driver if it - * happens. - */ - mvi SPIOEN call initialize_channel; - mvi MSG_OUT, MSG_NOOP; /* No message to send */ - jmp ITloop; - -/* - * After the selection, remove this SCB from the "waiting SCB" - * list. This is achieved by simply moving our "next" pointer into - * WAITING_SCBH. Our next pointer will be set to null the next time this - * SCB is used, so don't bother with it now. 
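The index arithmetic in ndx_dtr above, restated in C (a sketch; rate_index is an illustrative name):

static unsigned char rate_index(unsigned char saved_tcl, int channel_b)
{
  unsigned char idx = saved_tcl >> 4;  /* shr A,4,SAVED_TCL */

  if (channel_b)
    idx += 8;  /* channel B entries add 8 */
  return idx;  /* offset into the TARG_SCSIRATE scratch array */
}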
- */ -select_out: - /* Turn off the selection hardware */ - mvi SCSISEQ, ENRSELI|ENAUTOATNP; /* - * ATN on parity errors - * for "in" phases - */ - mvi CLRSINT0, CLRSELDO; - mov SCBPTR, WAITING_SCBH; - mov WAITING_SCBH,SCB_NEXT; - mov SAVED_TCL, SCB_TCL; - mvi CLRSINT1,CLRBUSFREE; - or SIMODE1, ENBUSFREE; /* - * We aren't expecting a - * bus free, so interrupt - * the kernel driver if it - * happens. - */ - mvi SPIOEN call initialize_channel; -/* - * As soon as we get a successful selection, the target should go - * into the message out phase since we have ATN asserted. - */ - mvi MSG_OUT, MSG_IDENTIFYFLAG; - or SEQ_FLAGS, IDENTIFY_SEEN; - -/* - * Main loop for information transfer phases. Wait for the target - * to assert REQ before checking MSG, C/D and I/O for the bus phase. - */ -ITloop: - call phase_lock; - - mov A, LASTPHASE; - - test A, ~P_DATAIN jz p_data; - cmp A,P_COMMAND je p_command; - cmp A,P_MESGOUT je p_mesgout; - cmp A,P_STATUS je p_status; - cmp A,P_MESGIN je p_mesgin; - - mvi INTSTAT,BAD_PHASE; /* unknown phase - signal driver */ - jmp ITloop; /* Try reading the bus again. */ - -await_busfree: - and SIMODE1, ~ENBUSFREE; - call clear_target_state; - mov NONE, SCSIDATL; /* Ack the last byte */ - and SXFRCTL0, ~SPIOEN; - test SSTAT1,REQINIT|BUSFREE jz .; - test SSTAT1, BUSFREE jnz poll_for_work; - mvi INTSTAT, BAD_PHASE; - -clear_target_state: - /* - * We assume that the kernel driver may reset us - * at any time, even in the middle of a DMA, so - * clear DFCNTRL too. - */ - clr DFCNTRL; - - /* - * We don't know the target we will connect to, - * so default to narrow transfers to avoid - * parity problems. - */ - if ((p->features & AHC_ULTRA2) != 0) { - bmov SCSIRATE, ALLZEROS, 2; - } else { - clr SCSIRATE; - and SXFRCTL0, ~(FAST20); - } - mvi LASTPHASE, P_BUSFREE; - /* clear target specific flags */ - clr SEQ_FLAGS ret; - - -data_phase_reinit: -/* - * If we re-enter the data phase after going through another phase, the - * STCNT may have been cleared, so restore it from the residual field. - * On Ultra2, we have to put it into the HCNT field because we have to - * drop the data down into the shadow layer via the preload ability. - */ - if ((p->features & AHC_ULTRA2) != 0) { - bmov HADDR, SHADDR, 4; - bmov HCNT, SCB_RESID_DCNT, 3; - } - if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895) { - bmov STCNT, SCB_RESID_DCNT, 3; - } - if ((p->features & AHC_CMD_CHAN) == 0) { - mvi DINDEX, STCNT; - mvi SCB_RESID_DCNT call bcopy_3; - } - jmp data_phase_loop; -p_data: - if ((p->features & AHC_ULTRA2) != 0) { - mvi DMAPARAMS, PRELOADEN|SCSIEN|HDMAEN; - } else { - mvi DMAPARAMS, WIDEODD|SCSIEN|SDMAEN|HDMAEN|FIFORESET; - } - test LASTPHASE, IOI jnz . + 2; - or DMAPARAMS, DIRECTION; - call assert; /* - * Ensure entering a data - * phase is okay - seen identify, etc. - */ - if ((p->features & AHC_CMD_CHAN) != 0) { - mvi CCSGADDR, CCSGADDR_MAX; - } - - test SEQ_FLAGS, DPHASE jnz data_phase_reinit; - or SEQ_FLAGS, DPHASE; /* we've seen a data phase */ - /* - * Initialize the DMA address and counter from the SCB. - * Also set SG_COUNT and SG_NEXT in memory since we cannot - * modify the values in the SCB itself until we see a - * save data pointers message. 
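The ITloop dispatch above leans on the fact that the two data phases are the only LASTPHASE values with no bits set outside IOI, so a single masked test routes both to p_data. The same logic in C (a sketch; the handle_* and signal_driver names are hypothetical):

static void dispatch_phase(struct aic7xxx_host *p, unsigned char phase)
{
  if ((phase & ~P_DATAIN) == 0)   /* P_DATAOUT or P_DATAIN */
    handle_data(p, phase);
  else if (phase == P_COMMAND)
    handle_command(p);
  else if (phase == P_MESGOUT)
    handle_mesgout(p);
  else if (phase == P_STATUS)
    handle_status(p);
  else if (phase == P_MESGIN)
    handle_mesgin(p);
  else
    signal_driver(p, BAD_PHASE);  /* mvi INTSTAT,BAD_PHASE */
}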
- */ - if ((p->features & AHC_CMD_CHAN) != 0) { - bmov HADDR, SCB_DATAPTR, 7; - bmov SG_COUNT, SCB_SGCOUNT, 5; - if ((p->features & AHC_ULTRA2) == 0) { - bmov STCNT, HCNT, 3; - } - } else { - mvi DINDEX, HADDR; - mvi SCB_DATAPTR call bcopy_7; - call set_stcnt_from_hcnt; - mvi DINDEX, SG_COUNT; - mvi SCB_SGCOUNT call bcopy_5; - } -data_phase_loop: - /* Guard against overruns */ - test SG_COUNT, 0xff jnz data_phase_inbounds; -/* - * Turn on 'Bit Bucket' mode, set the transfer count to - * 16meg and let the target run until it changes phase. - * When the transfer completes, notify the host that we - * had an overrun. - */ - or SXFRCTL1,BITBUCKET; - and DMAPARAMS, ~(HDMAEN|SDMAEN); - if ((p->features & AHC_ULTRA2) != 0) { - bmov HCNT, ALLONES, 3; - } - if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895) { - bmov STCNT, ALLONES, 3; - } - if ((p->features & AHC_CMD_CHAN) == 0) { - mvi STCNT[0], 0xFF; - mvi STCNT[1], 0xFF; - mvi STCNT[2], 0xFF; - } - -data_phase_inbounds: -/* If we are the last SG block, tell the hardware. */ - if ((p->features & AHC_ULTRA2) != 0) { - shl A, 2, SG_COUNT; - cmp SG_COUNT,0x01 jne data_phase_wideodd; - or A, LAST_SEG; - } else { - cmp SG_COUNT,0x01 jne data_phase_wideodd; - and DMAPARAMS, ~WIDEODD; - } -data_phase_wideodd: - if ((p->features & AHC_ULTRA2) != 0) { - mov SG_CACHEPTR, A; - mov DFCNTRL, DMAPARAMS; /* start the operation */ - test SXFRCTL1, BITBUCKET jnz data_phase_overrun; -u2_preload_wait: - test SSTAT1, PHASEMIS jnz u2_phasemis; - test DFSTATUS, PRELOAD_AVAIL jz u2_preload_wait; - } else { - mov DMAPARAMS call dma; -data_phase_dma_done: -/* Go tell the host about any overruns */ - test SXFRCTL1,BITBUCKET jnz data_phase_overrun; - -/* Exit if we had an underrun. dma clears SINDEX in this case. */ - test SINDEX,0xff jz data_phase_finish; - } -/* - * Advance the scatter-gather pointers - */ -sg_advance: - if ((p->features & AHC_ULTRA2) != 0) { - cmp SG_COUNT, 0x01 je u2_data_phase_finish; - } else { - dec SG_COUNT; - test SG_COUNT, 0xff jz data_phase_finish; - } - - if ((p->features & AHC_CMD_CHAN) != 0) { - - /* - * Do we have any prefetch left??? - */ - cmp CCSGADDR, CCSGADDR_MAX jne prefetch_avail; - - /* - * Fetch MIN(CCSGADDR_MAX, (SG_COUNT * 8)) bytes. - */ - add A, -(CCSGRAM_MAXSEGS + 1), SG_COUNT; - mvi A, CCSGADDR_MAX; - jc . + 2; - shl A, 3, SG_COUNT; - mov CCHCNT, A; - bmov CCHADDR, SG_NEXT, 4; - mvi CCSGCTL, CCSGEN|CCSGRESET; - test CCSGCTL, CCSGDONE jz .; - and CCSGCTL, ~CCSGEN; - test CCSGCTL, CCSGEN jnz .; - mvi CCSGCTL, CCSGRESET; -prefetch_avail: - bmov HADDR, CCSGRAM, 8; - if ((p->features & AHC_ULTRA2) == 0) { - bmov STCNT, HCNT, 3; - } else { - dec SG_COUNT; - } - } else { - mvi DINDEX, HADDR; - mvi SG_NEXT call bcopy_4; - - mvi HCNT[0],SG_SIZEOF; - clr HCNT[1]; - clr HCNT[2]; - - or DFCNTRL, HDMAEN|DIRECTION|FIFORESET; - - call dma_finish; - -/* - * Copy data from FIFO into SCB data pointer and data count. - * This assumes that the SG segments are of the form: - * struct ahc_dma_seg { - * u_int32_t addr; four bytes, little-endian order - * u_int32_t len; four bytes, little endian order - * }; - */ - mvi DINDEX, HADDR; - call dfdat_in_7; - call set_stcnt_from_hcnt; - } -/* Advance the SG pointer */ - clr A; /* add sizeof(struct scatter) */ - add SG_NEXT[0],SG_SIZEOF; - adc SG_NEXT[1],A; - - if ((p->features & AHC_ULTRA2) != 0) { - jmp data_phase_loop; - } else { - test SSTAT1, REQINIT jz .; - test SSTAT1,PHASEMIS jz data_phase_loop; - } - - -/* - * We've loaded all of our segments into the preload layer. 
Now, we simply - * have to wait for it to finish or for us to get a phasemis. And, since - * we'll get a phasemis if we do finish, all we really need to do is wait - * for a phasemis then check if we did actually complete all the segments. - */ - if ((p->features & AHC_ULTRA2) != 0) { -u2_data_phase_finish: - test SSTAT1, PHASEMIS jnz u2_phasemis; - test SG_CACHEPTR, LAST_SEG_DONE jz u2_data_phase_finish; - clr SG_COUNT; - test SSTAT1, REQINIT jz .; - test SSTAT1, PHASEMIS jz data_phase_loop; -u2_phasemis: - call ultra2_dmafinish; - test SG_CACHEPTR, LAST_SEG_DONE jnz data_phase_finish; - test SSTAT2, SHVALID jnz u2_fixup_residual; - mvi INTSTAT, SEQ_SG_FIXUP; - jmp data_phase_finish; -u2_fixup_residual: - shr ARG_1, 2, SG_CACHEPTR; -u2_phasemis_loop: - and A, 0x3f, SG_COUNT; - cmp ARG_1, A je data_phase_finish; -/* - * Subtract SG_SIZEOF from the SG_NEXT pointer and add 1 to the SG_COUNT - */ - clr A; - add SG_NEXT[0], -SG_SIZEOF; - adc SG_NEXT[1], 0xff; - inc SG_COUNT; - jmp u2_phasemis_loop; - } - -data_phase_finish: -/* - * After a DMA finishes, save the SG and STCNT residuals back into the SCB - * We use STCNT instead of HCNT, since it's a reflection of how many bytes - * were transferred on the SCSI (as opposed to the host) bus. - */ - if ((p->features & AHC_CMD_CHAN) != 0) { - bmov SCB_RESID_DCNT, STCNT, 3; - mov SCB_RESID_SGCNT, SG_COUNT; - if ((p->features & AHC_ULTRA2) != 0) { - or SXFRCTL0, CLRSTCNT|CLRCHN; - } - } else { - mov SCB_RESID_DCNT[0],STCNT[0]; - mov SCB_RESID_DCNT[1],STCNT[1]; - mov SCB_RESID_DCNT[2],STCNT[2]; - mov SCB_RESID_SGCNT, SG_COUNT; - } - - jmp ITloop; - -data_phase_overrun: -/* - * Turn off BITBUCKET mode and notify the host - */ - if ((p->features & AHC_ULTRA2) != 0) { -/* - * Wait for the target to quit transferring data on the SCSI bus - */ - test SSTAT1, PHASEMIS jz .; - call ultra2_dmafinish; - } - and SXFRCTL1, ~BITBUCKET; - mvi INTSTAT,DATA_OVERRUN; - jmp ITloop; - - - - -/* - * Actually turn off the DMA hardware, save our current position into the - * proper residual variables, wait for the next REQ signal, then jump to - * the ITloop. Jumping to the ITloop ensures that if we happen to get - * brought into the data phase again (or are still in it after our last - * segment) that we will properly signal an overrun to the kernel. - */ - if ((p->features & AHC_ULTRA2) != 0) { -ultra2_dmafinish: - test DFCNTRL, DIRECTION jnz ultra2_dmahalt; - and DFCNTRL, ~SCSIEN; - test DFCNTRL, SCSIEN jnz .; - if ((p->bugs & AHC_BUG_AUTOFLUSH) != 0) { - or DFCNTRL, FIFOFLUSH; - } -ultra2_dmafifoflush: - if ((p->bugs & AHC_BUG_AUTOFLUSH) != 0) { - /* - * hardware bug alert! This needless set of jumps - * works around a glitch in the silicon. When the - * PCI DMA fifo goes empty, but there is still SCSI - * data to be flushed into the PCI DMA fifo (and from - * there on into main memory), the FIFOEMP bit will - * come on between the time when the PCI DMA buffer - * went empty and the next bit of data is copied from - * the SCSI fifo into the PCI fifo. It should only - * come on when both FIFOs (meaning the entire FIFO - * chain) are empty. Since it can take up to 4 cycles - * for new data to be copied from the SCSI fifo into - * the PCI fifo, testing for FIFOEMP status for 4 - * extra times gives the needed time for any - * remaining SCSI fifo data to be put in the PCI fifo - * before we declare it *truly* empty. 
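The quadruple FIFOEMP test described above amounts to requiring several consecutive empty readings before the flag is trusted. Equivalently, in C (a sketch):

static void wait_fifoemp_settled(struct aic7xxx_host *p)
{
  int consecutive = 0;

  while (consecutive < 5) {
    if (aic_inb(p, DFSTATUS) & FIFOEMP)
      consecutive++;    /* FIFOEMP held for another read */
    else
      consecutive = 0;  /* data trickled in; start over  */
  }
  /* only now is the whole FIFO chain believed truly empty */
}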
- */
-		test DFSTATUS, FIFOEMP jz ultra2_dmafifoflush;
-		test DFSTATUS, FIFOEMP jz ultra2_dmafifoflush;
-		test DFSTATUS, FIFOEMP jz ultra2_dmafifoflush;
-		test DFSTATUS, FIFOEMP jz ultra2_dmafifoflush;
-	}
-	test DFSTATUS, FIFOEMP jz ultra2_dmafifoflush;
-	test DFSTATUS, MREQPEND jnz .;
-ultra2_dmahalt:
-	and DFCNTRL, ~(HDMAEN|SCSIEN);
-	test DFCNTRL, (HDMAEN|SCSIEN) jnz .;
-	ret;
-}
-
-/*
- * Command phase.  Set up the DMA registers and let 'er rip.
- */
-p_command:
-	call assert;
-
-/*
- * Load HADDR and HCNT.
- */
-	if ((p->features & AHC_CMD_CHAN) != 0) {
-		bmov HADDR, SCB_CMDPTR, 5;
-		bmov HCNT[1], ALLZEROS, 2;
-		if ((p->features & AHC_ULTRA2) == 0) {
-			bmov STCNT, HCNT, 3;
-		}
-	} else {
-		mvi DINDEX, HADDR;
-		mvi SCB_CMDPTR call bcopy_5;
-		clr HCNT[1];
-		clr HCNT[2];
-		call set_stcnt_from_hcnt;
-	}
-
-	if ((p->features & AHC_ULTRA2) == 0) {
-		mvi (SCSIEN|SDMAEN|HDMAEN|DIRECTION|FIFORESET) call dma;
-	} else {
-		mvi DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN|DIRECTION);
-		test SSTAT0, SDONE jnz .;
-p_command_dma_loop:
-		test SSTAT0, SDONE jnz p_command_ultra2_dma_done;
-		test SSTAT1,PHASEMIS jz p_command_dma_loop;	/* ie. underrun */
-p_command_ultra2_dma_done:
-		test SCSISIGI, REQI jz p_command_ultra2_shutdown;
-		test SSTAT1, (PHASEMIS|REQINIT) jz p_command_ultra2_dma_done;
-p_command_ultra2_shutdown:
-		and DFCNTRL, ~(HDMAEN|SCSIEN);
-		test DFCNTRL, (HDMAEN|SCSIEN) jnz .;
-		or SXFRCTL0, CLRSTCNT|CLRCHN;
-	}
-	jmp ITloop;
-
-/*
- * Status phase.  Wait for the data byte to appear, then read it
- * and store it into the SCB.
- */
-p_status:
-	call assert;
-
-	mov SCB_TARGET_STATUS, SCSIDATL;
-	jmp ITloop;
-
-/*
- * Message out phase.  If MSG_OUT is 0x80, build a full identify message
- * sequence and send it to the target.  In addition, if the MK_MESSAGE bit
- * is set in the SCB_CONTROL byte, interrupt the host and allow it to send
- * its own message.
- *
- * If MSG_OUT == HOST_MSG, also interrupt the host and take a message.
- * This is done to allow the host to send messages outside of an identify
- * sequence while protecting the sequencer from testing the MK_MESSAGE bit
- * on an SCB that might not be for the current nexus.  (For example, a
- * BDR message in response to a bad reselection would leave us pointed to
- * an SCB that doesn't have anything to do with the current target).
- * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag,
- * bus device reset).
- *
- * When there are no messages to send, MSG_OUT should be set to MSG_NOOP,
- * in case the target decides to put us in this phase for some strange
- * reason.
- */
-p_mesgout_retry:
-	or SCSISIGO,ATNO,LASTPHASE;	/* turn on ATN for the retry */
-p_mesgout:
-	mov SINDEX, MSG_OUT;
-	cmp SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host;
-p_mesgout_identify:
-	if ((p->features & AHC_WIDE) != 0) {
-		and SINDEX,0xf,SCB_TCL;	/* lun */
-	} else {
-		and SINDEX,0x7,SCB_TCL;	/* lun */
-	}
-	and A,DISCENB,SCB_CONTROL;	/* mask off disconnect privilege */
-	or SINDEX,A;			/* or in disconnect privilege */
-	or SINDEX,MSG_IDENTIFYFLAG;
-p_mesgout_mk_message:
-	test SCB_CONTROL,MK_MESSAGE jz p_mesgout_tag;
-	mov SCSIDATL, SINDEX;		/* Send the last byte */
-	jmp p_mesgout_from_host + 1;	/* Skip HOST_MSG test */
-/*
- * Send a tag message if TAG_ENB is set in the SCB control block.
- * Use SCB_TAG (the position in the kernel's SCB array) as the tag value.
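p_mesgout_identify above composes the identify byte from three fields: the lun from SCB_TCL, the disconnect-privilege bit from SCB_CONTROL, and the 0x80 identify flag. The same construction in C (a sketch; identify_msg is an illustrative name):

static unsigned char identify_msg(unsigned char scb_tcl,
                                  unsigned char scb_control, int wide)
{
  unsigned char msg;

  msg = scb_tcl & (wide ? 0x0f : 0x07);  /* lun */
  msg |= scb_control & DISCENB;          /* disconnect privilege */
  msg |= MSG_IDENTIFYFLAG;               /* 0x80 */
  return msg;
}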
- */ -p_mesgout_tag: - test SCB_CONTROL,TAG_ENB jz p_mesgout_onebyte; - mov SCSIDATL, SINDEX; /* Send the identify message */ - call phase_lock; - cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; - and SCSIDATL,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL; - call phase_lock; - cmp LASTPHASE, P_MESGOUT jne p_mesgout_done; - mov SCB_TAG jmp p_mesgout_onebyte; -/* - * Interrupt the driver, and allow it to send a message - * if it asks. - */ -p_mesgout_from_host: - cmp SINDEX, HOST_MSG jne p_mesgout_onebyte; - mvi INTSTAT,AWAITING_MSG; - nop; - /* - * Did the host detect a phase change? - */ - cmp RETURN_1, MSGOUT_PHASEMIS je p_mesgout_done; - -p_mesgout_onebyte: - mvi CLRSINT1, CLRATNO; - mov SCSIDATL, SINDEX; - -/* - * If the next bus phase after ATN drops is a message out, it means - * that the target is requesting that the last message(s) be resent. - */ - call phase_lock; - cmp LASTPHASE, P_MESGOUT je p_mesgout_retry; - -p_mesgout_done: - mvi CLRSINT1,CLRATNO; /* Be sure to turn ATNO off */ - mov LAST_MSG, MSG_OUT; - cmp MSG_OUT, MSG_IDENTIFYFLAG jne . + 2; - and SCB_CONTROL, ~MK_MESSAGE; - mvi MSG_OUT, MSG_NOOP; /* No message left */ - jmp ITloop; - -/* - * Message in phase. Bytes are read using Automatic PIO mode. - */ -p_mesgin: - mvi ACCUM call inb_first; /* read the 1st message byte */ - - test A,MSG_IDENTIFYFLAG jnz mesgin_identify; - cmp A,MSG_DISCONNECT je mesgin_disconnect; - cmp A,MSG_SAVEDATAPOINTER je mesgin_sdptrs; - cmp ALLZEROS,A je mesgin_complete; - cmp A,MSG_RESTOREPOINTERS je mesgin_rdptrs; - cmp A,MSG_EXTENDED je mesgin_extended; - cmp A,MSG_MESSAGE_REJECT je mesgin_reject; - cmp A,MSG_NOOP je mesgin_done; - cmp A,MSG_IGN_WIDE_RESIDUE je mesgin_wide_residue; - -rej_mesgin: -/* - * We have no idea what this message in is, so we issue a message reject - * and hope for the best. In any case, rejection should be a rare - * occurrence - signal the driver when it happens. - */ - mvi INTSTAT,SEND_REJECT; /* let driver know */ - - mvi MSG_MESSAGE_REJECT call mk_mesg; - -mesgin_done: - mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ - jmp ITloop; - - -mesgin_complete: -/* - * We got a "command complete" message, so put the SCB_TAG into the QOUTFIFO, - * and trigger a completion interrupt. Before doing so, check to see if there - * is a residual or the status byte is something other than STATUS_GOOD (0). - * In either of these conditions, we upload the SCB back to the host so it can - * process this information. In the case of a non zero status byte, we - * additionally interrupt the kernel driver synchronously, allowing it to - * decide if sense should be retrieved. If the kernel driver wishes to request - * sense, it will fill the kernel SCB with a request sense command and set - * RETURN_1 to SEND_SENSE. If RETURN_1 is set to SEND_SENSE we redownload - * the SCB, and process it as the next command by adding it to the waiting list. - * If the kernel driver does not wish to request sense, it need only clear - * RETURN_1, and the command is allowed to complete normally. We don't bother - * to post to the QOUTFIFO in the error cases since it would require extra - * work in the kernel driver to ensure that the entry was removed before the - * command complete code tried processing it. - */ - -/* - * First check for residuals - */ - test SCB_RESID_SGCNT,0xff jnz upload_scb; - test SCB_TARGET_STATUS,0xff jz complete; /* Good Status? */ -upload_scb: - mvi DMAPARAMS, FIFORESET; - mov SCB_TAG call dma_scb; -check_status: - test SCB_TARGET_STATUS,0xff jz complete; /* Just a residual? 
*/ - mvi INTSTAT,BAD_STATUS; /* let driver know */ - nop; - cmp RETURN_1, SEND_SENSE jne complete; - /* This SCB becomes the next to execute as it will retrieve sense */ - mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; - mov SCB_TAG call dma_scb; -add_to_waiting_list: - mov SCB_NEXT,WAITING_SCBH; - mov WAITING_SCBH, SCBPTR; - /* - * Prepare our selection hardware before the busfree so we have a - * high probability of winning arbitration. - */ - call start_selection; - jmp await_busfree; - -complete: - /* If we are untagged, clear our address up in host ram */ - test SCB_CONTROL, TAG_ENB jnz complete_post; - mov A, SAVED_TCL; - mvi UNTAGGEDSCB_OFFSET call post_byte_setup; - mvi SCB_LIST_NULL call post_byte; - -complete_post: - /* Post the SCB and issue an interrupt */ - if ((p->features & AHC_QUEUE_REGS) != 0) { - mov A, SDSCB_QOFF; - } else { - mov A, QOUTPOS; - } - mvi QOUTFIFO_OFFSET call post_byte_setup; - mov SCB_TAG call post_byte; - if ((p->features & AHC_QUEUE_REGS) == 0) { - inc QOUTPOS; - } - mvi INTSTAT,CMDCMPLT; - -add_to_free_list: - call add_scb_to_free_list; - jmp await_busfree; - -/* - * Is it an extended message? Copy the message to our message buffer and - * notify the host. The host will tell us whether to reject this message, - * respond to it with the message that the host placed in our message buffer, - * or simply to do nothing. - */ -mesgin_extended: - mvi INTSTAT,EXTENDED_MSG; /* let driver know */ - jmp ITloop; - -/* - * Is it a disconnect message? Set a flag in the SCB to remind us - * and await the bus going free. - */ -mesgin_disconnect: - or SCB_CONTROL,DISCONNECTED; - call add_scb_to_disc_list; - jmp await_busfree; - -/* - * Save data pointers message: - * Copying RAM values back to SCB, for Save Data Pointers message, but - * only if we've actually been into a data phase to change them. This - * protects against bogus data in scratch ram and the residual counts - * since they are only initialized when we go into data_in or data_out. - */ -mesgin_sdptrs: - test SEQ_FLAGS, DPHASE jz mesgin_done; - /* - * The SCB SGPTR becomes the next one we'll download, - * and the SCB DATAPTR becomes the current SHADDR. - * Use the residual number since STCNT is corrupted by - * any message transfer. - */ - if ((p->features & AHC_CMD_CHAN) != 0) { - bmov SCB_SGCOUNT, SG_COUNT, 5; - bmov SCB_DATAPTR, SHADDR, 4; - bmov SCB_DATACNT, SCB_RESID_DCNT, 3; - } else { - mvi DINDEX, SCB_SGCOUNT; - mvi SG_COUNT call bcopy_5; - mvi DINDEX, SCB_DATAPTR; - mvi SHADDR call bcopy_4; - mvi SCB_RESID_DCNT call bcopy_3; - } - jmp mesgin_done; - -/* - * Restore pointers message? Data pointers are recopied from the - * SCB anytime we enter a data phase for the first time, so all - * we need to do is clear the DPHASE flag and let the data phase - * code do the rest. - */ -mesgin_rdptrs: - and SEQ_FLAGS, ~DPHASE; /* - * We'll reload them - * the next time through - * the dataphase. - */ - jmp mesgin_done; - -/* - * Identify message? For a reconnecting target, this tells us the lun - * that the reconnection is for - find the correct SCB and switch to it, - * clearing the "disconnected" bit so we don't "find" it by accident later. 
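complete_post above hands finished commands back by posting SCB_TAG bytes into the QOUTFIFO region of shared host memory and raising CMDCMPLT. The host side then presumably drains entries until it hits SCB_LIST_NULL; a sketch under that assumption (qoutfifo, qoutfifonext and complete_scb are illustrative names):

static void drain_qoutfifo(struct aic7xxx_host *p,
                           volatile unsigned char *qoutfifo,
                           unsigned char *qoutfifonext)
{
  while (qoutfifo[*qoutfifonext] != SCB_LIST_NULL) {
    unsigned char tag = qoutfifo[*qoutfifonext];

    qoutfifo[*qoutfifonext] = SCB_LIST_NULL;  /* consume the slot */
    (*qoutfifonext)++;       /* u8 index wraps naturally at 256 */
    complete_scb(p, tag);    /* hypothetical completion helper  */
  }
}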
- */ -mesgin_identify: - - if ((p->features & AHC_WIDE) != 0) { - and A,0x0f; /* lun in lower four bits */ - } else { - and A,0x07; /* lun in lower three bits */ - } - or SAVED_TCL,A; /* SAVED_TCL should be complete now */ - - mvi ARG_2, SCB_LIST_NULL; /* SCBID of prev SCB in disc List */ - call get_untagged_SCBID; - cmp ARG_1, SCB_LIST_NULL je snoop_tag; - if ((p->flags & AHC_PAGESCBS) != 0) { - test SEQ_FLAGS, SCBPTR_VALID jz use_retrieveSCB; - } - /* - * If the SCB was found in the disconnected list (as is - * always the case in non-paging scenarios), SCBPTR is already - * set to the correct SCB. So, simply setup the SCB and get - * on with things. - */ - mov SCBPTR call rem_scb_from_disc_list; - jmp setup_SCB; -/* - * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message. - * If we get one, we use the tag returned to find the proper - * SCB. With SCB paging, this requires using search for both tagged - * and non-tagged transactions since the SCB may exist in any slot. - * If we're not using SCB paging, we can use the tag as the direct - * index to the SCB. - */ -snoop_tag: - mov NONE,SCSIDATL; /* ACK Identify MSG */ -snoop_tag_loop: - call phase_lock; - cmp LASTPHASE, P_MESGIN jne not_found; - cmp SCSIBUSL,MSG_SIMPLE_Q_TAG jne not_found; -get_tag: - mvi ARG_1 call inb_next; /* tag value */ - -use_retrieveSCB: - call retrieveSCB; -setup_SCB: - mov A, SAVED_TCL; - cmp SCB_TCL, A jne not_found_cleanup_scb; - test SCB_CONTROL,DISCONNECTED jz not_found_cleanup_scb; - and SCB_CONTROL,~DISCONNECTED; - or SEQ_FLAGS,IDENTIFY_SEEN; /* make note of IDENTIFY */ - /* See if the host wants to send a message upon reconnection */ - test SCB_CONTROL, MK_MESSAGE jz mesgin_done; - and SCB_CONTROL, ~MK_MESSAGE; - mvi HOST_MSG call mk_mesg; - jmp mesgin_done; - -not_found_cleanup_scb: - test SCB_CONTROL, DISCONNECTED jz . + 3; - call add_scb_to_disc_list; - jmp not_found; - call add_scb_to_free_list; -not_found: - mvi INTSTAT, NO_MATCH; - mvi MSG_BUS_DEV_RESET call mk_mesg; - jmp mesgin_done; - -/* - * Message reject? Let the kernel driver handle this. If we have an - * outstanding WDTR or SDTR negotiation, assume that it's a response from - * the target selecting 8bit or asynchronous transfer, otherwise just ignore - * it since we have no clue what it pertains to. - */ -mesgin_reject: - mvi INTSTAT, REJECT_MSG; - jmp mesgin_done; - -/* - * Wide Residue. We handle the simple cases, but pass off the one hard case - * to the kernel (when the residue byte happened to cause us to advance our - * sg element array, so we now have to back that advance out). - */ -mesgin_wide_residue: - mvi ARG_1 call inb_next; /* ACK the wide_residue and get */ - /* the size byte */ -/* - * In order for this to be reliable, we have to do all sorts of horrible - * magic in terms of resetting the datafifo and reloading the shadow layer - * with the correct new values (so that a subsequent save data pointers - * message will do the right thing). We let the kernel do that work. - */ - mvi INTSTAT, WIDE_RESIDUE; - jmp mesgin_done; - -/* - * [ ADD MORE MESSAGE HANDLING HERE ] - */ - -/* - * Locking the driver out, build a one-byte message passed in SINDEX - * if there is no active message already. SINDEX is returned intact. - */ -mk_mesg: - or SCSISIGO,ATNO,LASTPHASE;/* turn on ATNO */ - mov MSG_OUT,SINDEX ret; - -/* - * Functions to read data in Automatic PIO mode. - * - * According to Adaptec's documentation, an ACK is not sent on input from - * the target until SCSIDATL is read from.
So we wait until SCSIDATL is - * latched (the usual way), then read the data byte directly off the bus - * using SCSIBUSL. When we have pulled the ATN line, or we just want to - * acknowledge the byte, then we do a dummy read from SCSIDATL. The SCSI - * spec guarantees that the target will hold the data byte on the bus until - * we send our ACK. - * - * The assumption here is that these are called in a particular sequence, - * and that REQ is already set when inb_first is called. inb_{first,next} - * use the same calling convention as inb. - */ - -inb_next: - mov NONE,SCSIDATL; /*dummy read from latch to ACK*/ -inb_next_wait: - /* - * If there is a parity error, wait for the kernel to - * see the interrupt and prepare our message response - * before continuing. - */ - test SSTAT1, REQINIT jz inb_next_wait; - test SSTAT1, SCSIPERR jnz .; - and LASTPHASE, PHASE_MASK, SCSISIGI; - cmp LASTPHASE, P_MESGIN jne mesgin_phasemis; -inb_first: - mov DINDEX,SINDEX; - mov DINDIR,SCSIBUSL ret; /*read byte directly from bus*/ -inb_last: - mov NONE,SCSIDATL ret; /*dummy read from latch to ACK*/ - - -mesgin_phasemis: -/* - * We expected to receive another byte, but the target changed phase - */ - mvi INTSTAT, MSGIN_PHASEMIS; - jmp ITloop; - -/* - * DMA data transfer. HADDR and HCNT must be loaded first, and - * SINDEX should contain the value to load DFCNTRL with - 0x3d for - * host->scsi, or 0x39 for scsi->host. The SCSI channel is cleared - * during initialization. - */ -if ((p->features & AHC_ULTRA2) == 0) { -dma: - mov DFCNTRL,SINDEX; -dma_loop: - test SSTAT0,DMADONE jnz dma_dmadone; - test SSTAT1,PHASEMIS jz dma_loop; /* i.e. underrun */ -dma_phasemis: - test SSTAT0,SDONE jnz dma_checkfifo; - mov SINDEX,ALLZEROS; /* Notify caller of phasemis */ - -/* - * We will be "done" DMAing when the transfer count goes to zero, or - * the target changes the phase (in light of this, it makes sense that - * the DMA circuitry doesn't ACK when PHASEMIS is active). If we are - * doing a SCSI->Host transfer, the data FIFO should be flushed auto- - * magically on STCNT=0 or a phase change, so just wait for FIFO empty - * status. - */ -dma_checkfifo: - test DFCNTRL,DIRECTION jnz dma_fifoempty; -dma_fifoflush: - test DFSTATUS,FIFOEMP jz dma_fifoflush; - -dma_fifoempty: - /* Don't clobber an in-progress host data transfer */ - test DFSTATUS, MREQPEND jnz dma_fifoempty; -/* - * Now shut the DMA enables off and make sure that the DMA enables are - * actually off first lest we get an ILLSADDR. - */ -dma_dmadone: - cmp LASTPHASE, P_COMMAND je dma_await_nreq; - test SCSIRATE, 0x0f jnz dma_shutdown; -dma_await_nreq: - test SCSISIGI, REQI jz dma_shutdown; - test SSTAT1, (PHASEMIS|REQINIT) jz dma_await_nreq; -dma_shutdown: - and DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN); -dma_halt: - /* - * Some revisions of the aic7880 have a problem where, if the - * data fifo is full, but the PCI input latch is not empty, - * HDMAEN cannot be cleared. The fix used here is to attempt - * to drain the data fifo until there is space for the input - * latch to drain and HDMAEN de-asserts. - */ - if ((p->bugs & AHC_BUG_PCI_2_1_RETRY) != 0) { - mov NONE, DFDAT; - } - test DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz dma_halt; -} -return: - ret; - -/* - * Assert that if we've been reselected, then we've seen an IDENTIFY - * message. - */ -assert: - test SEQ_FLAGS,IDENTIFY_SEEN jnz return; /* seen IDENTIFY?
*/ - - mvi INTSTAT,NO_IDENT ret; /* no - tell the kernel */ - -/* - * Locate a disconnected SCB either by SAVED_TCL (ARG_1 is SCB_LIST_NULL) - * or by the SCBID ARG_1. The search begins at the SCB index passed in - * via SINDEX which is an SCB that must be on the disconnected list. If - * the SCB cannot be found, SINDEX will be SCB_LIST_NULL, otherwise, SCBPTR - * is set to the proper SCB. - */ -findSCB: - mov SCBPTR,SINDEX; /* Initialize SCBPTR */ - cmp ARG_1, SCB_LIST_NULL jne findSCB_by_SCBID; - mov A, SAVED_TCL; - mvi SCB_TCL jmp findSCB_loop; /* &SCB_TCL -> SINDEX */ -findSCB_by_SCBID: - mov A, ARG_1; /* Tag passed in ARG_1 */ - mvi SCB_TAG jmp findSCB_loop; /* &SCB_TAG -> SINDEX */ -findSCB_next: - mov ARG_2, SCBPTR; - cmp SCB_NEXT, SCB_LIST_NULL je notFound; - mov SCBPTR,SCB_NEXT; - dec SINDEX; /* Last comparison moved us too far */ -findSCB_loop: - cmp SINDIR, A jne findSCB_next; - mov SINDEX, SCBPTR ret; -notFound: - mvi SINDEX, SCB_LIST_NULL ret; - -/* - * Retrieve an SCB by SCBID first searching the disconnected list falling - * back to DMA'ing the SCB down from the host. This routine assumes that - * ARG_1 is the SCBID of interest and that SINDEX is the position in the - * disconnected list to start the search from. If SINDEX is SCB_LIST_NULL, - * we go directly to the host for the SCB. - */ -retrieveSCB: - test SEQ_FLAGS, SCBPTR_VALID jz retrieve_from_host; - mov SCBPTR call findSCB; /* Continue the search */ - cmp SINDEX, SCB_LIST_NULL je retrieve_from_host; - -/* - * This routine expects SINDEX to contain the index of the SCB to be - * removed, SCBPTR to be pointing to that SCB, and ARG_2 to be the - * SCBID of the SCB just previous to this one in the list or SCB_LIST_NULL - * if it is at the head. - */ -rem_scb_from_disc_list: -/* Remove this SCB from the disconnection list */ - cmp ARG_2, SCB_LIST_NULL je rHead; - mov DINDEX, SCB_NEXT; - mov SCBPTR, ARG_2; - mov SCB_NEXT, DINDEX; - mov SCBPTR, SINDEX ret; -rHead: - mov DISCONNECTED_SCBH,SCB_NEXT ret; - -retrieve_from_host: -/* - * We didn't find it. Pull an SCB and DMA down the one we want. - * We should never get here in the non-paging case. - */ - mov ALLZEROS call get_free_or_disc_scb; - mvi DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; - /* Jump instead of call as we want to return anyway */ - mov ARG_1 jmp dma_scb; - -/* - * Determine whether a target is using tagged or non-tagged transactions - * by first looking for a matching transaction based on the TCL and if - * that fails, looking up this device in the host's untagged SCB array. - * The TCL to search for is assumed to be in SAVED_TCL. The value is - * returned in ARG_1 (SCB_LIST_NULL for tagged, SCBID for non-tagged). - * The SCBPTR_VALID bit is set in SEQ_FLAGS if we found the information - * in an SCB instead of having to go to the host. - */ -get_untagged_SCBID: - cmp DISCONNECTED_SCBH, SCB_LIST_NULL je get_SCBID_from_host; - mvi ARG_1, SCB_LIST_NULL; - mov DISCONNECTED_SCBH call findSCB; - cmp SINDEX, SCB_LIST_NULL je get_SCBID_from_host; - or SEQ_FLAGS, SCBPTR_VALID;/* Was in disconnected list */ - test SCB_CONTROL, TAG_ENB jnz . + 2; - mov ARG_1, SCB_TAG ret; - mvi ARG_1, SCB_LIST_NULL ret; - -/* - * Fetch a byte from host memory given an index of (A + (256 * SINDEX)) - * and a base address of SCBID_ADDR. The byte is returned in RETURN_2. 
- */ -fetch_byte: - mov ARG_2, SINDEX; - if ((p->features & AHC_CMD_CHAN) != 0) { - mvi DINDEX, CCHADDR; - mvi SCBID_ADDR call set_1byte_addr; - mvi CCHCNT, 1; - mvi CCSGCTL, CCSGEN|CCSGRESET; - test CCSGCTL, CCSGDONE jz .; - mvi CCSGCTL, CCSGRESET; - bmov RETURN_2, CCSGRAM, 1 ret; - } else { - mvi DINDEX, HADDR; - mvi SCBID_ADDR call set_1byte_addr; - mvi HCNT[0], 1; - clr HCNT[1]; - clr HCNT[2]; - mvi DFCNTRL, HDMAEN|DIRECTION|FIFORESET; - call dma_finish; - mov RETURN_2, DFDAT ret; - } - -/* - * Prepare the hardware to post a byte to host memory given an - * index of (A + (256 * SINDEX)) and a base address of SCBID_ADDR. - */ -post_byte_setup: - mov ARG_2, SINDEX; - if ((p->features & AHC_CMD_CHAN) != 0) { - mvi DINDEX, CCHADDR; - mvi SCBID_ADDR call set_1byte_addr; - mvi CCHCNT, 1; - mvi CCSCBCTL, CCSCBRESET ret; - } else { - mvi DINDEX, HADDR; - mvi SCBID_ADDR call set_1byte_addr; - mvi HCNT[0], 1; - clr HCNT[1]; - clr HCNT[2]; - mvi DFCNTRL, FIFORESET ret; - } - -post_byte: - if ((p->features & AHC_CMD_CHAN) != 0) { - bmov CCSCBRAM, SINDEX, 1; - or CCSCBCTL, CCSCBEN|CCSCBRESET; - test CCSCBCTL, CCSCBDONE jz .; - clr CCSCBCTL ret; - } else { - mov DFDAT, SINDEX; - or DFCNTRL, HDMAEN|FIFOFLUSH; - jmp dma_finish; - } - -get_SCBID_from_host: - mov A, SAVED_TCL; - mvi UNTAGGEDSCB_OFFSET call fetch_byte; - mov RETURN_1, RETURN_2 ret; - -phase_lock: - test SSTAT1, REQINIT jz phase_lock; - test SSTAT1, SCSIPERR jnz phase_lock; - and SCSISIGO, PHASE_MASK, SCSISIGI; - and LASTPHASE, PHASE_MASK, SCSISIGI ret; - -if ((p->features & AHC_CMD_CHAN) == 0) { -set_stcnt_from_hcnt: - mov STCNT[0], HCNT[0]; - mov STCNT[1], HCNT[1]; - mov STCNT[2], HCNT[2] ret; - -bcopy_7: - mov DINDIR, SINDIR; - mov DINDIR, SINDIR; -bcopy_5: - mov DINDIR, SINDIR; -bcopy_4: - mov DINDIR, SINDIR; -bcopy_3: - mov DINDIR, SINDIR; - mov DINDIR, SINDIR; - mov DINDIR, SINDIR ret; -} - -/* - * Setup addr assuming that A is an index into - * an array of 32byte objects, SINDEX contains - * the base address of that array, and DINDEX - * contains the base address of the location - * to store the indexed address. - */ -set_32byte_addr: - shr ARG_2, 3, A; - shl A, 5; -/* - * Setup addr assuming that A + (ARG_1 * 256) is an - * index into an array of 1byte objects, SINDEX contains - * the base address of that array, and DINDEX contains - * the base address of the location to store the computed - * address. - */ -set_1byte_addr: - add DINDIR, A, SINDIR; - mov A, ARG_2; - adc DINDIR, A, SINDIR; - clr A; - adc DINDIR, A, SINDIR; - adc DINDIR, A, SINDIR ret; - -/* - * Either post or fetch an SCB from host memory based on the - * DIRECTION bit in DMAPARAMS. The host SCB index is in SINDEX.
- */ -dma_scb: - mov A, SINDEX; - if ((p->features & AHC_CMD_CHAN) != 0) { - mvi DINDEX, CCHADDR; - mvi HSCB_ADDR call set_32byte_addr; - mov CCSCBPTR, SCBPTR; - mvi CCHCNT, 32; - test DMAPARAMS, DIRECTION jz dma_scb_tohost; - mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET; - cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN|CCSCBDIR jne .; - jmp dma_scb_finish; -dma_scb_tohost: - if ((p->features & AHC_ULTRA2) == 0) { - mvi CCSCBCTL, CCSCBRESET; - bmov CCSCBRAM, SCB_CONTROL, 32; - or CCSCBCTL, CCSCBEN|CCSCBRESET; - test CCSCBCTL, CCSCBDONE jz .; - } - if ((p->features & AHC_ULTRA2) != 0) { - if ((p->bugs & AHC_BUG_SCBCHAN_UPLOAD) != 0) { - mvi CCSCBCTL, CCARREN|CCSCBRESET; - cmp CCSCBCTL, ARRDONE|CCARREN jne .; - mvi CCHCNT, 32; - mvi CCSCBCTL, CCSCBEN|CCSCBRESET; - cmp CCSCBCTL, CCSCBDONE|CCSCBEN jne .; - } else { - mvi CCSCBCTL, CCARREN|CCSCBEN|CCSCBRESET; - cmp CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN jne .; - } - } -dma_scb_finish: - clr CCSCBCTL; - test CCSCBCTL, CCARREN|CCSCBEN jnz .; - ret; - } - if ((p->features & AHC_CMD_CHAN) == 0) { - mvi DINDEX, HADDR; - mvi HSCB_ADDR call set_32byte_addr; - mvi HCNT[0], 32; - clr HCNT[1]; - clr HCNT[2]; - mov DFCNTRL, DMAPARAMS; - test DMAPARAMS, DIRECTION jnz dma_scb_fromhost; - /* Fill it with the SCB data */ -copy_scb_tofifo: - mvi SINDEX, SCB_CONTROL; - add A, 32, SINDEX; -copy_scb_tofifo_loop: - mov DFDAT,SINDIR; - mov DFDAT,SINDIR; - mov DFDAT,SINDIR; - mov DFDAT,SINDIR; - mov DFDAT,SINDIR; - mov DFDAT,SINDIR; - mov DFDAT,SINDIR; - mov DFDAT,SINDIR; - cmp SINDEX, A jne copy_scb_tofifo_loop; - or DFCNTRL, HDMAEN|FIFOFLUSH; - jmp dma_finish; -dma_scb_fromhost: - mvi DINDEX, SCB_CONTROL; - if ((p->bugs & AHC_BUG_PCI_2_1_RETRY) != 0) { - /* - * Set A to -24. If it hits 0, then we let - * our code fall through to dfdat_in_8 to complete - * the last of the copy. - * - * Also, things happen 8 bytes at a time in this - * case, so we may need to drain the fifo at most - * 3 times to keep things flowing. - */ - mvi A, -24; -dma_scb_hang_fifo: - /* Wait for the first bit of data to hit the fifo */ - test DFSTATUS, FIFOEMP jnz .; -dma_scb_hang_wait: - /* OK, now they've started to transfer into the fifo, - * so wait for them to stop trying to transfer any - * more data. - */ - test DFSTATUS, MREQPEND jnz .; - /* - * OK, they started, then they stopped, now see if they - * managed to complete the job before stopping. Try - * it multiple times to give the chip a few cycles to - * set the flag if it did complete. - */ - test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; - test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; - test DFSTATUS, HDONE jnz dma_scb_hang_dma_done; - /* - * Too bad, the chip didn't complete the DMA, but there - * aren't any more memory requests pending, so that - * means it stopped part way through and hung. That's - * our bug, so now we drain what data there is in the - * fifo in order to get things going again. - */ -dma_scb_hang_empty_fifo: - call dfdat_in_8; - add A, 8; - add SINDEX, A, HCNT; - /* - * If there are another 8 bytes of data waiting in the - * fifo, then the carry bit will be set as a result - * of the above add command (unless A is non-negative, - * in which case the carry bit won't be set). - */ - jc dma_scb_hang_empty_fifo; - /* - * We've emptied the fifo now, but we wouldn't have got - * here if the memory transfer hadn't stopped part way - * through, so go back up to the beginning of the - * loop and start over.
When it succeeds in getting - * all the data down, HDONE will be set and we'll - * jump to the code just below here. - */ - jmp dma_scb_hang_fifo; -dma_scb_hang_dma_done: - and DFCNTRL, ~HDMAEN; - test DFCNTRL, HDMAEN jnz .; - call dfdat_in_8; - add A, 8; - cmp A, 8 jne . - 2; - ret; - } else { - call dma_finish; - call dfdat_in_8; - call dfdat_in_8; - call dfdat_in_8; - } -dfdat_in_8: - mov DINDIR,DFDAT; -dfdat_in_7: - mov DINDIR,DFDAT; - mov DINDIR,DFDAT; - mov DINDIR,DFDAT; - mov DINDIR,DFDAT; - mov DINDIR,DFDAT; - mov DINDIR,DFDAT; - mov DINDIR,DFDAT ret; - } - - -/* - * Wait for DMA from host memory to data FIFO to complete, then disable - * DMA and wait for it to acknowledge that it's off. - */ -if ((p->features & AHC_CMD_CHAN) == 0) { -dma_finish: - test DFSTATUS,HDONE jz dma_finish; - /* Turn off DMA */ - and DFCNTRL, ~HDMAEN; - test DFCNTRL, HDMAEN jnz .; - ret; -} - -add_scb_to_free_list: - if ((p->flags & AHC_PAGESCBS) != 0) { - mov SCB_NEXT, FREE_SCBH; - mov FREE_SCBH, SCBPTR; - } - mvi SCB_TAG, SCB_LIST_NULL ret; - -if ((p->flags & AHC_PAGESCBS) != 0) { -get_free_or_disc_scb: - cmp FREE_SCBH, SCB_LIST_NULL jne dequeue_free_scb; - cmp DISCONNECTED_SCBH, SCB_LIST_NULL jne dequeue_disc_scb; -return_error: - mvi SINDEX, SCB_LIST_NULL ret; -dequeue_disc_scb: - mov SCBPTR, DISCONNECTED_SCBH; -dma_up_scb: - mvi DMAPARAMS, FIFORESET; - mov SCB_TAG call dma_scb; -unlink_disc_scb: - mov DISCONNECTED_SCBH, SCB_NEXT ret; -dequeue_free_scb: - mov SCBPTR, FREE_SCBH; - mov FREE_SCBH, SCB_NEXT ret; -} - -add_scb_to_disc_list: -/* - * Link this SCB into the DISCONNECTED list. This list holds the - * candidates for paging out an SCB if one is needed for a new command. - * Modifying the disconnected list is a critical (pause disabled) section. - */ - mov SCB_NEXT, DISCONNECTED_SCBH; - mov DISCONNECTED_SCBH, SCBPTR ret; diff --git a/drivers/scsi/aic7xxx_old/aic7xxx_proc.c b/drivers/scsi/aic7xxx_old/aic7xxx_proc.c deleted file mode 100644 index b07e4f04fd0..00000000000 --- a/drivers/scsi/aic7xxx_old/aic7xxx_proc.c +++ /dev/null @@ -1,373 +0,0 @@ -/*+M************************************************************************* - * Adaptec AIC7xxx device driver proc support for Linux. - * - * Copyright (c) 1995, 1996 Dean W. Gehnert - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; see the file COPYING. If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. - * - * ---------------------------------------------------------------- - * o Modified from the EATA-DMA /proc support. - * o Additional support for device block statistics provided by - * Matthew Jacob. - * o Correction of overflow by Heinz Mauelshagen - * o Additional corrections by Doug Ledford - * - * Dean W.
Gehnert, deang@teleport.com, 05/01/96 - * - * $Id: aic7xxx_proc.c,v 4.1 1997/06/97 08:23:42 deang Exp $ - *-M*************************************************************************/ - - -#define BLS (&aic7xxx_buffer[size]) -#define HDRB \ -" 0 - 4K 4 - 16K 16 - 64K 64 - 256K 256K - 1M 1M+" - -#ifdef PROC_DEBUG -extern int vsprintf(char *, const char *, va_list); - -static void -proc_debug(const char *fmt, ...) -{ - va_list ap; - char buf[256]; - - va_start(ap, fmt); - vsprintf(buf, fmt, ap); - printk(buf); - va_end(ap); -} -#else /* PROC_DEBUG */ -# define proc_debug(fmt, args...) -#endif /* PROC_DEBUG */ - -static int aic7xxx_buffer_size = 0; -static char *aic7xxx_buffer = NULL; - - -/*+F************************************************************************* - * Function: - * aic7xxx_set_info - * - * Description: - * Set parameters for the driver from the /proc filesystem. - *-F*************************************************************************/ -static int -aic7xxx_set_info(char *buffer, int length, struct Scsi_Host *HBAptr) -{ - proc_debug("aic7xxx_set_info(): %s\n", buffer); - return (-ENOSYS); /* Currently this is a no-op */ -} - - -/*+F************************************************************************* - * Function: - * aic7xxx_proc_info - * - * Description: - * Return information to handle /proc support for the driver. - *-F*************************************************************************/ -int -aic7xxx_proc_info ( struct Scsi_Host *HBAptr, char *buffer, char **start, off_t offset, int length, - int inout) -{ - struct aic7xxx_host *p; - struct aic_dev_data *aic_dev; - struct scsi_device *sdptr; - int size = 0; - unsigned char i; - unsigned char tindex; - - for(p=first_aic7xxx; p && p->host != HBAptr; p=p->next) - ; - - if (!p) - { - size += sprintf(buffer, "Can't find adapter for host number %d\n", HBAptr->host_no); - if (size > length) - { - return (size); - } - else - { - return (length); - } - } - - if (inout == TRUE) /* Has data been written to the file? */ - { - return (aic7xxx_set_info(buffer, length, HBAptr)); - } - - p = (struct aic7xxx_host *) HBAptr->hostdata; - - /* - * It takes roughly 1K of space to hold all relevant card info, not - * counting any proc stats, so we start out with a 1.5k buffer size and - * if proc_stats is defined, then we sweep the stats structure to see - * how many drives we will be printing out for and add 384 bytes per - * device with active stats. - * - * Hmmmm...that 1.5k seems to keep growing as items get added so they - * can be easily viewed for debugging purposes. So, we bumped that - * 1.5k to 4k so we can quit having to bump it all the time. 
- */ - - size = 4096; - list_for_each_entry(aic_dev, &p->aic_devs, list) - size += 512; - if (aic7xxx_buffer_size != size) - { - if (aic7xxx_buffer != NULL) - { - kfree(aic7xxx_buffer); - aic7xxx_buffer_size = 0; - } - aic7xxx_buffer = kmalloc(size, GFP_KERNEL); - } - if (aic7xxx_buffer == NULL) - { - size = sprintf(buffer, "AIC7xxx - kmalloc error at line %d\n", - __LINE__); - return size; - } - aic7xxx_buffer_size = size; - - size = 0; - size += sprintf(BLS, "Adaptec AIC7xxx driver version: "); - size += sprintf(BLS, "%s/", AIC7XXX_C_VERSION); - size += sprintf(BLS, "%s", AIC7XXX_H_VERSION); - size += sprintf(BLS, "\n"); - size += sprintf(BLS, "Adapter Configuration:\n"); - size += sprintf(BLS, " SCSI Adapter: %s\n", - board_names[p->board_name_index]); - if (p->flags & AHC_TWIN) - size += sprintf(BLS, " Twin Channel Controller "); - else - { - char *channel = ""; - char *ultra = ""; - char *wide = "Narrow "; - if (p->flags & AHC_MULTI_CHANNEL) - { - channel = " Channel A"; - if (p->flags & (AHC_CHNLB|AHC_CHNLC)) - channel = (p->flags & AHC_CHNLB) ? " Channel B" : " Channel C"; - } - if (p->features & AHC_WIDE) - wide = "Wide "; - if (p->features & AHC_ULTRA3) - { - switch(p->chip & AHC_CHIPID_MASK) - { - case AHC_AIC7892: - case AHC_AIC7899: - ultra = "Ultra-160/m LVD/SE "; - break; - default: - ultra = "Ultra-3 LVD/SE "; - break; - } - } - else if (p->features & AHC_ULTRA2) - ultra = "Ultra-2 LVD/SE "; - else if (p->features & AHC_ULTRA) - ultra = "Ultra "; - size += sprintf(BLS, " %s%sController%s ", - ultra, wide, channel); - } - switch(p->chip & ~AHC_CHIPID_MASK) - { - case AHC_VL: - size += sprintf(BLS, "at VLB slot %d\n", p->pci_device_fn); - break; - case AHC_EISA: - size += sprintf(BLS, "at EISA slot %d\n", p->pci_device_fn); - break; - default: - size += sprintf(BLS, "at PCI %d/%d/%d\n", p->pci_bus, - PCI_SLOT(p->pci_device_fn), PCI_FUNC(p->pci_device_fn)); - break; - } - if( !(p->maddr) ) - { - size += sprintf(BLS, " Programmed I/O Base: %lx\n", p->base); - } - else - { - size += sprintf(BLS, " PCI MMAPed I/O Base: 0x%lx\n", p->mbase); - } - if( (p->chip & (AHC_VL | AHC_EISA)) ) - { - size += sprintf(BLS, " BIOS Memory Address: 0x%08x\n", p->bios_address); - } - size += sprintf(BLS, " Adapter SEEPROM Config: %s\n", - (p->flags & AHC_SEEPROM_FOUND) ? "SEEPROM found and used." : - ((p->flags & AHC_USEDEFAULTS) ? "SEEPROM not found, using defaults." : - "SEEPROM not found, using leftover BIOS values.") ); - size += sprintf(BLS, " Adaptec SCSI BIOS: %s\n", - (p->flags & AHC_BIOS_ENABLED) ? "Enabled" : "Disabled"); - size += sprintf(BLS, " IRQ: %d\n", HBAptr->irq); - size += sprintf(BLS, " SCBs: Active %d, Max Active %d,\n", - p->activescbs, p->max_activescbs); - size += sprintf(BLS, " Allocated %d, HW %d, " - "Page %d\n", p->scb_data->numscbs, p->scb_data->maxhscbs, - p->scb_data->maxscbs); - if (p->flags & AHC_EXTERNAL_SRAM) - size += sprintf(BLS, " Using External SCB SRAM\n"); - size += sprintf(BLS, " Interrupts: %ld", p->isr_count); - if (p->chip & AHC_EISA) - { - size += sprintf(BLS, " %s\n", - (p->pause & IRQMS) ? "(Level Sensitive)" : "(Edge Triggered)"); - } - else - { - size += sprintf(BLS, "\n"); - } - size += sprintf(BLS, " BIOS Control Word: 0x%04x\n", - p->bios_control); - size += sprintf(BLS, " Adapter Control Word: 0x%04x\n", - p->adapter_control); - size += sprintf(BLS, " Extended Translation: %sabled\n", - (p->flags & AHC_EXTEND_TRANS_A) ? 
"En" : "Dis"); - size += sprintf(BLS, "Disconnect Enable Flags: 0x%04x\n", p->discenable); - if (p->features & (AHC_ULTRA | AHC_ULTRA2)) - { - size += sprintf(BLS, " Ultra Enable Flags: 0x%04x\n", p->ultraenb); - } - size += sprintf(BLS, "Default Tag Queue Depth: %d\n", aic7xxx_default_queue_depth); - size += sprintf(BLS, " Tagged Queue By Device array for aic7xxx host " - "instance %d:\n", p->instance); - size += sprintf(BLS, " {"); - for(i=0; i < (MAX_TARGETS - 1); i++) - size += sprintf(BLS, "%d,",aic7xxx_tag_info[p->instance].tag_commands[i]); - size += sprintf(BLS, "%d}\n",aic7xxx_tag_info[p->instance].tag_commands[i]); - - size += sprintf(BLS, "\n"); - size += sprintf(BLS, "Statistics:\n\n"); - list_for_each_entry(aic_dev, &p->aic_devs, list) - { - sdptr = aic_dev->SDptr; - tindex = sdptr->channel << 3 | sdptr->id; - size += sprintf(BLS, "(scsi%d:%d:%d:%d)\n", - p->host_no, sdptr->channel, sdptr->id, sdptr->lun); - size += sprintf(BLS, " Device using %s/%s", - (aic_dev->cur.width == MSG_EXT_WDTR_BUS_16_BIT) ? - "Wide" : "Narrow", - (aic_dev->cur.offset != 0) ? - "Sync transfers at " : "Async transfers.\n" ); - if (aic_dev->cur.offset != 0) - { - struct aic7xxx_syncrate *sync_rate; - unsigned char options = aic_dev->cur.options; - int period = aic_dev->cur.period; - int rate = (aic_dev->cur.width == - MSG_EXT_WDTR_BUS_16_BIT) ? 1 : 0; - - sync_rate = aic7xxx_find_syncrate(p, &period, 0, &options); - if (sync_rate != NULL) - { - size += sprintf(BLS, "%s MByte/sec, offset %d\n", - sync_rate->rate[rate], - aic_dev->cur.offset ); - } - else - { - size += sprintf(BLS, "3.3 MByte/sec, offset %d\n", - aic_dev->cur.offset ); - } - } - size += sprintf(BLS, " Transinfo settings: "); - size += sprintf(BLS, "current(%d/%d/%d/%d), ", - aic_dev->cur.period, - aic_dev->cur.offset, - aic_dev->cur.width, - aic_dev->cur.options); - size += sprintf(BLS, "goal(%d/%d/%d/%d), ", - aic_dev->goal.period, - aic_dev->goal.offset, - aic_dev->goal.width, - aic_dev->goal.options); - size += sprintf(BLS, "user(%d/%d/%d/%d)\n", - p->user[tindex].period, - p->user[tindex].offset, - p->user[tindex].width, - p->user[tindex].options); - if(sdptr->simple_tags) - { - size += sprintf(BLS, " Tagged Command Queueing Enabled, Ordered Tags %s, Depth %d/%d\n", sdptr->ordered_tags ? 
"Enabled" : "Disabled", sdptr->queue_depth, aic_dev->max_q_depth); - } - if(aic_dev->barrier_total) - size += sprintf(BLS, " Total transfers %ld:\n (%ld/%ld/%ld/%ld reads/writes/REQ_BARRIER/Ordered Tags)\n", - aic_dev->r_total+aic_dev->w_total, aic_dev->r_total, aic_dev->w_total, - aic_dev->barrier_total, aic_dev->ordered_total); - else - size += sprintf(BLS, " Total transfers %ld:\n (%ld/%ld reads/writes)\n", - aic_dev->r_total+aic_dev->w_total, aic_dev->r_total, aic_dev->w_total); - size += sprintf(BLS, "%s\n", HDRB); - size += sprintf(BLS, " Reads:"); - for (i = 0; i < ARRAY_SIZE(aic_dev->r_bins); i++) - { - size += sprintf(BLS, " %10ld", aic_dev->r_bins[i]); - } - size += sprintf(BLS, "\n"); - size += sprintf(BLS, " Writes:"); - for (i = 0; i < ARRAY_SIZE(aic_dev->w_bins); i++) - { - size += sprintf(BLS, " %10ld", aic_dev->w_bins[i]); - } - size += sprintf(BLS, "\n"); - size += sprintf(BLS, "\n\n"); - } - if (size >= aic7xxx_buffer_size) - { - printk(KERN_WARNING "aic7xxx: Overflow in aic7xxx_proc.c\n"); - } - - if (offset > size - 1) - { - kfree(aic7xxx_buffer); - aic7xxx_buffer = NULL; - aic7xxx_buffer_size = length = 0; - *start = NULL; - } - else - { - *start = buffer; - length = min_t(int, length, size - offset); - memcpy(buffer, &aic7xxx_buffer[offset], length); - } - - return (length); -} - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-indent-level: 2 - * c-brace-imaginary-offset: 0 - * c-brace-offset: -2 - * c-argdecl-indent: 2 - * c-label-offset: -2 - * c-continued-statement-offset: 2 - * c-continued-brace-offset: 0 - * indent-tabs-mode: nil - * tab-width: 8 - * End: - */ diff --git a/drivers/scsi/aic7xxx_old/aic7xxx_reg.h b/drivers/scsi/aic7xxx_old/aic7xxx_reg.h deleted file mode 100644 index 27f2334abc7..00000000000 --- a/drivers/scsi/aic7xxx_old/aic7xxx_reg.h +++ /dev/null @@ -1,629 +0,0 @@ -/* - * DO NOT EDIT - This file is automatically generated. 
- */ - -#define SCSISEQ 0x00 -#define TEMODE 0x80 -#define ENSELO 0x40 -#define ENSELI 0x20 -#define ENRSELI 0x10 -#define ENAUTOATNO 0x08 -#define ENAUTOATNI 0x04 -#define ENAUTOATNP 0x02 -#define SCSIRSTO 0x01 - -#define SXFRCTL0 0x01 -#define DFON 0x80 -#define DFPEXP 0x40 -#define FAST20 0x20 -#define CLRSTCNT 0x10 -#define SPIOEN 0x08 -#define SCAMEN 0x04 -#define CLRCHN 0x02 - -#define SXFRCTL1 0x02 -#define BITBUCKET 0x80 -#define SWRAPEN 0x40 -#define ENSPCHK 0x20 -#define STIMESEL 0x18 -#define ENSTIMER 0x04 -#define ACTNEGEN 0x02 -#define STPWEN 0x01 - -#define SCSISIGO 0x03 -#define CDO 0x80 -#define IOO 0x40 -#define MSGO 0x20 -#define ATNO 0x10 -#define SELO 0x08 -#define BSYO 0x04 -#define REQO 0x02 -#define ACKO 0x01 - -#define SCSISIGI 0x03 -#define ATNI 0x10 -#define SELI 0x08 -#define BSYI 0x04 -#define REQI 0x02 -#define ACKI 0x01 - -#define SCSIRATE 0x04 -#define WIDEXFER 0x80 -#define SXFR_ULTRA2 0x7f -#define SXFR 0x70 -#define SOFS 0x0f - -#define SCSIID 0x05 -#define SCSIOFFSET 0x05 -#define SOFS_ULTRA2 0x7f - -#define SCSIDATL 0x06 - -#define SCSIDATH 0x07 - -#define STCNT 0x08 - -#define OPTIONMODE 0x08 -#define AUTORATEEN 0x80 -#define AUTOACKEN 0x40 -#define ATNMGMNTEN 0x20 -#define BUSFREEREV 0x10 -#define EXPPHASEDIS 0x08 -#define SCSIDATL_IMGEN 0x04 -#define AUTO_MSGOUT_DE 0x02 -#define DIS_MSGIN_DUALEDGE 0x01 - -#define CLRSINT0 0x0b -#define CLRSELDO 0x40 -#define CLRSELDI 0x20 -#define CLRSELINGO 0x10 -#define CLRSWRAP 0x08 -#define CLRSPIORDY 0x02 - -#define SSTAT0 0x0b -#define TARGET 0x80 -#define SELDO 0x40 -#define SELDI 0x20 -#define SELINGO 0x10 -#define IOERR 0x08 -#define SWRAP 0x08 -#define SDONE 0x04 -#define SPIORDY 0x02 -#define DMADONE 0x01 - -#define CLRSINT1 0x0c -#define CLRSELTIMEO 0x80 -#define CLRATNO 0x40 -#define CLRSCSIRSTI 0x20 -#define CLRBUSFREE 0x08 -#define CLRSCSIPERR 0x04 -#define CLRPHASECHG 0x02 -#define CLRREQINIT 0x01 - -#define SSTAT1 0x0c -#define SELTO 0x80 -#define ATNTARG 0x40 -#define SCSIRSTI 0x20 -#define PHASEMIS 0x10 -#define BUSFREE 0x08 -#define SCSIPERR 0x04 -#define PHASECHG 0x02 -#define REQINIT 0x01 - -#define SSTAT2 0x0d -#define OVERRUN 0x80 -#define SHVALID 0x40 -#define WIDE_RES 0x20 -#define SFCNT 0x1f -#define EXP_ACTIVE 0x10 -#define CRCVALERR 0x08 -#define CRCENDERR 0x04 -#define CRCREQERR 0x02 -#define DUAL_EDGE_ERROR 0x01 - -#define SSTAT3 0x0e -#define SCSICNT 0xf0 -#define OFFCNT 0x0f - -#define SCSIID_ULTRA2 0x0f -#define OID 0x0f - -#define SIMODE0 0x10 -#define ENSELDO 0x40 -#define ENSELDI 0x20 -#define ENSELINGO 0x10 -#define ENIOERR 0x08 -#define ENSWRAP 0x08 -#define ENSDONE 0x04 -#define ENSPIORDY 0x02 -#define ENDMADONE 0x01 - -#define SIMODE1 0x11 -#define ENSELTIMO 0x80 -#define ENATNTARG 0x40 -#define ENSCSIRST 0x20 -#define ENPHASEMIS 0x10 -#define ENBUSFREE 0x08 -#define ENSCSIPERR 0x04 -#define ENPHASECHG 0x02 -#define ENREQINIT 0x01 - -#define SCSIBUSL 0x12 - -#define SCSIBUSH 0x13 - -#define SHADDR 0x14 - -#define SELTIMER 0x18 -#define STAGE6 0x20 -#define STAGE5 0x10 -#define STAGE4 0x08 -#define STAGE3 0x04 -#define STAGE2 0x02 -#define STAGE1 0x01 - -#define SELID 0x19 -#define SELID_MASK 0xf0 -#define ONEBIT 0x08 - -#define SPIOCAP 0x1b -#define SOFT1 0x80 -#define SOFT0 0x40 -#define SOFTCMDEN 0x20 -#define HAS_BRDCTL 0x10 -#define SEEPROM 0x08 -#define EEPROM 0x04 -#define ROM 0x02 -#define SSPIOCPS 0x01 - -#define BRDCTL 0x1d -#define BRDDAT7 0x80 -#define BRDDAT6 0x40 -#define BRDDAT5 0x20 -#define BRDDAT4 0x10 -#define BRDSTB 0x10 -#define BRDCS 0x08 -#define BRDDAT3 
0x08 -#define BRDDAT2 0x04 -#define BRDRW 0x04 -#define BRDRW_ULTRA2 0x02 -#define BRDCTL1 0x02 -#define BRDSTB_ULTRA2 0x01 -#define BRDCTL0 0x01 - -#define SEECTL 0x1e -#define EXTARBACK 0x80 -#define EXTARBREQ 0x40 -#define SEEMS 0x20 -#define SEERDY 0x10 -#define SEECS 0x08 -#define SEECK 0x04 -#define SEEDO 0x02 -#define SEEDI 0x01 - -#define SBLKCTL 0x1f -#define DIAGLEDEN 0x80 -#define DIAGLEDON 0x40 -#define AUTOFLUSHDIS 0x20 -#define ENAB40 0x08 -#define ENAB20 0x04 -#define SELWIDE 0x02 -#define XCVR 0x01 - -#define SRAM_BASE 0x20 - -#define TARG_SCSIRATE 0x20 - -#define ULTRA_ENB 0x30 - -#define DISC_DSB 0x32 - -#define MSG_OUT 0x34 - -#define DMAPARAMS 0x35 -#define PRELOADEN 0x80 -#define WIDEODD 0x40 -#define SCSIEN 0x20 -#define SDMAENACK 0x10 -#define SDMAEN 0x10 -#define HDMAEN 0x08 -#define HDMAENACK 0x08 -#define DIRECTION 0x04 -#define FIFOFLUSH 0x02 -#define FIFORESET 0x01 - -#define SEQ_FLAGS 0x36 -#define IDENTIFY_SEEN 0x80 -#define SCBPTR_VALID 0x20 -#define DPHASE 0x10 -#define AMTARGET 0x08 -#define WIDE_BUS 0x02 -#define TWIN_BUS 0x01 - -#define SAVED_TCL 0x37 - -#define SG_COUNT 0x38 - -#define SG_NEXT 0x39 - -#define LASTPHASE 0x3d -#define P_MESGIN 0xe0 -#define PHASE_MASK 0xe0 -#define P_STATUS 0xc0 -#define P_MESGOUT 0xa0 -#define P_COMMAND 0x80 -#define CDI 0x80 -#define IOI 0x40 -#define P_DATAIN 0x40 -#define MSGI 0x20 -#define P_BUSFREE 0x01 -#define P_DATAOUT 0x00 - -#define WAITING_SCBH 0x3e - -#define DISCONNECTED_SCBH 0x3f - -#define FREE_SCBH 0x40 - -#define HSCB_ADDR 0x41 - -#define SCBID_ADDR 0x45 - -#define TMODE_CMDADDR 0x49 - -#define KERNEL_QINPOS 0x4d - -#define QINPOS 0x4e - -#define QOUTPOS 0x4f - -#define TMODE_CMDADDR_NEXT 0x50 - -#define ARG_1 0x51 -#define RETURN_1 0x51 -#define SEND_MSG 0x80 -#define SEND_SENSE 0x40 -#define SEND_REJ 0x20 -#define MSGOUT_PHASEMIS 0x10 - -#define ARG_2 0x52 -#define RETURN_2 0x52 - -#define LAST_MSG 0x53 - -#define PREFETCH_CNT 0x54 - -#define SCSICONF 0x5a -#define TERM_ENB 0x80 -#define RESET_SCSI 0x40 -#define HWSCSIID 0x0f -#define HSCSIID 0x07 - -#define HOSTCONF 0x5d - -#define HA_274_BIOSCTRL 0x5f -#define BIOSMODE 0x30 -#define BIOSDISABLED 0x30 -#define CHANNEL_B_PRIMARY 0x08 - -#define SEQCTL 0x60 -#define PERRORDIS 0x80 -#define PAUSEDIS 0x40 -#define FAILDIS 0x20 -#define FASTMODE 0x10 -#define BRKADRINTEN 0x08 -#define STEP 0x04 -#define SEQRESET 0x02 -#define LOADRAM 0x01 - -#define SEQRAM 0x61 - -#define SEQADDR0 0x62 - -#define SEQADDR1 0x63 -#define SEQADDR1_MASK 0x01 - -#define ACCUM 0x64 - -#define SINDEX 0x65 - -#define DINDEX 0x66 - -#define ALLONES 0x69 - -#define ALLZEROS 0x6a - -#define NONE 0x6a - -#define FLAGS 0x6b -#define ZERO 0x02 -#define CARRY 0x01 - -#define SINDIR 0x6c - -#define DINDIR 0x6d - -#define FUNCTION1 0x6e - -#define STACK 0x6f - -#define TARG_OFFSET 0x70 - -#define BCTL 0x84 -#define ACE 0x08 -#define ENABLE 0x01 - -#define DSCOMMAND0 0x84 -#define INTSCBRAMSEL 0x08 -#define RAMPS 0x04 -#define USCBSIZE32 0x02 -#define CIOPARCKEN 0x01 - -#define DSCOMMAND 0x84 -#define CACHETHEN 0x80 -#define DPARCKEN 0x40 -#define MPARCKEN 0x20 -#define EXTREQLCK 0x10 - -#define BUSTIME 0x85 -#define BOFF 0xf0 -#define BON 0x0f - -#define BUSSPD 0x86 -#define DFTHRSH 0xc0 -#define STBOFF 0x38 -#define STBON 0x07 - -#define DSPCISTATUS 0x86 -#define DFTHRSH_100 0xc0 - -#define HCNTRL 0x87 -#define POWRDN 0x40 -#define SWINT 0x10 -#define IRQMS 0x08 -#define PAUSE 0x04 -#define INTEN 0x02 -#define CHIPRST 0x01 -#define CHIPRSTACK 0x01 - -#define HADDR 0x88 - -#define HCNT 0x8c 
- -#define SCBPTR 0x90 - -#define INTSTAT 0x91 -#define SEQINT_MASK 0xf1 -#define DATA_OVERRUN 0xe1 -#define MSGIN_PHASEMIS 0xd1 -#define TRACEPOINT2 0xc1 -#define SEQ_SG_FIXUP 0xb1 -#define AWAITING_MSG 0xa1 -#define RESIDUAL 0x81 -#define BAD_STATUS 0x71 -#define REJECT_MSG 0x61 -#define WIDE_RESIDUE 0x51 -#define EXTENDED_MSG 0x41 -#define NO_MATCH 0x31 -#define NO_IDENT 0x21 -#define SEND_REJECT 0x11 -#define INT_PEND 0x0f -#define BRKADRINT 0x08 -#define SCSIINT 0x04 -#define CMDCMPLT 0x02 -#define BAD_PHASE 0x01 -#define SEQINT 0x01 - -#define CLRINT 0x92 -#define CLRPARERR 0x10 -#define CLRBRKADRINT 0x08 -#define CLRSCSIINT 0x04 -#define CLRCMDINT 0x02 -#define CLRSEQINT 0x01 - -#define ERROR 0x92 -#define CIOPARERR 0x80 -#define PCIERRSTAT 0x40 -#define MPARERR 0x20 -#define DPARERR 0x10 -#define SQPARERR 0x08 -#define ILLOPCODE 0x04 -#define DSCTMOUT 0x02 -#define ILLSADDR 0x02 -#define ILLHADDR 0x01 - -#define DFCNTRL 0x93 - -#define DFSTATUS 0x94 -#define PRELOAD_AVAIL 0x80 -#define DWORDEMP 0x20 -#define MREQPEND 0x10 -#define HDONE 0x08 -#define DFTHRESH 0x04 -#define FIFOFULL 0x02 -#define FIFOEMP 0x01 - -#define DFDAT 0x99 - -#define SCBCNT 0x9a -#define SCBAUTO 0x80 -#define SCBCNT_MASK 0x1f - -#define QINFIFO 0x9b - -#define QINCNT 0x9c - -#define SCSIDATL_IMG 0x9c - -#define QOUTFIFO 0x9d - -#define CRCCONTROL1 0x9d -#define CRCONSEEN 0x80 -#define CRCVALCHKEN 0x40 -#define CRCENDCHKEN 0x20 -#define CRCREQCHKEN 0x10 -#define TARGCRCENDEN 0x08 -#define TARGCRCCNTEN 0x04 - -#define SCSIPHASE 0x9e -#define SP_STATUS 0x20 -#define SP_COMMAND 0x10 -#define SP_MSG_IN 0x08 -#define SP_MSG_OUT 0x04 -#define SP_DATA_IN 0x02 -#define SP_DATA_OUT 0x01 - -#define QOUTCNT 0x9e - -#define SFUNCT 0x9f -#define ALT_MODE 0x80 - -#define SCB_CONTROL 0xa0 -#define MK_MESSAGE 0x80 -#define DISCENB 0x40 -#define TAG_ENB 0x20 -#define DISCONNECTED 0x04 -#define SCB_TAG_TYPE 0x03 - -#define SCB_BASE 0xa0 - -#define SCB_TCL 0xa1 -#define TID 0xf0 -#define SELBUSB 0x08 -#define LID 0x07 - -#define SCB_TARGET_STATUS 0xa2 - -#define SCB_SGCOUNT 0xa3 - -#define SCB_SGPTR 0xa4 - -#define SCB_RESID_SGCNT 0xa8 - -#define SCB_RESID_DCNT 0xa9 - -#define SCB_DATAPTR 0xac - -#define SCB_DATACNT 0xb0 - -#define SCB_CMDPTR 0xb4 - -#define SCB_CMDLEN 0xb8 - -#define SCB_TAG 0xb9 - -#define SCB_NEXT 0xba - -#define SCB_PREV 0xbb - -#define SCB_BUSYTARGETS 0xbc - -#define SEECTL_2840 0xc0 -#define CS_2840 0x04 -#define CK_2840 0x02 -#define DO_2840 0x01 - -#define STATUS_2840 0xc1 -#define EEPROM_TF 0x80 -#define BIOS_SEL 0x60 -#define ADSEL 0x1e -#define DI_2840 0x01 - -#define CCHADDR 0xe0 - -#define CCHCNT 0xe8 - -#define CCSGRAM 0xe9 - -#define CCSGADDR 0xea - -#define CCSGCTL 0xeb -#define CCSGDONE 0x80 -#define CCSGEN 0x08 -#define FLAG 0x02 -#define CCSGRESET 0x01 - -#define CCSCBRAM 0xec - -#define CCSCBADDR 0xed - -#define CCSCBCTL 0xee -#define CCSCBDONE 0x80 -#define ARRDONE 0x40 -#define CCARREN 0x10 -#define CCSCBEN 0x08 -#define CCSCBDIR 0x04 -#define CCSCBRESET 0x01 - -#define CCSCBCNT 0xef - -#define CCSCBPTR 0xf1 - -#define HNSCB_QOFF 0xf4 - -#define HESCB_QOFF 0xf5 - -#define SNSCB_QOFF 0xf6 - -#define SESCB_QOFF 0xf7 - -#define SDSCB_QOFF 0xf8 - -#define QOFF_CTLSTA 0xfa -#define ESTABLISH_SCB_AVAIL 0x80 -#define SCB_AVAIL 0x40 -#define SNSCB_ROLLOVER 0x20 -#define SDSCB_ROLLOVER 0x10 -#define SESCB_ROLLOVER 0x08 -#define SCB_QSIZE 0x07 -#define SCB_QSIZE_256 0x06 - -#define DFF_THRSH 0xfb -#define WR_DFTHRSH 0x70 -#define WR_DFTHRSH_MAX 0x70 -#define WR_DFTHRSH_90 0x60 -#define WR_DFTHRSH_85 
0x50 -#define WR_DFTHRSH_75 0x40 -#define WR_DFTHRSH_63 0x30 -#define WR_DFTHRSH_50 0x20 -#define WR_DFTHRSH_25 0x10 -#define RD_DFTHRSH_MAX 0x07 -#define RD_DFTHRSH 0x07 -#define RD_DFTHRSH_90 0x06 -#define RD_DFTHRSH_85 0x05 -#define RD_DFTHRSH_75 0x04 -#define RD_DFTHRSH_63 0x03 -#define RD_DFTHRSH_50 0x02 -#define RD_DFTHRSH_25 0x01 -#define WR_DFTHRSH_MIN 0x00 -#define RD_DFTHRSH_MIN 0x00 - -#define SG_CACHEPTR 0xfc -#define SG_USER_DATA 0xfc -#define LAST_SEG 0x02 -#define LAST_SEG_DONE 0x01 - - -#define CMD_GROUP2_BYTE_DELTA 0xfa -#define MAX_OFFSET_8BIT 0x0f -#define BUS_16_BIT 0x01 -#define QINFIFO_OFFSET 0x02 -#define CMD_GROUP5_BYTE_DELTA 0x0b -#define CMD_GROUP_CODE_SHIFT 0x05 -#define MAX_OFFSET_ULTRA2 0x7f -#define MAX_OFFSET_16BIT 0x08 -#define BUS_8_BIT 0x00 -#define QOUTFIFO_OFFSET 0x01 -#define UNTAGGEDSCB_OFFSET 0x00 -#define CCSGRAM_MAXSEGS 0x10 -#define SCB_LIST_NULL 0xff -#define SG_SIZEOF 0x08 -#define CMD_GROUP4_BYTE_DELTA 0x04 -#define CMD_GROUP0_BYTE_DELTA 0xfc -#define HOST_MSG 0xff -#define BUS_32_BIT 0x02 -#define CCSGADDR_MAX 0x80 - - -/* Downloaded Constant Definitions */ -#define TMODE_NUMCMDS 0x00 diff --git a/drivers/scsi/aic7xxx_old/aic7xxx_seq.c b/drivers/scsi/aic7xxx_old/aic7xxx_seq.c deleted file mode 100644 index e1bc140e973..00000000000 --- a/drivers/scsi/aic7xxx_old/aic7xxx_seq.c +++ /dev/null @@ -1,817 +0,0 @@ -/* - * DO NOT EDIT - This file is automatically generated. - */ -static unsigned char seqprog[] = { - 0xff, 0x6a, 0x06, 0x08, - 0x7f, 0x02, 0x04, 0x08, - 0x12, 0x6a, 0x00, 0x00, - 0xff, 0x6a, 0xd6, 0x09, - 0xff, 0x6a, 0xdc, 0x09, - 0x00, 0x65, 0xca, 0x58, - 0xf7, 0x01, 0x02, 0x08, - 0xff, 0x4e, 0xc8, 0x08, - 0xbf, 0x60, 0xc0, 0x08, - 0x60, 0x0b, 0x86, 0x68, - 0x40, 0x00, 0x0c, 0x68, - 0x08, 0x1f, 0x3e, 0x10, - 0x60, 0x0b, 0x86, 0x68, - 0x40, 0x00, 0x0c, 0x68, - 0x08, 0x1f, 0x3e, 0x10, - 0xff, 0x3e, 0x48, 0x60, - 0x40, 0xfa, 0x10, 0x78, - 0xff, 0xf6, 0xd4, 0x08, - 0x01, 0x4e, 0x9c, 0x18, - 0x40, 0x60, 0xc0, 0x00, - 0x00, 0x4d, 0x10, 0x70, - 0x01, 0x4e, 0x9c, 0x18, - 0xbf, 0x60, 0xc0, 0x08, - 0x00, 0x6a, 0x86, 0x5c, - 0xff, 0x4e, 0xc8, 0x18, - 0x02, 0x6a, 0x70, 0x5b, - 0xff, 0x52, 0x20, 0x09, - 0x0d, 0x6a, 0x6a, 0x00, - 0x00, 0x52, 0xe6, 0x5b, - 0x03, 0xb0, 0x52, 0x31, - 0xff, 0xb0, 0x52, 0x09, - 0xff, 0xb1, 0x54, 0x09, - 0xff, 0xb2, 0x56, 0x09, - 0xff, 0xa3, 0x50, 0x09, - 0xff, 0x3e, 0x74, 0x09, - 0xff, 0x90, 0x7c, 0x08, - 0xff, 0x3e, 0x20, 0x09, - 0x00, 0x65, 0x4e, 0x58, - 0x00, 0x65, 0x0c, 0x40, - 0xf7, 0x1f, 0xca, 0x08, - 0x08, 0xa1, 0xc8, 0x08, - 0x00, 0x65, 0xca, 0x00, - 0xff, 0x65, 0x3e, 0x08, - 0xf0, 0xa1, 0xc8, 0x08, - 0x0f, 0x0f, 0x1e, 0x08, - 0x00, 0x0f, 0x1e, 0x00, - 0xf0, 0xa1, 0xc8, 0x08, - 0x0f, 0x05, 0x0a, 0x08, - 0x00, 0x05, 0x0a, 0x00, - 0xff, 0x6a, 0x0c, 0x08, - 0x5a, 0x6a, 0x00, 0x04, - 0x12, 0x65, 0x02, 0x00, - 0x31, 0x6a, 0xca, 0x00, - 0x80, 0x37, 0x6e, 0x68, - 0xff, 0x65, 0xca, 0x18, - 0xff, 0x37, 0xdc, 0x08, - 0xff, 0x6e, 0xc8, 0x08, - 0x00, 0x6c, 0x76, 0x78, - 0x20, 0x01, 0x02, 0x00, - 0x4c, 0x37, 0xc8, 0x28, - 0x08, 0x1f, 0x7e, 0x78, - 0x08, 0x37, 0x6e, 0x00, - 0x08, 0x64, 0xc8, 0x00, - 0x70, 0x64, 0xca, 0x18, - 0xff, 0x6c, 0x0a, 0x08, - 0x20, 0x64, 0xca, 0x18, - 0xff, 0x6c, 0x08, 0x0c, - 0x40, 0x0b, 0x96, 0x68, - 0x20, 0x6a, 0x16, 0x00, - 0xf0, 0x19, 0x6e, 0x08, - 0x08, 0x6a, 0x18, 0x00, - 0x08, 0x11, 0x22, 0x00, - 0x08, 0x6a, 0x66, 0x58, - 0x08, 0x6a, 0x68, 0x00, - 0x00, 0x65, 0xaa, 0x40, - 0x12, 0x6a, 0x00, 0x00, - 0x40, 0x6a, 0x16, 0x00, - 0xff, 0x3e, 0x20, 0x09, - 0xff, 0xba, 0x7c, 0x08, - 0xff, 0xa1, 0x6e, 0x08, 
- 0x08, 0x6a, 0x18, 0x00, - 0x08, 0x11, 0x22, 0x00, - 0x08, 0x6a, 0x66, 0x58, - 0x80, 0x6a, 0x68, 0x00, - 0x80, 0x36, 0x6c, 0x00, - 0x00, 0x65, 0xba, 0x5b, - 0xff, 0x3d, 0xc8, 0x08, - 0xbf, 0x64, 0xe2, 0x78, - 0x80, 0x64, 0xc8, 0x71, - 0xa0, 0x64, 0xf8, 0x71, - 0xc0, 0x64, 0xf0, 0x71, - 0xe0, 0x64, 0x38, 0x72, - 0x01, 0x6a, 0x22, 0x01, - 0x00, 0x65, 0xaa, 0x40, - 0xf7, 0x11, 0x22, 0x08, - 0x00, 0x65, 0xca, 0x58, - 0xff, 0x06, 0xd4, 0x08, - 0xf7, 0x01, 0x02, 0x08, - 0x09, 0x0c, 0xc4, 0x78, - 0x08, 0x0c, 0x0c, 0x68, - 0x01, 0x6a, 0x22, 0x01, - 0xff, 0x6a, 0x26, 0x09, - 0x02, 0x6a, 0x08, 0x30, - 0xff, 0x6a, 0x08, 0x08, - 0xdf, 0x01, 0x02, 0x08, - 0x01, 0x6a, 0x7a, 0x00, - 0xff, 0x6a, 0x6c, 0x0c, - 0x04, 0x14, 0x10, 0x31, - 0x03, 0xa9, 0x18, 0x31, - 0x03, 0xa9, 0x10, 0x30, - 0x08, 0x6a, 0xcc, 0x00, - 0xa9, 0x6a, 0xd0, 0x5b, - 0x00, 0x65, 0x02, 0x41, - 0xa8, 0x6a, 0x6a, 0x00, - 0x79, 0x6a, 0x6a, 0x00, - 0x40, 0x3d, 0xea, 0x68, - 0x04, 0x35, 0x6a, 0x00, - 0x00, 0x65, 0x2a, 0x5b, - 0x80, 0x6a, 0xd4, 0x01, - 0x10, 0x36, 0xd6, 0x68, - 0x10, 0x36, 0x6c, 0x00, - 0x07, 0xac, 0x10, 0x31, - 0x05, 0xa3, 0x70, 0x30, - 0x03, 0x8c, 0x10, 0x30, - 0x88, 0x6a, 0xcc, 0x00, - 0xac, 0x6a, 0xc8, 0x5b, - 0x00, 0x65, 0xc2, 0x5b, - 0x38, 0x6a, 0xcc, 0x00, - 0xa3, 0x6a, 0xcc, 0x5b, - 0xff, 0x38, 0x12, 0x69, - 0x80, 0x02, 0x04, 0x00, - 0xe7, 0x35, 0x6a, 0x08, - 0x03, 0x69, 0x18, 0x31, - 0x03, 0x69, 0x10, 0x30, - 0xff, 0x6a, 0x10, 0x00, - 0xff, 0x6a, 0x12, 0x00, - 0xff, 0x6a, 0x14, 0x00, - 0x22, 0x38, 0xc8, 0x28, - 0x01, 0x38, 0x1c, 0x61, - 0x02, 0x64, 0xc8, 0x00, - 0x01, 0x38, 0x1c, 0x61, - 0xbf, 0x35, 0x6a, 0x08, - 0xff, 0x64, 0xf8, 0x09, - 0xff, 0x35, 0x26, 0x09, - 0x80, 0x02, 0xa4, 0x69, - 0x10, 0x0c, 0x7a, 0x69, - 0x80, 0x94, 0x22, 0x79, - 0x00, 0x35, 0x0a, 0x5b, - 0x80, 0x02, 0xa4, 0x69, - 0xff, 0x65, 0x94, 0x79, - 0x01, 0x38, 0x70, 0x71, - 0xff, 0x38, 0x70, 0x18, - 0xff, 0x38, 0x94, 0x79, - 0x80, 0xea, 0x4a, 0x61, - 0xef, 0x38, 0xc8, 0x18, - 0x80, 0x6a, 0xc8, 0x00, - 0x00, 0x65, 0x3c, 0x49, - 0x33, 0x38, 0xc8, 0x28, - 0xff, 0x64, 0xd0, 0x09, - 0x04, 0x39, 0xc0, 0x31, - 0x09, 0x6a, 0xd6, 0x01, - 0x80, 0xeb, 0x42, 0x79, - 0xf7, 0xeb, 0xd6, 0x09, - 0x08, 0xeb, 0x46, 0x69, - 0x01, 0x6a, 0xd6, 0x01, - 0x08, 0xe9, 0x10, 0x31, - 0x03, 0x8c, 0x10, 0x30, - 0xff, 0x38, 0x70, 0x18, - 0x88, 0x6a, 0xcc, 0x00, - 0x39, 0x6a, 0xce, 0x5b, - 0x08, 0x6a, 0x18, 0x01, - 0xff, 0x6a, 0x1a, 0x09, - 0xff, 0x6a, 0x1c, 0x09, - 0x0d, 0x93, 0x26, 0x01, - 0x00, 0x65, 0x78, 0x5c, - 0x88, 0x6a, 0xcc, 0x00, - 0x00, 0x65, 0x6a, 0x5c, - 0x00, 0x65, 0xc2, 0x5b, - 0xff, 0x6a, 0xc8, 0x08, - 0x08, 0x39, 0x72, 0x18, - 0x00, 0x3a, 0x74, 0x20, - 0x00, 0x65, 0x02, 0x41, - 0x01, 0x0c, 0x6c, 0x79, - 0x10, 0x0c, 0x02, 0x79, - 0x10, 0x0c, 0x7a, 0x69, - 0x01, 0xfc, 0x70, 0x79, - 0xff, 0x6a, 0x70, 0x08, - 0x01, 0x0c, 0x76, 0x79, - 0x10, 0x0c, 0x02, 0x79, - 0x00, 0x65, 0xae, 0x59, - 0x01, 0xfc, 0x94, 0x69, - 0x40, 0x0d, 0x84, 0x69, - 0xb1, 0x6a, 0x22, 0x01, - 0x00, 0x65, 0x94, 0x41, - 0x2e, 0xfc, 0xa2, 0x28, - 0x3f, 0x38, 0xc8, 0x08, - 0x00, 0x51, 0x94, 0x71, - 0xff, 0x6a, 0xc8, 0x08, - 0xf8, 0x39, 0x72, 0x18, - 0xff, 0x3a, 0x74, 0x20, - 0x01, 0x38, 0x70, 0x18, - 0x00, 0x65, 0x86, 0x41, - 0x03, 0x08, 0x52, 0x31, - 0xff, 0x38, 0x50, 0x09, - 0x12, 0x01, 0x02, 0x00, - 0xff, 0x08, 0x52, 0x09, - 0xff, 0x09, 0x54, 0x09, - 0xff, 0x0a, 0x56, 0x09, - 0xff, 0x38, 0x50, 0x09, - 0x00, 0x65, 0xaa, 0x40, - 0x10, 0x0c, 0xa4, 0x79, - 0x00, 0x65, 0xae, 0x59, - 0x7f, 0x02, 0x04, 0x08, - 0xe1, 0x6a, 0x22, 0x01, - 0x00, 0x65, 0xaa, 0x40, - 0x04, 0x93, 0xc2, 0x69, - 0xdf, 0x93, 
0x26, 0x09, - 0x20, 0x93, 0xb2, 0x69, - 0x02, 0x93, 0x26, 0x01, - 0x01, 0x94, 0xb6, 0x79, - 0x01, 0x94, 0xb6, 0x79, - 0x01, 0x94, 0xb6, 0x79, - 0x01, 0x94, 0xb6, 0x79, - 0x01, 0x94, 0xb6, 0x79, - 0x10, 0x94, 0xc0, 0x69, - 0xd7, 0x93, 0x26, 0x09, - 0x28, 0x93, 0xc4, 0x69, - 0xff, 0x6a, 0xd4, 0x0c, - 0x00, 0x65, 0x2a, 0x5b, - 0x05, 0xb4, 0x10, 0x31, - 0x02, 0x6a, 0x1a, 0x31, - 0x03, 0x8c, 0x10, 0x30, - 0x88, 0x6a, 0xcc, 0x00, - 0xb4, 0x6a, 0xcc, 0x5b, - 0xff, 0x6a, 0x1a, 0x09, - 0xff, 0x6a, 0x1c, 0x09, - 0x00, 0x65, 0xc2, 0x5b, - 0x3d, 0x6a, 0x0a, 0x5b, - 0xac, 0x6a, 0x26, 0x01, - 0x04, 0x0b, 0xde, 0x69, - 0x04, 0x0b, 0xe4, 0x69, - 0x10, 0x0c, 0xe0, 0x79, - 0x02, 0x03, 0xe8, 0x79, - 0x11, 0x0c, 0xe4, 0x79, - 0xd7, 0x93, 0x26, 0x09, - 0x28, 0x93, 0xea, 0x69, - 0x12, 0x01, 0x02, 0x00, - 0x00, 0x65, 0xaa, 0x40, - 0x00, 0x65, 0x2a, 0x5b, - 0xff, 0x06, 0x44, 0x09, - 0x00, 0x65, 0xaa, 0x40, - 0x10, 0x3d, 0x06, 0x00, - 0xff, 0x34, 0xca, 0x08, - 0x80, 0x65, 0x1c, 0x62, - 0x0f, 0xa1, 0xca, 0x08, - 0x07, 0xa1, 0xca, 0x08, - 0x40, 0xa0, 0xc8, 0x08, - 0x00, 0x65, 0xca, 0x00, - 0x80, 0x65, 0xca, 0x00, - 0x80, 0xa0, 0x0c, 0x7a, - 0xff, 0x65, 0x0c, 0x08, - 0x00, 0x65, 0x1e, 0x42, - 0x20, 0xa0, 0x24, 0x7a, - 0xff, 0x65, 0x0c, 0x08, - 0x00, 0x65, 0xba, 0x5b, - 0xa0, 0x3d, 0x2c, 0x62, - 0x23, 0xa0, 0x0c, 0x08, - 0x00, 0x65, 0xba, 0x5b, - 0xa0, 0x3d, 0x2c, 0x62, - 0x00, 0xb9, 0x24, 0x42, - 0xff, 0x65, 0x24, 0x62, - 0xa1, 0x6a, 0x22, 0x01, - 0xff, 0x6a, 0xd4, 0x08, - 0x10, 0x51, 0x2c, 0x72, - 0x40, 0x6a, 0x18, 0x00, - 0xff, 0x65, 0x0c, 0x08, - 0x00, 0x65, 0xba, 0x5b, - 0xa0, 0x3d, 0xf6, 0x71, - 0x40, 0x6a, 0x18, 0x00, - 0xff, 0x34, 0xa6, 0x08, - 0x80, 0x34, 0x34, 0x62, - 0x7f, 0xa0, 0x40, 0x09, - 0x08, 0x6a, 0x68, 0x00, - 0x00, 0x65, 0xaa, 0x40, - 0x64, 0x6a, 0x00, 0x5b, - 0x80, 0x64, 0xaa, 0x6a, - 0x04, 0x64, 0x8c, 0x72, - 0x02, 0x64, 0x92, 0x72, - 0x00, 0x6a, 0x54, 0x72, - 0x03, 0x64, 0xa6, 0x72, - 0x01, 0x64, 0x88, 0x72, - 0x07, 0x64, 0xe8, 0x72, - 0x08, 0x64, 0x50, 0x72, - 0x23, 0x64, 0xec, 0x72, - 0x11, 0x6a, 0x22, 0x01, - 0x07, 0x6a, 0xf2, 0x5a, - 0xff, 0x06, 0xd4, 0x08, - 0x00, 0x65, 0xaa, 0x40, - 0xff, 0xa8, 0x58, 0x6a, - 0xff, 0xa2, 0x70, 0x7a, - 0x01, 0x6a, 0x6a, 0x00, - 0x00, 0xb9, 0xe6, 0x5b, - 0xff, 0xa2, 0x70, 0x7a, - 0x71, 0x6a, 0x22, 0x01, - 0xff, 0x6a, 0xd4, 0x08, - 0x40, 0x51, 0x70, 0x62, - 0x0d, 0x6a, 0x6a, 0x00, - 0x00, 0xb9, 0xe6, 0x5b, - 0xff, 0x3e, 0x74, 0x09, - 0xff, 0x90, 0x7c, 0x08, - 0x00, 0x65, 0x4e, 0x58, - 0x00, 0x65, 0xbc, 0x40, - 0x20, 0xa0, 0x78, 0x6a, - 0xff, 0x37, 0xc8, 0x08, - 0x00, 0x6a, 0x90, 0x5b, - 0xff, 0x6a, 0xa6, 0x5b, - 0xff, 0xf8, 0xc8, 0x08, - 0xff, 0x4f, 0xc8, 0x08, - 0x01, 0x6a, 0x90, 0x5b, - 0x00, 0xb9, 0xa6, 0x5b, - 0x01, 0x4f, 0x9e, 0x18, - 0x02, 0x6a, 0x22, 0x01, - 0x00, 0x65, 0x80, 0x5c, - 0x00, 0x65, 0xbc, 0x40, - 0x41, 0x6a, 0x22, 0x01, - 0x00, 0x65, 0xaa, 0x40, - 0x04, 0xa0, 0x40, 0x01, - 0x00, 0x65, 0x98, 0x5c, - 0x00, 0x65, 0xbc, 0x40, - 0x10, 0x36, 0x50, 0x7a, - 0x05, 0x38, 0x46, 0x31, - 0x04, 0x14, 0x58, 0x31, - 0x03, 0xa9, 0x60, 0x31, - 0xa3, 0x6a, 0xcc, 0x00, - 0x38, 0x6a, 0xcc, 0x5b, - 0xac, 0x6a, 0xcc, 0x00, - 0x14, 0x6a, 0xce, 0x5b, - 0xa9, 0x6a, 0xd0, 0x5b, - 0x00, 0x65, 0x50, 0x42, - 0xef, 0x36, 0x6c, 0x08, - 0x00, 0x65, 0x50, 0x42, - 0x0f, 0x64, 0xc8, 0x08, - 0x07, 0x64, 0xc8, 0x08, - 0x00, 0x37, 0x6e, 0x00, - 0xff, 0x6a, 0xa4, 0x00, - 0x00, 0x65, 0x60, 0x5b, - 0xff, 0x51, 0xbc, 0x72, - 0x20, 0x36, 0xc6, 0x7a, - 0x00, 0x90, 0x4e, 0x5b, - 0x00, 0x65, 0xc8, 0x42, - 0xff, 0x06, 0xd4, 0x08, - 0x00, 0x65, 0xba, 0x5b, - 0xe0, 0x3d, 0xe2, 0x62, - 
0x20, 0x12, 0xe2, 0x62, - 0x51, 0x6a, 0xf6, 0x5a, - 0x00, 0x65, 0x48, 0x5b, - 0xff, 0x37, 0xc8, 0x08, - 0x00, 0xa1, 0xda, 0x62, - 0x04, 0xa0, 0xda, 0x7a, - 0xfb, 0xa0, 0x40, 0x09, - 0x80, 0x36, 0x6c, 0x00, - 0x80, 0xa0, 0x50, 0x7a, - 0x7f, 0xa0, 0x40, 0x09, - 0xff, 0x6a, 0xf2, 0x5a, - 0x00, 0x65, 0x50, 0x42, - 0x04, 0xa0, 0xe0, 0x7a, - 0x00, 0x65, 0x98, 0x5c, - 0x00, 0x65, 0xe2, 0x42, - 0x00, 0x65, 0x80, 0x5c, - 0x31, 0x6a, 0x22, 0x01, - 0x0c, 0x6a, 0xf2, 0x5a, - 0x00, 0x65, 0x50, 0x42, - 0x61, 0x6a, 0x22, 0x01, - 0x00, 0x65, 0x50, 0x42, - 0x51, 0x6a, 0xf6, 0x5a, - 0x51, 0x6a, 0x22, 0x01, - 0x00, 0x65, 0x50, 0x42, - 0x10, 0x3d, 0x06, 0x00, - 0xff, 0x65, 0x68, 0x0c, - 0xff, 0x06, 0xd4, 0x08, - 0x01, 0x0c, 0xf8, 0x7a, - 0x04, 0x0c, 0xfa, 0x6a, - 0xe0, 0x03, 0x7a, 0x08, - 0xe0, 0x3d, 0x06, 0x63, - 0xff, 0x65, 0xcc, 0x08, - 0xff, 0x12, 0xda, 0x0c, - 0xff, 0x06, 0xd4, 0x0c, - 0xd1, 0x6a, 0x22, 0x01, - 0x00, 0x65, 0xaa, 0x40, - 0xff, 0x65, 0x26, 0x09, - 0x01, 0x0b, 0x1a, 0x6b, - 0x10, 0x0c, 0x0c, 0x7b, - 0x04, 0x0b, 0x14, 0x6b, - 0xff, 0x6a, 0xca, 0x08, - 0x04, 0x93, 0x18, 0x6b, - 0x01, 0x94, 0x16, 0x7b, - 0x10, 0x94, 0x18, 0x6b, - 0x80, 0x3d, 0x1e, 0x73, - 0x0f, 0x04, 0x22, 0x6b, - 0x02, 0x03, 0x22, 0x7b, - 0x11, 0x0c, 0x1e, 0x7b, - 0xc7, 0x93, 0x26, 0x09, - 0xff, 0x99, 0xd4, 0x08, - 0x38, 0x93, 0x24, 0x6b, - 0xff, 0x6a, 0xd4, 0x0c, - 0x80, 0x36, 0x28, 0x6b, - 0x21, 0x6a, 0x22, 0x05, - 0xff, 0x65, 0x20, 0x09, - 0xff, 0x51, 0x36, 0x63, - 0xff, 0x37, 0xc8, 0x08, - 0xa1, 0x6a, 0x42, 0x43, - 0xff, 0x51, 0xc8, 0x08, - 0xb9, 0x6a, 0x42, 0x43, - 0xff, 0x90, 0xa4, 0x08, - 0xff, 0xba, 0x46, 0x73, - 0xff, 0xba, 0x20, 0x09, - 0xff, 0x65, 0xca, 0x18, - 0x00, 0x6c, 0x3a, 0x63, - 0xff, 0x90, 0xca, 0x0c, - 0xff, 0x6a, 0xca, 0x04, - 0x20, 0x36, 0x5a, 0x7b, - 0x00, 0x90, 0x2e, 0x5b, - 0xff, 0x65, 0x5a, 0x73, - 0xff, 0x52, 0x58, 0x73, - 0xff, 0xba, 0xcc, 0x08, - 0xff, 0x52, 0x20, 0x09, - 0xff, 0x66, 0x74, 0x09, - 0xff, 0x65, 0x20, 0x0d, - 0xff, 0xba, 0x7e, 0x0c, - 0x00, 0x6a, 0x86, 0x5c, - 0x0d, 0x6a, 0x6a, 0x00, - 0x00, 0x51, 0xe6, 0x43, - 0xff, 0x3f, 0xb4, 0x73, - 0xff, 0x6a, 0xa2, 0x00, - 0x00, 0x3f, 0x2e, 0x5b, - 0xff, 0x65, 0xb4, 0x73, - 0x20, 0x36, 0x6c, 0x00, - 0x20, 0xa0, 0x6e, 0x6b, - 0xff, 0xb9, 0xa2, 0x0c, - 0xff, 0x6a, 0xa2, 0x04, - 0xff, 0x65, 0xa4, 0x08, - 0xe0, 0x6a, 0xcc, 0x00, - 0x45, 0x6a, 0xda, 0x5b, - 0x01, 0x6a, 0xd0, 0x01, - 0x09, 0x6a, 0xd6, 0x01, - 0x80, 0xeb, 0x7a, 0x7b, - 0x01, 0x6a, 0xd6, 0x01, - 0x01, 0xe9, 0xa4, 0x34, - 0x88, 0x6a, 0xcc, 0x00, - 0x45, 0x6a, 0xda, 0x5b, - 0x01, 0x6a, 0x18, 0x01, - 0xff, 0x6a, 0x1a, 0x09, - 0xff, 0x6a, 0x1c, 0x09, - 0x0d, 0x6a, 0x26, 0x01, - 0x00, 0x65, 0x78, 0x5c, - 0xff, 0x99, 0xa4, 0x0c, - 0xff, 0x65, 0xa4, 0x08, - 0xe0, 0x6a, 0xcc, 0x00, - 0x45, 0x6a, 0xda, 0x5b, - 0x01, 0x6a, 0xd0, 0x01, - 0x01, 0x6a, 0xdc, 0x05, - 0x88, 0x6a, 0xcc, 0x00, - 0x45, 0x6a, 0xda, 0x5b, - 0x01, 0x6a, 0x18, 0x01, - 0xff, 0x6a, 0x1a, 0x09, - 0xff, 0x6a, 0x1c, 0x09, - 0x01, 0x6a, 0x26, 0x05, - 0x01, 0x65, 0xd8, 0x31, - 0x09, 0xee, 0xdc, 0x01, - 0x80, 0xee, 0xaa, 0x7b, - 0xff, 0x6a, 0xdc, 0x0d, - 0xff, 0x65, 0x32, 0x09, - 0x0a, 0x93, 0x26, 0x01, - 0x00, 0x65, 0x78, 0x44, - 0xff, 0x37, 0xc8, 0x08, - 0x00, 0x6a, 0x70, 0x5b, - 0xff, 0x52, 0xa2, 0x0c, - 0x01, 0x0c, 0xba, 0x7b, - 0x04, 0x0c, 0xba, 0x6b, - 0xe0, 0x03, 0x06, 0x08, - 0xe0, 0x03, 0x7a, 0x0c, - 0xff, 0x8c, 0x10, 0x08, - 0xff, 0x8d, 0x12, 0x08, - 0xff, 0x8e, 0x14, 0x0c, - 0xff, 0x6c, 0xda, 0x08, - 0xff, 0x6c, 0xda, 0x08, - 0xff, 0x6c, 0xda, 0x08, - 0xff, 0x6c, 0xda, 0x08, - 0xff, 0x6c, 0xda, 0x08, - 0xff, 0x6c, 0xda, 
0x08, - 0xff, 0x6c, 0xda, 0x0c, - 0x3d, 0x64, 0xa4, 0x28, - 0x55, 0x64, 0xc8, 0x28, - 0x00, 0x6c, 0xda, 0x18, - 0xff, 0x52, 0xc8, 0x08, - 0x00, 0x6c, 0xda, 0x20, - 0xff, 0x6a, 0xc8, 0x08, - 0x00, 0x6c, 0xda, 0x20, - 0x00, 0x6c, 0xda, 0x24, - 0xff, 0x65, 0xc8, 0x08, - 0xe0, 0x6a, 0xcc, 0x00, - 0x41, 0x6a, 0xd6, 0x5b, - 0xff, 0x90, 0xe2, 0x09, - 0x20, 0x6a, 0xd0, 0x01, - 0x04, 0x35, 0xf8, 0x7b, - 0x1d, 0x6a, 0xdc, 0x01, - 0xdc, 0xee, 0xf4, 0x63, - 0x00, 0x65, 0x0e, 0x44, - 0x01, 0x6a, 0xdc, 0x01, - 0x20, 0xa0, 0xd8, 0x31, - 0x09, 0xee, 0xdc, 0x01, - 0x80, 0xee, 0xfe, 0x7b, - 0x11, 0x6a, 0xdc, 0x01, - 0x50, 0xee, 0x02, 0x64, - 0x20, 0x6a, 0xd0, 0x01, - 0x09, 0x6a, 0xdc, 0x01, - 0x88, 0xee, 0x08, 0x64, - 0x19, 0x6a, 0xdc, 0x01, - 0xd8, 0xee, 0x0c, 0x64, - 0xff, 0x6a, 0xdc, 0x09, - 0x18, 0xee, 0x10, 0x6c, - 0xff, 0x6a, 0xd4, 0x0c, - 0x88, 0x6a, 0xcc, 0x00, - 0x41, 0x6a, 0xd6, 0x5b, - 0x20, 0x6a, 0x18, 0x01, - 0xff, 0x6a, 0x1a, 0x09, - 0xff, 0x6a, 0x1c, 0x09, - 0xff, 0x35, 0x26, 0x09, - 0x04, 0x35, 0x3c, 0x6c, - 0xa0, 0x6a, 0xca, 0x00, - 0x20, 0x65, 0xc8, 0x18, - 0xff, 0x6c, 0x32, 0x09, - 0xff, 0x6c, 0x32, 0x09, - 0xff, 0x6c, 0x32, 0x09, - 0xff, 0x6c, 0x32, 0x09, - 0xff, 0x6c, 0x32, 0x09, - 0xff, 0x6c, 0x32, 0x09, - 0xff, 0x6c, 0x32, 0x09, - 0xff, 0x6c, 0x32, 0x09, - 0x00, 0x65, 0x26, 0x64, - 0x0a, 0x93, 0x26, 0x01, - 0x00, 0x65, 0x78, 0x44, - 0xa0, 0x6a, 0xcc, 0x00, - 0xe8, 0x6a, 0xc8, 0x00, - 0x01, 0x94, 0x40, 0x6c, - 0x10, 0x94, 0x42, 0x6c, - 0x08, 0x94, 0x54, 0x6c, - 0x08, 0x94, 0x54, 0x6c, - 0x08, 0x94, 0x54, 0x6c, - 0x00, 0x65, 0x68, 0x5c, - 0x08, 0x64, 0xc8, 0x18, - 0x00, 0x8c, 0xca, 0x18, - 0x00, 0x65, 0x4a, 0x4c, - 0x00, 0x65, 0x40, 0x44, - 0xf7, 0x93, 0x26, 0x09, - 0x08, 0x93, 0x56, 0x6c, - 0x00, 0x65, 0x68, 0x5c, - 0x08, 0x64, 0xc8, 0x18, - 0x08, 0x64, 0x58, 0x64, - 0xff, 0x6a, 0xd4, 0x0c, - 0x00, 0x65, 0x78, 0x5c, - 0x00, 0x65, 0x68, 0x5c, - 0x00, 0x65, 0x68, 0x5c, - 0x00, 0x65, 0x68, 0x5c, - 0xff, 0x99, 0xda, 0x08, - 0xff, 0x99, 0xda, 0x08, - 0xff, 0x99, 0xda, 0x08, - 0xff, 0x99, 0xda, 0x08, - 0xff, 0x99, 0xda, 0x08, - 0xff, 0x99, 0xda, 0x08, - 0xff, 0x99, 0xda, 0x08, - 0xff, 0x99, 0xda, 0x0c, - 0x08, 0x94, 0x78, 0x7c, - 0xf7, 0x93, 0x26, 0x09, - 0x08, 0x93, 0x7c, 0x6c, - 0xff, 0x6a, 0xd4, 0x0c, - 0xff, 0x40, 0x74, 0x09, - 0xff, 0x90, 0x80, 0x08, - 0xff, 0x6a, 0x72, 0x05, - 0xff, 0x40, 0x94, 0x64, - 0xff, 0x3f, 0x8c, 0x64, - 0xff, 0x6a, 0xca, 0x04, - 0xff, 0x3f, 0x20, 0x09, - 0x01, 0x6a, 0x6a, 0x00, - 0x00, 0xb9, 0xe6, 0x5b, - 0xff, 0xba, 0x7e, 0x0c, - 0xff, 0x40, 0x20, 0x09, - 0xff, 0xba, 0x80, 0x0c, - 0xff, 0x3f, 0x74, 0x09, - 0xff, 0x90, 0x7e, 0x0c, -}; - -static int aic7xxx_patch15_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch15_func(struct aic7xxx_host *p) -{ - return ((p->bugs & AHC_BUG_SCBCHAN_UPLOAD) != 0); -} - -static int aic7xxx_patch14_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch14_func(struct aic7xxx_host *p) -{ - return ((p->bugs & AHC_BUG_PCI_2_1_RETRY) != 0); -} - -static int aic7xxx_patch13_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch13_func(struct aic7xxx_host *p) -{ - return ((p->features & AHC_WIDE) != 0); -} - -static int aic7xxx_patch12_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch12_func(struct aic7xxx_host *p) -{ - return ((p->bugs & AHC_BUG_AUTOFLUSH) != 0); -} - -static int aic7xxx_patch11_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch11_func(struct aic7xxx_host *p) -{ - return ((p->features & AHC_ULTRA2) == 0); -} - -static int aic7xxx_patch10_func(struct aic7xxx_host *p); - -static int 
-aic7xxx_patch10_func(struct aic7xxx_host *p) -{ - return ((p->features & AHC_CMD_CHAN) == 0); -} - -static int aic7xxx_patch9_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch9_func(struct aic7xxx_host *p) -{ - return ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895); -} - -static int aic7xxx_patch8_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch8_func(struct aic7xxx_host *p) -{ - return ((p->features & AHC_ULTRA) != 0); -} - -static int aic7xxx_patch7_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch7_func(struct aic7xxx_host *p) -{ - return ((p->features & AHC_ULTRA2) != 0); -} - -static int aic7xxx_patch6_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch6_func(struct aic7xxx_host *p) -{ - return ((p->flags & AHC_PAGESCBS) == 0); -} - -static int aic7xxx_patch5_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch5_func(struct aic7xxx_host *p) -{ - return ((p->flags & AHC_PAGESCBS) != 0); -} - -static int aic7xxx_patch4_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch4_func(struct aic7xxx_host *p) -{ - return ((p->features & AHC_QUEUE_REGS) != 0); -} - -static int aic7xxx_patch3_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch3_func(struct aic7xxx_host *p) -{ - return ((p->features & AHC_TWIN) != 0); -} - -static int aic7xxx_patch2_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch2_func(struct aic7xxx_host *p) -{ - return ((p->features & AHC_QUEUE_REGS) == 0); -} - -static int aic7xxx_patch1_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch1_func(struct aic7xxx_host *p) -{ - return ((p->features & AHC_CMD_CHAN) != 0); -} - -static int aic7xxx_patch0_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch0_func(struct aic7xxx_host *p) -{ - return (0); -} - -struct sequencer_patch { - int (*patch_func)(struct aic7xxx_host *); - unsigned int begin :10, - skip_instr :10, - skip_patch :12; -} sequencer_patches[] = { - { aic7xxx_patch1_func, 3, 2, 1 }, - { aic7xxx_patch2_func, 7, 1, 1 }, - { aic7xxx_patch2_func, 8, 1, 1 }, - { aic7xxx_patch3_func, 11, 4, 1 }, - { aic7xxx_patch4_func, 16, 3, 2 }, - { aic7xxx_patch0_func, 19, 4, 1 }, - { aic7xxx_patch5_func, 23, 1, 1 }, - { aic7xxx_patch6_func, 26, 1, 1 }, - { aic7xxx_patch1_func, 29, 1, 2 }, - { aic7xxx_patch0_func, 30, 3, 1 }, - { aic7xxx_patch3_func, 39, 4, 1 }, - { aic7xxx_patch7_func, 43, 3, 2 }, - { aic7xxx_patch0_func, 46, 3, 1 }, - { aic7xxx_patch8_func, 52, 7, 1 }, - { aic7xxx_patch3_func, 60, 3, 1 }, - { aic7xxx_patch7_func, 63, 2, 1 }, - { aic7xxx_patch7_func, 102, 1, 2 }, - { aic7xxx_patch0_func, 103, 2, 1 }, - { aic7xxx_patch7_func, 107, 2, 1 }, - { aic7xxx_patch9_func, 109, 1, 1 }, - { aic7xxx_patch10_func, 110, 2, 1 }, - { aic7xxx_patch7_func, 113, 1, 2 }, - { aic7xxx_patch0_func, 114, 1, 1 }, - { aic7xxx_patch1_func, 118, 1, 1 }, - { aic7xxx_patch1_func, 121, 3, 3 }, - { aic7xxx_patch11_func, 123, 1, 1 }, - { aic7xxx_patch0_func, 124, 5, 1 }, - { aic7xxx_patch7_func, 132, 1, 1 }, - { aic7xxx_patch9_func, 133, 1, 1 }, - { aic7xxx_patch10_func, 134, 3, 1 }, - { aic7xxx_patch7_func, 137, 3, 2 }, - { aic7xxx_patch0_func, 140, 2, 1 }, - { aic7xxx_patch7_func, 142, 5, 2 }, - { aic7xxx_patch0_func, 147, 3, 1 }, - { aic7xxx_patch7_func, 150, 1, 2 }, - { aic7xxx_patch0_func, 151, 2, 1 }, - { aic7xxx_patch1_func, 153, 15, 4 }, - { aic7xxx_patch11_func, 166, 1, 2 }, - { aic7xxx_patch0_func, 167, 1, 1 }, - { aic7xxx_patch0_func, 168, 10, 1 }, - { aic7xxx_patch7_func, 181, 1, 2 }, - { aic7xxx_patch0_func, 182, 2, 1 }, - { aic7xxx_patch7_func, 184, 18, 1 }, - { 
aic7xxx_patch1_func, 202, 3, 3 }, - { aic7xxx_patch7_func, 204, 1, 1 }, - { aic7xxx_patch0_func, 205, 4, 1 }, - { aic7xxx_patch7_func, 210, 2, 1 }, - { aic7xxx_patch7_func, 215, 13, 3 }, - { aic7xxx_patch12_func, 218, 1, 1 }, - { aic7xxx_patch12_func, 219, 4, 1 }, - { aic7xxx_patch1_func, 229, 3, 3 }, - { aic7xxx_patch11_func, 231, 1, 1 }, - { aic7xxx_patch0_func, 232, 5, 1 }, - { aic7xxx_patch11_func, 237, 1, 2 }, - { aic7xxx_patch0_func, 238, 9, 1 }, - { aic7xxx_patch13_func, 254, 1, 2 }, - { aic7xxx_patch0_func, 255, 1, 1 }, - { aic7xxx_patch4_func, 316, 1, 2 }, - { aic7xxx_patch0_func, 317, 1, 1 }, - { aic7xxx_patch2_func, 320, 1, 1 }, - { aic7xxx_patch1_func, 330, 3, 2 }, - { aic7xxx_patch0_func, 333, 5, 1 }, - { aic7xxx_patch13_func, 341, 1, 2 }, - { aic7xxx_patch0_func, 342, 1, 1 }, - { aic7xxx_patch5_func, 347, 1, 1 }, - { aic7xxx_patch11_func, 389, 15, 2 }, - { aic7xxx_patch14_func, 402, 1, 1 }, - { aic7xxx_patch1_func, 441, 7, 2 }, - { aic7xxx_patch0_func, 448, 8, 1 }, - { aic7xxx_patch1_func, 457, 4, 2 }, - { aic7xxx_patch0_func, 461, 6, 1 }, - { aic7xxx_patch1_func, 467, 4, 2 }, - { aic7xxx_patch0_func, 471, 3, 1 }, - { aic7xxx_patch10_func, 481, 10, 1 }, - { aic7xxx_patch1_func, 500, 22, 5 }, - { aic7xxx_patch11_func, 508, 4, 1 }, - { aic7xxx_patch7_func, 512, 7, 3 }, - { aic7xxx_patch15_func, 512, 5, 2 }, - { aic7xxx_patch0_func, 517, 2, 1 }, - { aic7xxx_patch10_func, 522, 50, 3 }, - { aic7xxx_patch14_func, 543, 17, 2 }, - { aic7xxx_patch0_func, 560, 4, 1 }, - { aic7xxx_patch10_func, 572, 4, 1 }, - { aic7xxx_patch5_func, 576, 2, 1 }, - { aic7xxx_patch5_func, 579, 9, 1 }, - -}; diff --git a/drivers/scsi/aic7xxx_old/scsi_message.h b/drivers/scsi/aic7xxx_old/scsi_message.h deleted file mode 100644 index a79f89c6517..00000000000 --- a/drivers/scsi/aic7xxx_old/scsi_message.h +++ /dev/null @@ -1,49 +0,0 @@ -/* Messages (1 byte) */ /* I/T (M)andatory or (O)ptional */ -#define MSG_CMDCOMPLETE 0x00 /* M/M */ -#define MSG_EXTENDED 0x01 /* O/O */ -#define MSG_SAVEDATAPOINTER 0x02 /* O/O */ -#define MSG_RESTOREPOINTERS 0x03 /* O/O */ -#define MSG_DISCONNECT 0x04 /* O/O */ -#define MSG_INITIATOR_DET_ERR 0x05 /* M/M */ -#define MSG_ABORT 0x06 /* O/M */ -#define MSG_MESSAGE_REJECT 0x07 /* M/M */ -#define MSG_NOOP 0x08 /* M/M */ -#define MSG_PARITY_ERROR 0x09 /* M/M */ -#define MSG_LINK_CMD_COMPLETE 0x0a /* O/O */ -#define MSG_LINK_CMD_COMPLETEF 0x0b /* O/O */ -#define MSG_BUS_DEV_RESET 0x0c /* O/M */ -#define MSG_ABORT_TAG 0x0d /* O/O */ -#define MSG_CLEAR_QUEUE 0x0e /* O/O */ -#define MSG_INIT_RECOVERY 0x0f /* O/O */ -#define MSG_REL_RECOVERY 0x10 /* O/O */ -#define MSG_TERM_IO_PROC 0x11 /* O/O */ - -/* Messages (2 byte) */ -#define MSG_SIMPLE_Q_TAG 0x20 /* O/O */ -#define MSG_HEAD_OF_Q_TAG 0x21 /* O/O */ -#define MSG_ORDERED_Q_TAG 0x22 /* O/O */ -#define MSG_IGN_WIDE_RESIDUE 0x23 /* O/O */ - -/* Identify message */ /* M/M */ -#define MSG_IDENTIFYFLAG 0x80 -#define MSG_IDENTIFY_DISCFLAG 0x40 -#define MSG_IDENTIFY(lun, disc) (((disc) ? 
0xc0 : MSG_IDENTIFYFLAG) | (lun)) -#define MSG_ISIDENTIFY(m) ((m) & MSG_IDENTIFYFLAG) - -/* Extended messages (opcode and length) */ -#define MSG_EXT_SDTR 0x01 -#define MSG_EXT_SDTR_LEN 0x03 - -#define MSG_EXT_WDTR 0x03 -#define MSG_EXT_WDTR_LEN 0x02 -#define MSG_EXT_WDTR_BUS_8_BIT 0x00 -#define MSG_EXT_WDTR_BUS_16_BIT 0x01 -#define MSG_EXT_WDTR_BUS_32_BIT 0x02 - -#define MSG_EXT_PPR 0x04 -#define MSG_EXT_PPR_LEN 0x06 -#define MSG_EXT_PPR_OPTION_ST 0x00 -#define MSG_EXT_PPR_OPTION_DT_CRC 0x02 -#define MSG_EXT_PPR_OPTION_DT_UNITS 0x03 -#define MSG_EXT_PPR_OPTION_DT_CRC_QUICK 0x04 -#define MSG_EXT_PPR_OPTION_DT_UNITS_QUICK 0x05 diff --git a/drivers/scsi/aic7xxx_old/sequencer.h b/drivers/scsi/aic7xxx_old/sequencer.h deleted file mode 100644 index ee66855222b..00000000000 --- a/drivers/scsi/aic7xxx_old/sequencer.h +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Instruction formats for the sequencer program downloaded to - * Aic7xxx SCSI host adapters - * - * Copyright (c) 1997, 1998 Justin T. Gibbs. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions, and the following disclaimer, - * without modification, immediately at the beginning of the file. - * 2. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * Where this Software is combined with software released under the terms of - * the GNU General Public License ("GPL") and the terms of the GPL would require the - * combined work to also be released under the terms of the GPL, the terms - * and conditions of this License will apply in addition to those of the - * GPL with the exception of any terms or conditions of this License that - * conflict with, or are expressly prohibited by, the GPL. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
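(Worked example for the identify-message macros deleted a few lines above; illustrative only, not part of the patch.) MSG_IDENTIFY() builds the one-byte identify message by OR-ing the identify flag, the optional disconnect-privilege bit, and the LUN number:

/*
 * MSG_IDENTIFYFLAG      == 0x80
 * MSG_IDENTIFY_DISCFLAG == 0x40, so "disc" true selects 0xc0
 *
 * MSG_IDENTIFY(2, 1) == (0xc0 | 2) == 0xc2   LUN 2, disconnect allowed
 * MSG_IDENTIFY(2, 0) == (0x80 | 2) == 0x82   LUN 2, no disconnect
 * MSG_ISIDENTIFY(0xc2) is non-zero, so the byte parses as identify.
 */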
- * - * $Id: sequencer.h,v 1.3 1997/09/27 19:37:31 gibbs Exp $ - */ - -#ifdef __LITTLE_ENDIAN_BITFIELD -struct ins_format1 { - unsigned int - immediate : 8, - source : 9, - destination : 9, - ret : 1, - opcode : 4, - parity : 1; -}; - -struct ins_format2 { - unsigned int - shift_control : 8, - source : 9, - destination : 9, - ret : 1, - opcode : 4, - parity : 1; -}; - -struct ins_format3 { - unsigned int - immediate : 8, - source : 9, - address : 10, - opcode : 4, - parity : 1; -}; -#elif defined(__BIG_ENDIAN_BITFIELD) -struct ins_format1 { - unsigned int - parity : 1, - opcode : 4, - ret : 1, - destination : 9, - source : 9, - immediate : 8; -}; - -struct ins_format2 { - unsigned int - parity : 1, - opcode : 4, - ret : 1, - destination : 9, - source : 9, - shift_control : 8; -}; - -struct ins_format3 { - unsigned int - parity : 1, - opcode : 4, - address : 10, - source : 9, - immediate : 8; -}; -#endif - -union ins_formats { - struct ins_format1 format1; - struct ins_format2 format2; - struct ins_format3 format3; - unsigned char bytes[4]; - unsigned int integer; -}; -struct instruction { - union ins_formats format; - unsigned int srcline; - struct symbol *patch_label; - struct { - struct instruction *stqe_next; - } links; -}; - -#define AIC_OP_OR 0x0 -#define AIC_OP_AND 0x1 -#define AIC_OP_XOR 0x2 -#define AIC_OP_ADD 0x3 -#define AIC_OP_ADC 0x4 -#define AIC_OP_ROL 0x5 -#define AIC_OP_BMOV 0x6 - -#define AIC_OP_JMP 0x8 -#define AIC_OP_JC 0x9 -#define AIC_OP_JNC 0xa -#define AIC_OP_CALL 0xb -#define AIC_OP_JNE 0xc -#define AIC_OP_JNZ 0xd -#define AIC_OP_JE 0xe -#define AIC_OP_JZ 0xf - -/* Pseudo Ops */ -#define AIC_OP_SHL 0x10 -#define AIC_OP_SHR 0x20 -#define AIC_OP_ROR 0x30 diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c index 64136c56e70..33072388ea1 100644 --- a/drivers/scsi/aic94xx/aic94xx_dev.c +++ b/drivers/scsi/aic94xx/aic94xx_dev.c @@ -84,7 +84,7 @@ static void asd_set_ddb_type(struct domain_device *dev) struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; int ddb = (int) (unsigned long) dev->lldd_dev; - if (dev->dev_type == SATA_PM_PORT) + if (dev->dev_type == SAS_SATA_PM_PORT) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT); else if (dev->tproto) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET); @@ -116,7 +116,7 @@ void asd_set_dmamode(struct domain_device *dev) int ddb = (int) (unsigned long) dev->lldd_dev; u32 qdepth = 0; - if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) { + if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) { if (ata_id_has_ncq(ata_dev->id)) qdepth = ata_id_queue_depth(ata_dev->id); asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK, @@ -140,8 +140,8 @@ static int asd_init_sata(struct domain_device *dev) int ddb = (int) (unsigned long) dev->lldd_dev; asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); - if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM || - dev->dev_type == SATA_PM_PORT) { + if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM || + dev->dev_type == SAS_SATA_PM_PORT) { struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status); @@ -174,7 +174,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask); if (dev->port->oob_mode != SATA_OOB_MODE) { flags |= OPEN_REQUIRED; - if ((dev->dev_type == SATA_DEV) || + if ((dev->dev_type == SAS_SATA_DEV) || (dev->tproto & 
SAS_PROTOCOL_STP)) { struct smp_resp *rps_resp = &dev->sata_dev.rps_resp; if (rps_resp->frame_type == SMP_RESPONSE && @@ -188,8 +188,8 @@ static int asd_init_target_ddb(struct domain_device *dev) } else { flags |= CONCURRENT_CONN_SUPP; if (!dev->parent && - (dev->dev_type == EDGE_DEV || - dev->dev_type == FANOUT_DEV)) + (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)) asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN, 4); else @@ -198,7 +198,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1); } } - if (dev->dev_type == SATA_PM) + if (dev->dev_type == SAS_SATA_PM) flags |= SATA_MULTIPORT; asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags); @@ -211,7 +211,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF); asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); - if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { + if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { i = asd_init_sata(dev); if (i < 0) { asd_free_ddb(asd_ha, ddb); @@ -219,7 +219,7 @@ static int asd_init_target_ddb(struct domain_device *dev) } } - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { struct sas_end_device *rdev = rphy_to_end_device(dev->rphy); if (rdev->I_T_nexus_loss_timeout > 0) asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, @@ -328,10 +328,10 @@ int asd_dev_found(struct domain_device *dev) spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); switch (dev->dev_type) { - case SATA_PM: + case SAS_SATA_PM: res = asd_init_sata_pm_ddb(dev); break; - case SATA_PM_PORT: + case SAS_SATA_PM_PORT: res = asd_init_sata_pm_port_ddb(dev); break; default: diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c index 81b736c76ff..4df867e07b2 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.c +++ b/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -74,7 +74,7 @@ static void asd_init_phy_identify(struct asd_phy *phy) memset(phy->identify_frame, 0, sizeof(*phy->identify_frame)); - phy->identify_frame->dev_type = SAS_END_DEV; + phy->identify_frame->dev_type = SAS_END_DEVICE; if (phy->sas_phy.role & PHY_ROLE_INITIATOR) phy->identify_frame->initiator_bits = phy->sas_phy.iproto; if (phy->sas_phy.role & PHY_ROLE_TARGET) diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c index 393e7ce8e95..59b86e260ce 100644 --- a/drivers/scsi/aic94xx/aic94xx_task.c +++ b/drivers/scsi/aic94xx/aic94xx_task.c @@ -505,7 +505,8 @@ static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task, scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK; scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3); scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7); - memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cdb, 16); + memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF); scb->ssp_task.conn_handle = cpu_to_le16( diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index cf9040933da..d4c35df3d4a 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -184,7 +184,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev) struct sas_phy *phy = sas_get_local_phy(dev); /* Standard mandates link reset for ATA (type 0) and * hard reset for SSP (type 1) */ - int reset_type = 
(dev->dev_type == SATA_DEV || + int reset_type = (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 33c52bc2c7b..652b41b4ddb 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -137,6 +137,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = { .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = arcmsr_host_attrs, + .no_write_same = 1, }; static struct pci_device_id arcmsr_device_id_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, @@ -1035,7 +1036,6 @@ static void arcmsr_remove(struct pci_dev *pdev) pci_release_regions(pdev); scsi_host_put(host); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } static void arcmsr_shutdown(struct pci_dev *pdev) @@ -2500,16 +2500,15 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) { uint32_t cdb_phyaddr, cdb_phyaddr_hi32; - dma_addr_t dma_coherent_handle; + /* ******************************************************************** ** here we need to tell iop 331 our freeccb.HighPart ** if freeccb.HighPart is not zero ******************************************************************** */ - dma_coherent_handle = acb->dma_coherent_handle; - cdb_phyaddr = (uint32_t)(dma_coherent_handle); - cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16); + cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle); + cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle); acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32; /* *********************************************************************** diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 3e1172adb37..2e797a36760 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -62,13 +62,6 @@ */ #undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE /* - * SCSI-II Linked command support. - * - * The higher level code doesn't support linked commands yet, and so the option - * is undef'd here. - */ -#undef CONFIG_SCSI_ACORNSCSI_LINK -/* * SCSI-II Synchronous transfer support. * * Tried and tested... @@ -160,10 +153,6 @@ #error "Yippee! ABORT TAG is now defined! Remove this error!" #endif -#ifdef CONFIG_SCSI_ACORNSCSI_LINK -#error SCSI2 LINKed commands not supported (yet)! -#endif - #ifdef USE_DMAC /* * DMAC setup parameters @@ -1668,42 +1657,6 @@ void acornscsi_message(AS_Host *host) } break; -#ifdef CONFIG_SCSI_ACORNSCSI_LINK - case LINKED_CMD_COMPLETE: - case LINKED_FLG_CMD_COMPLETE: - /* - * We don't support linked commands yet - */ - if (0) { -#if (DEBUG & DEBUG_LINK) - printk("scsi%d.%c: lun %d tag %d linked command complete\n", - host->host->host_no, acornscsi_target(host), host->SCpnt->tag); -#endif - /* - * A linked command should only terminate with one of these messages - * if there are more linked commands available. 
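A note on the arcmsr_iop_confirm() hunk above: lower_32_bits()/upper_32_bits() from <linux/kernel.h> compute exactly what the hand-rolled code did, while remaining well defined when dma_addr_t is only 32 bits wide (a single ">> 32" of a 32-bit value is undefined in C, which is why the old code shifted by 16 twice). A minimal sketch under those assumptions, with a hypothetical helper name, not code from the driver:

#include <linux/kernel.h>
#include <linux/types.h>

/* Split a DMA handle into the two 32-bit halves the IOP expects. */
static void example_split_dma_addr(dma_addr_t handle, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(handle);	/* handle & 0xffffffff */
	*hi = upper_32_bits(handle);	/* ((handle >> 16) >> 16) */
}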
- */ - if (!host->SCpnt->next_link) { - printk(KERN_WARNING "scsi%d.%c: lun %d tag %d linked command complete, but no next_link\n", - instance->host_no, acornscsi_target(host), host->SCpnt->tag); - acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); - msgqueue_addmsg(&host->scsi.msgs, 1, ABORT); - } else { - struct scsi_cmnd *SCpnt = host->SCpnt; - - acornscsi_dma_cleanup(host); - - host->SCpnt = host->SCpnt->next_link; - host->SCpnt->tag = SCpnt->tag; - SCpnt->result = DID_OK | host->scsi.SCp.Message << 8 | host->Scsi.SCp.Status; - SCpnt->done(SCpnt); - - /* initialise host->SCpnt->SCp */ - } - break; - } -#endif - default: /* reject message */ printk(KERN_ERR "scsi%d.%c: unrecognised message %02X, rejecting\n", host->host->host_no, acornscsi_target(host), @@ -2825,9 +2778,6 @@ char *acornscsi_info(struct Scsi_Host *host) #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE " TAG" #endif -#ifdef CONFIG_SCSI_ACORNSCSI_LINK - " LINK" -#endif #if (DEBUG & DEBUG_NO_WRITE) " NOWRITE (" __stringify(NO_WRITE) ")" #endif @@ -2836,42 +2786,34 @@ char *acornscsi_info(struct Scsi_Host *host) return string; } -int acornscsi_proc_info(struct Scsi_Host *instance, char *buffer, char **start, off_t offset, - int length, int inout) +static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance) { - int pos, begin = 0, devidx; + int devidx; struct scsi_device *scd; AS_Host *host; - char *p = buffer; - - if (inout == 1) - return -EINVAL; host = (AS_Host *)instance->hostdata; - p += sprintf(p, "AcornSCSI driver v%d.%d.%d" + seq_printf(m, "AcornSCSI driver v%d.%d.%d" #ifdef CONFIG_SCSI_ACORNSCSI_SYNC " SYNC" #endif #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE " TAG" #endif -#ifdef CONFIG_SCSI_ACORNSCSI_LINK - " LINK" -#endif #if (DEBUG & DEBUG_NO_WRITE) " NOWRITE (" __stringify(NO_WRITE) ")" #endif "\n\n", VER_MAJOR, VER_MINOR, VER_PATCH); - p += sprintf(p, "SBIC: WD33C93A Address: %p IRQ : %d\n", + seq_printf(m, "SBIC: WD33C93A Address: %p IRQ : %d\n", host->base + SBIC_REGIDX, host->scsi.irq); #ifdef USE_DMAC - p += sprintf(p, "DMAC: uPC71071 Address: %p IRQ : %d\n\n", + seq_printf(m, "DMAC: uPC71071 Address: %p IRQ : %d\n\n", host->base + DMAC_OFFSET, host->scsi.irq); #endif - p += sprintf(p, "Statistics:\n" + seq_printf(m, "Statistics:\n" "Queued commands: %-10u Issued commands: %-10u\n" "Done commands : %-10u Reads : %-10u\n" "Writes : %-10u Others : %-10u\n" @@ -2886,7 +2828,7 @@ int acornscsi_proc_info(struct Scsi_Host *instance, char *buffer, char **start, for (devidx = 0; devidx < 9; devidx ++) { unsigned int statptr, prev; - p += sprintf(p, "\n%c:", devidx == 8 ? 'H' : ('0' + devidx)); + seq_printf(m, "\n%c:", devidx == 8 ? 'H' : ('0' + devidx)); statptr = host->status_ptr[devidx] - 10; if ((signed int)statptr < 0) @@ -2896,7 +2838,7 @@ int acornscsi_proc_info(struct Scsi_Host *instance, char *buffer, char **start, for (; statptr != host->status_ptr[devidx]; statptr = (statptr + 1) & (STATUS_BUFFER_SIZE - 1)) { if (host->status[devidx][statptr].when) { - p += sprintf(p, "%c%02X:%02X+%2ld", + seq_printf(m, "%c%02X:%02X+%2ld", host->status[devidx][statptr].irq ? 
'-' : ' ', host->status[devidx][statptr].ph, host->status[devidx][statptr].ssr, @@ -2907,51 +2849,32 @@ int acornscsi_proc_info(struct Scsi_Host *instance, char *buffer, char **start, } } - p += sprintf(p, "\nAttached devices:\n"); + seq_printf(m, "\nAttached devices:\n"); shost_for_each_device(scd, instance) { - p += sprintf(p, "Device/Lun TaggedQ Sync\n"); - p += sprintf(p, " %d/%d ", scd->id, scd->lun); + seq_printf(m, "Device/Lun TaggedQ Sync\n"); + seq_printf(m, " %d/%d ", scd->id, scd->lun); if (scd->tagged_supported) - p += sprintf(p, "%3sabled(%3d) ", + seq_printf(m, "%3sabled(%3d) ", scd->simple_tags ? "en" : "dis", scd->current_tag); else - p += sprintf(p, "unsupported "); + seq_printf(m, "unsupported "); if (host->device[scd->id].sync_xfer & 15) - p += sprintf(p, "offset %d, %d ns\n", + seq_printf(m, "offset %d, %d ns\n", host->device[scd->id].sync_xfer & 15, acornscsi_getperiod(host->device[scd->id].sync_xfer)); else - p += sprintf(p, "async\n"); + seq_printf(m, "async\n"); - pos = p - buffer; - if (pos + begin < offset) { - begin += pos; - p = buffer; - } - pos = p - buffer; - if (pos + begin > offset + length) { - scsi_device_put(scd); - break; - } } - - pos = p - buffer; - - *start = buffer + (offset - begin); - pos -= offset - begin; - - if (pos > length) - pos = length; - - return pos; + return 0; } static struct scsi_host_template acornscsi_template = { .module = THIS_MODULE, - .proc_info = acornscsi_proc_info, + .show_info = acornscsi_show_info, .name = "AcornSCSI", .info = acornscsi_info, .queuecommand = acornscsi_queuecmd, @@ -2995,7 +2918,7 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id) ec->irqaddr = ashost->fast + INT_REG; ec->irqmask = 0x0a; - ret = request_irq(host->irq, acornscsi_intr, IRQF_DISABLED, "acornscsi", ashost); + ret = request_irq(host->irq, acornscsi_intr, 0, "acornscsi", ashost); if (ret) { printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n", host->host_no, ashost->scsi.irq, ret); diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c index 9274510294a..32d23212de4 100644 --- a/drivers/scsi/arm/arxescsi.c +++ b/drivers/scsi/arm/arxescsi.c @@ -220,47 +220,21 @@ static const char *arxescsi_info(struct Scsi_Host *host) return string; } -/* - * Function: int arxescsi_proc_info(char *buffer, char **start, off_t offset, - * int length, int host_no, int inout) - * Purpose : Return information about the driver to a user process accessing - * the /proc filesystem. - * Params : buffer - a buffer to write information to - * start - a pointer into this buffer set by this routine to the start - * of the required information. - * offset - offset into information that we have read up to. - * length - length of buffer - * host_no - host number to return information for - * inout - 0 for reading, 1 for writing. - * Returns : length of data written to buffer. 
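The acornscsi conversion above and the arm/ conversions that follow (arxescsi, cumana, eesox, powertec, oak) all apply the same transformation: the old .proc_info hook wrote into a raw buffer and had to manage start/offset/length bookkeeping by hand, while the new .show_info/.write_info pairing lets seq_file handle that, so the read path shrinks to plain seq_printf() calls returning 0. A minimal sketch of the resulting shape, using hypothetical names rather than any one driver:

#include <linux/seq_file.h>
#include <scsi/scsi_host.h>

static int example_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	/* seq_file buffers and paginates for us; just print and return 0. */
	seq_printf(m, "example driver v1.0 on scsi%d\n", host->host_no);
	return 0;
}

static struct scsi_host_template example_template = {
	.show_info = example_show_info,
	.name      = "example",
};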
- */ static int -arxescsi_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, - int inout) +arxescsi_show_info(struct seq_file *m, struct Scsi_Host *host) { struct arxescsi_info *info; - char *p = buffer; - int pos; - info = (struct arxescsi_info *)host->hostdata; - if (inout == 1) - return -EINVAL; - - p += sprintf(p, "ARXE 16-bit SCSI driver v%s\n", VERSION); - p += fas216_print_host(&info->info, p); - p += fas216_print_stats(&info->info, p); - p += fas216_print_devices(&info->info, p); - - *start = buffer + offset; - pos = p - buffer - offset; - if (pos > length) - pos = length; - return pos; + seq_printf(m, "ARXE 16-bit SCSI driver v%s\n", VERSION); + fas216_print_host(&info->info, m); + fas216_print_stats(&info->info, m); + fas216_print_devices(&info->info, m); + return 0; } static struct scsi_host_template arxescsi_template = { - .proc_info = arxescsi_proc_info, + .show_info = arxescsi_show_info, .name = "ARXE SCSI card", .info = arxescsi_info, .queuecommand = fas216_noqueue_command, diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c index c93938b246d..8ef810a4476 100644 --- a/drivers/scsi/arm/cumana_1.c +++ b/drivers/scsi/arm/cumana_1.c @@ -30,16 +30,12 @@ #define NCR5380_write(reg, value) cumanascsi_write(_instance, reg, value) #define NCR5380_intr cumanascsi_intr #define NCR5380_queue_command cumanascsi_queue_command -#define NCR5380_proc_info cumanascsi_proc_info #define NCR5380_implementation_fields \ unsigned ctrl; \ void __iomem *base; \ void __iomem *dma -#define BOARD_NORMAL 0 -#define BOARD_NCR53C400 1 - #include "../NCR5380.h" void cumanascsi_setup(char *str, int *ints) @@ -263,7 +259,7 @@ static int cumanascsi1_probe(struct expansion_card *ec, goto out_unmap; } - ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED, + ret = request_irq(host->irq, cumanascsi_intr, 0, "CumanaSCSI-1", host); if (ret) { printk("scsi%d: IRQ%d not free: %d\n", diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c index e3bae93c3c2..abc66f5263e 100644 --- a/drivers/scsi/arm/cumana_2.c +++ b/drivers/scsi/arm/cumana_2.c @@ -337,50 +337,25 @@ cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length) return ret; } -/* Prototype: int cumanascsi_2_proc_info(char *buffer, char **start, off_t offset, - * int length, int host_no, int inout) - * Purpose : Return information about the driver to a user process accessing - * the /proc filesystem. - * Params : buffer - a buffer to write information to - * start - a pointer into this buffer set by this routine to the start - * of the required information. - * offset - offset into information that we have read up to. - * length - length of buffer - * host_no - host number to return information for - * inout - 0 for reading, 1 for writing. - * Returns : length of data written to buffer. - */ -int cumanascsi_2_proc_info (struct Scsi_Host *host, char *buffer, char **start, off_t offset, - int length, int inout) +static int cumanascsi_2_show_info(struct seq_file *m, struct Scsi_Host *host) { struct cumanascsi2_info *info; - char *p = buffer; - int pos; - - if (inout == 1) - return cumanascsi_2_set_proc_info(host, buffer, length); - info = (struct cumanascsi2_info *)host->hostdata; - p += sprintf(p, "Cumana SCSI II driver v%s\n", VERSION); - p += fas216_print_host(&info->info, p); - p += sprintf(p, "Term : o%s\n", + seq_printf(m, "Cumana SCSI II driver v%s\n", VERSION); + fas216_print_host(&info->info, m); + seq_printf(m, "Term : o%s\n", info->terms ? 
"n" : "ff"); - p += fas216_print_stats(&info->info, p); - p += fas216_print_devices(&info->info, p); - - *start = buffer + offset; - pos = p - buffer - offset; - if (pos > length) - pos = length; - - return pos; + fas216_print_stats(&info->info, m); + fas216_print_devices(&info->info, m); + return 0; } static struct scsi_host_template cumanascsi2_template = { .module = THIS_MODULE, - .proc_info = cumanascsi_2_proc_info, + .show_info = cumanascsi_2_show_info, + .write_info = cumanascsi_2_set_proc_info, .name = "Cumana SCSI II", .info = cumanascsi_2_info, .queuecommand = fas216_queue_command, @@ -456,7 +431,7 @@ static int cumanascsi2_probe(struct expansion_card *ec, goto out_free; ret = request_irq(ec->irq, cumanascsi_2_intr, - IRQF_DISABLED, "cumanascsi2", info); + 0, "cumanascsi2", info); if (ret) { printk("scsi%d: IRQ%d not free: %d\n", host->host_no, ec->irq, ret); diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c index 8e36908415e..5bf3c0d134b 100644 --- a/drivers/scsi/arm/eesox.c +++ b/drivers/scsi/arm/eesox.c @@ -422,45 +422,20 @@ eesoxscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) return ret; } -/* Prototype: int eesoxscsi_proc_info(char *buffer, char **start, off_t offset, - * int length, int host_no, int inout) - * Purpose : Return information about the driver to a user process accessing - * the /proc filesystem. - * Params : buffer - a buffer to write information to - * start - a pointer into this buffer set by this routine to the start - * of the required information. - * offset - offset into information that we have read up to. - * length - length of buffer - * host_no - host number to return information for - * inout - 0 for reading, 1 for writing. - * Returns : length of data written to buffer. - */ -int eesoxscsi_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, - int length, int inout) +static int eesoxscsi_show_info(struct seq_file *m, struct Scsi_Host *host) { struct eesoxscsi_info *info; - char *p = buffer; - int pos; - - if (inout == 1) - return eesoxscsi_set_proc_info(host, buffer, length); info = (struct eesoxscsi_info *)host->hostdata; - p += sprintf(p, "EESOX SCSI driver v%s\n", VERSION); - p += fas216_print_host(&info->info, p); - p += sprintf(p, "Term : o%s\n", + seq_printf(m, "EESOX SCSI driver v%s\n", VERSION); + fas216_print_host(&info->info, m); + seq_printf(m, "Term : o%s\n", info->control & EESOX_TERM_ENABLE ? 
"n" : "ff"); - p += fas216_print_stats(&info->info, p); - p += fas216_print_devices(&info->info, p); - - *start = buffer + offset; - pos = p - buffer - offset; - if (pos > length) - pos = length; - - return pos; + fas216_print_stats(&info->info, m); + fas216_print_devices(&info->info, m); + return 0; } static ssize_t eesoxscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf) @@ -498,7 +473,8 @@ static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR, static struct scsi_host_template eesox_template = { .module = THIS_MODULE, - .proc_info = eesoxscsi_proc_info, + .show_info = eesoxscsi_show_info, + .write_info = eesoxscsi_set_proc_info, .name = "EESOX SCSI", .info = eesoxscsi_info, .queuecommand = fas216_queue_command, diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 737554c37d9..b46a6f6c0eb 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -2958,9 +2958,9 @@ void fas216_release(struct Scsi_Host *host) queue_free(&info->queues.issue); } -int fas216_print_host(FAS216_Info *info, char *buffer) +void fas216_print_host(FAS216_Info *info, struct seq_file *m) { - return sprintf(buffer, + seq_printf(m, "\n" "Chip : %s\n" " Address: 0x%p\n" @@ -2970,11 +2970,9 @@ int fas216_print_host(FAS216_Info *info, char *buffer) info->scsi.irq, info->scsi.dma); } -int fas216_print_stats(FAS216_Info *info, char *buffer) +void fas216_print_stats(FAS216_Info *info, struct seq_file *m) { - char *p = buffer; - - p += sprintf(p, "\n" + seq_printf(m, "\n" "Command Statistics:\n" " Queued : %u\n" " Issued : %u\n" @@ -2991,38 +2989,33 @@ int fas216_print_stats(FAS216_Info *info, char *buffer) info->stats.writes, info->stats.miscs, info->stats.disconnects, info->stats.aborts, info->stats.bus_resets, info->stats.host_resets); - - return p - buffer; } -int fas216_print_devices(FAS216_Info *info, char *buffer) +void fas216_print_devices(FAS216_Info *info, struct seq_file *m) { struct fas216_device *dev; struct scsi_device *scd; - char *p = buffer; - p += sprintf(p, "Device/Lun TaggedQ Parity Sync\n"); + seq_printf(m, "Device/Lun TaggedQ Parity Sync\n"); shost_for_each_device(scd, info->host) { dev = &info->device[scd->id]; - p += sprintf(p, " %d/%d ", scd->id, scd->lun); + seq_printf(m, " %d/%d ", scd->id, scd->lun); if (scd->tagged_supported) - p += sprintf(p, "%3sabled(%3d) ", + seq_printf(m, "%3sabled(%3d) ", scd->simple_tags ? "en" : "dis", scd->current_tag); else - p += sprintf(p, "unsupported "); + seq_printf(m, "unsupported "); - p += sprintf(p, "%3sabled ", dev->parity_enabled ? "en" : "dis"); + seq_printf(m, "%3sabled ", dev->parity_enabled ? 
"en" : "dis"); if (dev->sof) - p += sprintf(p, "offset %d, %d ns\n", + seq_printf(m, "offset %d, %d ns\n", dev->sof, dev->period * 4); else - p += sprintf(p, "async\n"); + seq_printf(m, "async\n"); } - - return p - buffer; } EXPORT_SYMBOL(fas216_init); diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h index df2e1b3ddfe..c57c16ef819 100644 --- a/drivers/scsi/arm/fas216.h +++ b/drivers/scsi/arm/fas216.h @@ -358,9 +358,9 @@ extern void fas216_remove (struct Scsi_Host *instance); */ extern void fas216_release (struct Scsi_Host *instance); -extern int fas216_print_host(FAS216_Info *info, char *buffer); -extern int fas216_print_stats(FAS216_Info *info, char *buffer); -extern int fas216_print_devices(FAS216_Info *info, char *buffer); +extern void fas216_print_host(FAS216_Info *info, struct seq_file *m); +extern void fas216_print_stats(FAS216_Info *info, struct seq_file *m); +extern void fas216_print_devices(FAS216_Info *info, struct seq_file *m); /* Function: int fas216_eh_abort(struct scsi_cmnd *SCpnt) * Purpose : abort this command diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c index 48facdc1800..188e734c7ff 100644 --- a/drivers/scsi/arm/oak.c +++ b/drivers/scsi/arm/oak.c @@ -31,14 +31,12 @@ #define NCR5380_write(reg, value) writeb(value, _base + ((reg) << 2)) #define NCR5380_intr oakscsi_intr #define NCR5380_queue_command oakscsi_queue_command -#define NCR5380_proc_info oakscsi_proc_info +#define NCR5380_show_info oakscsi_show_info +#define NCR5380_write_info oakscsi_write_info #define NCR5380_implementation_fields \ void __iomem *base -#define BOARD_NORMAL 0 -#define BOARD_NCR53C400 1 - #include "../NCR5380.h" #undef START_DMA_INITIATOR_RECEIVE_REG @@ -115,7 +113,8 @@ printk("reading %p len %d\n", addr, len); static struct scsi_host_template oakscsi_template = { .module = THIS_MODULE, - .proc_info = oakscsi_proc_info, + .show_info = oakscsi_show_info, + .write_info = oakscsi_write_info, .name = "Oak 16-bit SCSI", .info = oakscsi_info, .queuecommand = oakscsi_queue_command, diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c index 246600b9355..5e1b73e1b74 100644 --- a/drivers/scsi/arm/powertec.c +++ b/drivers/scsi/arm/powertec.c @@ -237,32 +237,20 @@ powertecscsi_set_proc_info(struct Scsi_Host *host, char *buffer, int length) * inout - 0 for reading, 1 for writing. * Returns : length of data written to buffer. */ -int powertecscsi_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, - int length, int inout) +static int powertecscsi_show_info(struct seq_file *m, struct Scsi_Host *host) { struct powertec_info *info; - char *p = buffer; - int pos; - - if (inout == 1) - return powertecscsi_set_proc_info(host, buffer, length); info = (struct powertec_info *)host->hostdata; - p += sprintf(p, "PowerTec SCSI driver v%s\n", VERSION); - p += fas216_print_host(&info->info, p); - p += sprintf(p, "Term : o%s\n", + seq_printf(m, "PowerTec SCSI driver v%s\n", VERSION); + fas216_print_host(&info->info, m); + seq_printf(m, "Term : o%s\n", info->term_ctl ? 
"n" : "ff"); - p += fas216_print_stats(&info->info, p); - p += fas216_print_devices(&info->info, p); - - *start = buffer + offset; - pos = p - buffer - offset; - if (pos > length) - pos = length; - - return pos; + fas216_print_stats(&info->info, m); + fas216_print_devices(&info->info, m); + return 0; } static ssize_t powertecscsi_show_term(struct device *dev, struct device_attribute *attr, char *buf) @@ -291,7 +279,8 @@ static DEVICE_ATTR(bus_term, S_IRUGO | S_IWUSR, static struct scsi_host_template powertecscsi_template = { .module = THIS_MODULE, - .proc_info = powertecscsi_proc_info, + .show_info = powertecscsi_show_info, + .write_info = powertecscsi_set_proc_info, .name = "PowerTec SCSI", .info = powertecscsi_info, .queuecommand = fas216_queue_command, @@ -369,7 +358,7 @@ static int powertecscsi_probe(struct expansion_card *ec, goto out_free; ret = request_irq(ec->irq, powertecscsi_intr, - IRQF_DISABLED, "powertec", info); + 0, "powertec", info); if (ret) { printk("scsi%d: IRQ%d not free: %d\n", host->host_no, ec->irq, ret); diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index 2db79b469d9..1814aa20b72 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c @@ -370,7 +370,7 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged) return 0; if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >= TagAlloc[cmd->device->id][cmd->device->lun].queue_size) { - TAG_PRINTK("scsi%d: target %d lun %d: no free tags\n", + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", H_NO(cmd), cmd->device->id, cmd->device->lun); return 1; } @@ -394,7 +394,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) !setup_use_tagged_queuing || !cmd->device->tagged_supported) { cmd->tag = TAG_NONE; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); - TAG_PRINTK("scsi%d: target %d lun %d now allocated by untagged " + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun); } else { TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; @@ -402,7 +402,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS); set_bit(cmd->tag, ta->allocated); ta->nr_allocated++; - TAG_PRINTK("scsi%d: using tag %d for target %d lun %d " + dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d " "(now %d tags in use)\n", H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun, ta->nr_allocated); @@ -420,7 +420,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd) if (cmd->tag == TAG_NONE) { hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); - TAG_PRINTK("scsi%d: target %d lun %d untagged cmd finished\n", + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n", H_NO(cmd), cmd->device->id, cmd->device->lun); } else if (cmd->tag >= MAX_TAGS) { printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", @@ -429,7 +429,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd) TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; clear_bit(cmd->tag, ta->allocated); ta->nr_allocated--; - TAG_PRINTK("scsi%d: freed tag %d for target %d lun %d\n", + dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun); } } @@ -478,7 +478,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd) for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1; cmd->SCp.buffers_residual && 
virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) { - MER_PRINTK("VTOP(%p) == %08lx -> merging\n", + dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n", page_address(sg_page(&cmd->SCp.buffer[1])), endaddr); #if (NDEBUG & NDEBUG_MERGING) ++cnt; @@ -490,7 +490,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd) } #if (NDEBUG & NDEBUG_MERGING) if (oldlen != cmd->SCp.this_residual) - MER_PRINTK("merged %d buffers from %p, new length %08x\n", + dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n", cnt, cmd->SCp.ptr, cmd->SCp.this_residual); #endif } @@ -626,16 +626,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance) } } -#else /* !NDEBUG */ - -/* dummies... */ -static inline void NCR5380_print(struct Scsi_Host *instance) -{ -}; -static inline void NCR5380_print_phase(struct Scsi_Host *instance) -{ -}; - #endif /* @@ -676,7 +666,7 @@ static inline void NCR5380_all_init(void) { static int done = 0; if (!done) { - INI_PRINTK("scsi : NCR5380_all_init()\n"); + dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n"); done = 1; } } @@ -719,119 +709,94 @@ static void __init NCR5380_print_options(struct Scsi_Host *instance) * Inputs : instance, pointer to this instance. */ -static void NCR5380_print_status(struct Scsi_Host *instance) +static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd) { - char *pr_bfr; - char *start; - int len; - - NCR_PRINT(NDEBUG_ANY); - NCR_PRINT_PHASE(NDEBUG_ANY); - - pr_bfr = (char *)__get_free_page(GFP_ATOMIC); - if (!pr_bfr) { - printk("NCR5380_print_status: no memory for print buffer\n"); - return; - } - len = NCR5380_proc_info(instance, pr_bfr, &start, 0, PAGE_SIZE, 0); - pr_bfr[len] = 0; - printk("\n%s\n", pr_bfr); - free_page((unsigned long)pr_bfr); + int i, s; + unsigned char *command; + printk("scsi%d: destination target %d, lun %d\n", + H_NO(cmd), cmd->device->id, cmd->device->lun); + printk(KERN_CONT " command = "); + command = cmd->cmnd; + printk(KERN_CONT "%2d (0x%02x)", command[0], command[0]); + for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) + printk(KERN_CONT " %02x", command[i]); + printk("\n"); } - -/******************************************/ -/* - * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED] - * - * *buffer: I/O buffer - * **start: if inout == FALSE pointer into buffer where user read should start - * offset: current offset - * length: length of buffer - * hostno: Scsi_Host host_no - * inout: TRUE - user is writing; FALSE - user is reading - * - * Return the number of bytes read from or written -*/ - -#undef SPRINTF -#define SPRINTF(fmt,args...) \ - do { \ - if (pos + strlen(fmt) + 20 /* slop */ < buffer + length) \ - pos += sprintf(pos, fmt , ## args); \ - } while(0) -static char *lprint_Scsi_Cmnd(Scsi_Cmnd *cmd, char *pos, char *buffer, int length); - -static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer, - char **start, off_t offset, int length, int inout) +static void NCR5380_print_status(struct Scsi_Host *instance) { - char *pos = buffer; struct NCR5380_hostdata *hostdata; Scsi_Cmnd *ptr; unsigned long flags; - off_t begin = 0; -#define check_offset() \ - do { \ - if (pos - buffer < offset - begin) { \ - begin += pos - buffer; \ - pos = buffer; \ - } \ - } while (0) + + NCR5380_dprint(NDEBUG_ANY, instance); + NCR5380_dprint_phase(NDEBUG_ANY, instance); hostdata = (struct NCR5380_hostdata *)instance->hostdata; - if (inout) /* Has data been written to the file ? 
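The atari_NCR5380.c changes above and below collapse the old per-category macros (TAG_PRINTK, MER_PRINTK, MAIN_PRINTK, DMA_PRINTK, ...) into a single dprintk(flag, ...) call that tests the category bit against the NDEBUG mask. The real macro lives in the NCR5380 headers and may differ in detail; the idea is roughly:

/* Sketch only: print when the category bit is enabled in NDEBUG. */
#define dprintk(flg, fmt, ...)					\
do {								\
	if ((NDEBUG) & (flg))					\
		printk(fmt, ##__VA_ARGS__);			\
} while (0)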
*/ - return -ENOSYS; /* Currently this is a no-op */ - SPRINTF("NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE); - check_offset(); + printk("\nNCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE); local_irq_save(flags); - SPRINTF("NCR5380: coroutine is%s running.\n", + printk("NCR5380: coroutine is%s running.\n", main_running ? "" : "n't"); - check_offset(); if (!hostdata->connected) - SPRINTF("scsi%d: no currently connected command\n", HOSTNO); + printk("scsi%d: no currently connected command\n", HOSTNO); else - pos = lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, - pos, buffer, length); - SPRINTF("scsi%d: issue_queue\n", HOSTNO); - check_offset(); - for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) { - pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length); - check_offset(); - } + lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected); + printk("scsi%d: issue_queue\n", HOSTNO); + for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) + lprint_Scsi_Cmnd(ptr); - SPRINTF("scsi%d: disconnected_queue\n", HOSTNO); - check_offset(); + printk("scsi%d: disconnected_queue\n", HOSTNO); for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; - ptr = NEXT(ptr)) { - pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length); - check_offset(); - } + ptr = NEXT(ptr)) + lprint_Scsi_Cmnd(ptr); local_irq_restore(flags); - *start = buffer + (offset - begin); - if (pos - buffer < offset - begin) - return 0; - else if (pos - buffer - (offset - begin) < length) - return pos - buffer - (offset - begin); - return length; + printk("\n"); } -static char *lprint_Scsi_Cmnd(Scsi_Cmnd *cmd, char *pos, char *buffer, int length) +static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m) { int i, s; unsigned char *command; - SPRINTF("scsi%d: destination target %d, lun %d\n", + seq_printf(m, "scsi%d: destination target %d, lun %d\n", H_NO(cmd), cmd->device->id, cmd->device->lun); - SPRINTF(" command = "); + seq_printf(m, " command = "); command = cmd->cmnd; - SPRINTF("%2d (0x%02x)", command[0], command[0]); + seq_printf(m, "%2d (0x%02x)", command[0], command[0]); for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) - SPRINTF(" %02x", command[i]); - SPRINTF("\n"); - return pos; + seq_printf(m, " %02x", command[i]); + seq_printf(m, "\n"); } +static int NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata; + Scsi_Cmnd *ptr; + unsigned long flags; + + hostdata = (struct NCR5380_hostdata *)instance->hostdata; + + seq_printf(m, "NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE); + local_irq_save(flags); + seq_printf(m, "NCR5380: coroutine is%s running.\n", + main_running ? 
"" : "n't"); + if (!hostdata->connected) + seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO); + else + show_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m); + seq_printf(m, "scsi%d: issue_queue\n", HOSTNO); + for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) + show_Scsi_Cmnd(ptr, m); + + seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO); + for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; + ptr = NEXT(ptr)) + show_Scsi_Cmnd(ptr, m); + + local_irq_restore(flags); + return 0; +} /* * Function : void NCR5380_init (struct Scsi_Host *instance) @@ -1009,7 +974,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) } local_irq_restore(flags); - QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd), + dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd), (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); /* If queue_command() is called from an interrupt (real one or bottom @@ -1079,7 +1044,7 @@ static void NCR5380_main(struct work_struct *work) done = 1; if (!hostdata->connected) { - MAIN_PRINTK("scsi%d: not connected\n", HOSTNO); + dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO); /* * Search through the issue_queue for a command destined * for a target that's not busy. @@ -1132,7 +1097,7 @@ static void NCR5380_main(struct work_struct *work) * On failure, we must add the command back to the * issue queue so we can keep trying. */ - MAIN_PRINTK("scsi%d: main(): command for target %d " + dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d " "lun %d removed from issue_queue\n", HOSTNO, tmp->device->id, tmp->device->lun); /* @@ -1165,7 +1130,7 @@ static void NCR5380_main(struct work_struct *work) #endif falcon_dont_release--; local_irq_restore(flags); - MAIN_PRINTK("scsi%d: main(): select() failed, " + dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, " "returned to issue_queue\n", HOSTNO); if (hostdata->connected) break; @@ -1180,10 +1145,10 @@ static void NCR5380_main(struct work_struct *work) #endif ) { local_irq_restore(flags); - MAIN_PRINTK("scsi%d: main: performing information transfer\n", + dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n", HOSTNO); NCR5380_information_transfer(instance); - MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO); + dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO); done = 0; } } while (!done); @@ -1229,12 +1194,12 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance) (BASR_PHASE_MATCH|BASR_ACK)) { saved_data = NCR5380_read(INPUT_DATA_REG); overrun = 1; - DMA_PRINTK("scsi%d: read overrun handled\n", HOSTNO); + dprintk(NDEBUG_DMA, "scsi%d: read overrun handled\n", HOSTNO); } } } - DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", + dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); @@ -1254,13 +1219,13 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance) if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { cnt = toPIO = atari_read_overruns; if (overrun) { - DMA_PRINTK("Got an input overrun, using saved byte\n"); + dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); *(*data)++ = saved_data; (*count)--; cnt--; toPIO--; } - DMA_PRINTK("Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data); + dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data); NCR5380_transfer_pio(instance, &p, &cnt, data); *count -= toPIO - cnt; } @@ 
-1286,25 +1251,25 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) int done = 1, handled = 0; unsigned char basr; - INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO); /* Look for pending interrupts */ basr = NCR5380_read(BUS_AND_STATUS_REG); - INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr); + dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr); /* dispatch to appropriate routine if found and done=0 */ if (basr & BASR_IRQ) { - NCR_PRINT(NDEBUG_INTR); + NCR5380_dprint(NDEBUG_INTR, instance); if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { done = 0; ENABLE_IRQ(); - INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO); NCR5380_reselect(instance); (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else if (basr & BASR_PARITY_ERROR) { - INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO); (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { - INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO); (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else { /* @@ -1323,7 +1288,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) ((basr & BASR_END_DMA_TRANSFER) || !(basr & BASR_PHASE_MATCH))) { - INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); NCR5380_dma_complete( instance ); done = 0; ENABLE_IRQ(); @@ -1348,7 +1313,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) } if (!done) { - INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO); /* Put a call to NCR5380_main() on the queue... 
*/ queue_main(); } @@ -1421,8 +1386,8 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) unsigned long flags; hostdata->restart_select = 0; - NCR_PRINT(NDEBUG_ARBITRATION); - ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, + NCR5380_dprint(NDEBUG_ARBITRATION, instance); + dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO, instance->this_id); /* @@ -1467,7 +1432,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) ; #endif - ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); + dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO); if (hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); @@ -1488,7 +1453,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); - ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", + dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", HOSTNO); return -1; } @@ -1503,7 +1468,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", + dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", HOSTNO); return -1; } @@ -1526,7 +1491,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) return -1; } - ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO); + dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO); /* * Now that we have won arbitration, start Selection process, asserting @@ -1586,7 +1551,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) udelay(1); - SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); + dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); /* * The SCSI specification calls for a 250 ms timeout for the actual @@ -1642,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); if (hostdata->restart_select) printk(KERN_NOTICE "\trestart select\n"); - NCR_PRINT(NDEBUG_ANY); + NCR5380_dprint(NDEBUG_ANY, instance); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return -1; } @@ -1655,7 +1620,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) #endif cmd->scsi_done(cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); + dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return 0; } @@ -1681,7 +1646,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) while (!(NCR5380_read(STATUS_REG) & SR_REQ)) ; - SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", + dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n", HOSTNO, cmd->device->id); tmp[0] = IDENTIFY(1, cmd->device->lun); @@ -1701,7 +1666,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) data = tmp; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &data); - SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); + 
dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO); /* XXX need to handle errors here */ hostdata->connected = cmd; #ifndef SUPPORT_TAGS @@ -1762,12 +1727,12 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)) ; - HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO); + dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO); /* Check for phase mismatch */ if ((tmp & PHASE_MASK) != p) { - PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO); - NCR_PRINT_PHASE(NDEBUG_PIO); + dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO); + NCR5380_dprint_phase(NDEBUG_PIO, instance); break; } @@ -1789,25 +1754,25 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, if (!(p & SR_IO)) { if (!((p & SR_MSG) && c > 1)) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); - NCR_PRINT(NDEBUG_PIO); + NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ACK); } else { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN); - NCR_PRINT(NDEBUG_PIO); + NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); } } else { - NCR_PRINT(NDEBUG_PIO); + NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); } while (NCR5380_read(STATUS_REG) & SR_REQ) ; - HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO); + dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO); /* * We have several special cases to consider during REQ/ACK handshaking : @@ -1828,7 +1793,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, } } while (--c); - PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c); + dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c); *count = c; *data = d; @@ -1942,7 +1907,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, if (atari_read_overruns && (p & SR_IO)) c -= atari_read_overruns; - DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n", + dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n", HOSTNO, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", d); @@ -2022,7 +1987,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) phase = (tmp & PHASE_MASK); if (phase != old_phase) { old_phase = phase; - NCR_PRINT_PHASE(NDEBUG_INFORMATION); + NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); } if (sink && (phase != PHASE_MSGOUT)) { @@ -2064,7 +2029,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) * they are at contiguous physical addresses. */ merge_contiguous_buffers(cmd); - INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", + dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n", HOSTNO, cmd->SCp.this_residual, cmd->SCp.buffers_residual); } @@ -2148,7 +2113,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - LNK_PRINTK("scsi%d: target %d lun %d linked command " + dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command " "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); /* Enable reselect interrupts */ @@ -2173,7 +2138,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) * and don't free it! 
 					 */
 					cmd->next_link->tag = cmd->tag;
 					cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
-					LNK_PRINTK("scsi%d: target %d lun %d linked request "
+					dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request "
 						   "done, calling scsi_done().\n",
 						   HOSTNO, cmd->device->id, cmd->device->lun);
 #ifdef NCR5380_STATS
@@ -2190,7 +2155,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 					/* ++guenther: possible race with Falcon locking */
 					falcon_dont_release++;
 					hostdata->connected = NULL;
-					QU_PRINTK("scsi%d: command for target %d, lun %d "
+					dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d "
 						  "completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
 #ifdef SUPPORT_TAGS
 					cmd_free_tag(cmd);
@@ -2204,7 +2169,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 						/* ++Andreas: the mid level code knows about QUEUE_FULL now. */
 						TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
-						TAG_PRINTK("scsi%d: target %d lun %d returned "
+						dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned "
 							   "QUEUE_FULL after %d commands\n",
 							   HOSTNO, cmd->device->id, cmd->device->lun,
 							   ta->nr_allocated);
@@ -2249,14 +2214,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 					    (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
 						scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
-						ASEN_PRINTK("scsi%d: performing request sense\n", HOSTNO);
+						dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO);
 						local_irq_save(flags);
 						LIST(cmd,hostdata->issue_queue);
 						SET_NEXT(cmd, hostdata->issue_queue);
 						hostdata->issue_queue = (Scsi_Cmnd *) cmd;
 						local_irq_restore(flags);
-						QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
+						dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
 							  "issue queue\n", H_NO(cmd));
 					} else
 #endif /* def AUTOSENSE */
@@ -2302,7 +2267,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 						cmd->device->tagged_supported = 0;
 						hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
 						cmd->tag = TAG_NONE;
-						TAG_PRINTK("scsi%d: target %d lun %d rejected "
+						dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected "
 							   "QUEUE_TAG message; tagged queuing "
 							   "disabled\n",
 							   HOSTNO, cmd->device->id, cmd->device->lun);
@@ -2319,7 +2284,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 					hostdata->connected = NULL;
 					hostdata->disconnected_queue = cmd;
 					local_irq_restore(flags);
-					QU_PRINTK("scsi%d: command for target %d lun %d was "
+					dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was "
 						  "moved from connected to the "
 						  "disconnected_queue\n", HOSTNO,
 						  cmd->device->id, cmd->device->lun);
@@ -2369,13 +2334,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 					/* Accept first byte by clearing ACK */
 					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-					EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO);
+					dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);
 					len = 2;
 					data = extended_msg + 1;
 					phase = PHASE_MSGIN;
 					NCR5380_transfer_pio(instance, &phase, &len, &data);
-					EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO,
+					dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,
 						   (int)extended_msg[1], (int)extended_msg[2]);
 					if (!len && extended_msg[1] <=
@@ -2387,7 +2352,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 						phase = PHASE_MSGIN;
 						NCR5380_transfer_pio(instance, &phase, &len, &data);
-						EXT_PRINTK("scsi%d: message received, residual %d\n",
+						dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",
 							   HOSTNO, len);
 					switch (extended_msg[2]) {
@@ -2476,7 +2441,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 				break;
 			default:
 				printk("scsi%d: unknown phase\n", HOSTNO);
-				NCR_PRINT(NDEBUG_ANY);
+				NCR5380_dprint(NDEBUG_ANY, instance);
 			}	/* switch(phase) */
 		}	/* if (tmp * SR_REQ) */
 	}	/* while (1) */
@@ -2518,7 +2483,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 	target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
-	RSL_PRINTK("scsi%d: reselect\n", HOSTNO);
+	dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);
 	/*
 	 * At this point, we have detected that our SCSI ID is on the bus,
@@ -2569,7 +2534,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 		if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
 		    msg[1] == SIMPLE_QUEUE_TAG)
 			tag = msg[2];
-		TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at "
+		dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "
 			   "reselection\n", HOSTNO, target_mask, lun, tag);
 	}
 #endif
@@ -2623,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 	hostdata->connected = tmp;
-	RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
+	dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
 		   HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
 	falcon_dont_release--;
 }
@@ -2665,7 +2630,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
 		printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n",
 		       HOSTNO);
-	ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
+	dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
 		    NCR5380_read(BUS_AND_STATUS_REG),
 		    NCR5380_read(STATUS_REG));
@@ -2678,7 +2643,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
 	if (hostdata->connected == cmd) {
-		ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO);
+		dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
 		/*
 		 * We should perform BSY checking, and make sure we haven't slipped
 		 * into BUS FREE.
@@ -2708,11 +2673,11 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
 			local_irq_restore(flags);
 			cmd->scsi_done(cmd);
 			falcon_release_lock_if_possible(hostdata);
-			return SCSI_ABORT_SUCCESS;
+			return SUCCESS;
 		} else {
/*			local_irq_restore(flags); */
 			printk("scsi%d: abort of connected command failed!\n", HOSTNO);
-			return SCSI_ABORT_ERROR;
+			return FAILED;
 		}
 	}
 #endif
@@ -2730,13 +2695,13 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
 			SET_NEXT(tmp, NULL);
 			tmp->result = DID_ABORT << 16;
 			local_irq_restore(flags);
-			ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n",
+			dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
 				    HOSTNO);
 			/* Tagged queuing note: no tag to free here, hasn't been assigned
 			 * yet...
 			 */
 			tmp->scsi_done(tmp);
 			falcon_release_lock_if_possible(hostdata);
-			return SCSI_ABORT_SUCCESS;
+			return SUCCESS;
 		}
 	}
@@ -2753,8 +2718,8 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
 	if (hostdata->connected) {
 		local_irq_restore(flags);
-		ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO);
-		return SCSI_ABORT_SNOOZE;
+		dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
+		return FAILED;
 	}
 	/*
@@ -2786,12 +2751,12 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
 	     tmp = NEXT(tmp)) {
 		if (cmd == tmp) {
 			local_irq_restore(flags);
-			ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO);
+			dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);
 			if (NCR5380_select(instance, cmd, (int)cmd->tag))
-				return SCSI_ABORT_BUSY;
+				return FAILED;
-			ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO);
+			dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);
 			do_abort(instance);
@@ -2816,7 +2781,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
 				local_irq_restore(flags);
 				tmp->scsi_done(tmp);
 				falcon_release_lock_if_possible(hostdata);
-				return SCSI_ABORT_SUCCESS;
+				return SUCCESS;
 			}
 		}
 	}
@@ -2841,7 +2806,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
 	 */
 	falcon_release_lock_if_possible(hostdata);
-	return SCSI_ABORT_NOT_RUNNING;
+	return FAILED;
 }
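/*
 * Editor's note: the driver-private SCSI_ABORT_* codes returned above are
 * replaced by the midlayer's SUCCESS/FAILED, which is all a modern
 * eh_abort_handler may return. A hedged sketch of that contract (the helper
 * name is hypothetical, not this driver's code):
 */
static int example_eh_abort(struct scsi_cmnd *cmd)
{
	if (hw_abort_command(cmd) == 0)	/* hypothetical hardware helper */
		return SUCCESS;		/* midlayer completes/retries cmd */
	return FAILED;			/* midlayer escalates to a reset */
}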
@@ -2850,7 +2815,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
 *
 * Purpose : reset the SCSI bus.
 *
- * Returns : SCSI_RESET_WAKEUP
+ * Returns : SUCCESS or FAILURE
 *
 */
@@ -2859,7 +2824,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
 	SETUP_HOSTDATA(cmd->device->host);
 	int i;
 	unsigned long flags;
-#if 1
+#if defined(RESET_RUN_DONE)
 	Scsi_Cmnd *connected, *disconnected_queue;
 #endif
@@ -2884,7 +2849,14 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
 	 * through anymore ... */
 	(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
-#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */
+	/* MSch 20140115 - looking at the generic NCR5380 driver, all of this
+	 * should go.
+	 * Catch-22: if we don't clear all queues, the SCSI driver lock will
+	 * not be reset by atari_scsi_reset()!
+	 */
+
+#if defined(RESET_RUN_DONE)
+	/* XXX Should now be done by midlevel code, but it's broken XXX */
 	/* XXX see below XXX */
 	/* MSch: old-style reset: actually abort all command processing here */
@@ -2915,7 +2887,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
 	 */
 	if ((cmd = connected)) {
-		ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
+		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
 		cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
 		cmd->scsi_done(cmd);
 	}
@@ -2927,7 +2899,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
 		cmd->scsi_done(cmd);
 	}
 	if (i > 0)
-		ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i);
+		dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);
 	/* The Falcon lock should be released after a reset...
 	 */
@@ -2940,7 +2912,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
 	 * the midlevel code that the reset was SUCCESSFUL, and there is no
 	 * need to 'wake up' the commands by a request_sense
 	 */
-	return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
+	return SUCCESS;
 #else /* 1 */
 	/* MSch: new-style reset handling: let the mid-level do what it can */
@@ -2967,11 +2939,11 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
 	 */
 	if (hostdata->issue_queue)
-		ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
+		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
 	if (hostdata->connected)
-		ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
+		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
 	if (hostdata->disconnected_queue)
-		ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
+		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
 	local_irq_save(flags);
 	hostdata->issue_queue = NULL;
@@ -2988,6 +2960,6 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
 	local_irq_restore(flags);
 	/* we did no complete reset of all commands, so a wakeup is required */
-	return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET;
+	return SUCCESS;
 #endif /* 1 */
 }
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index df740cbbaef..b522134528d 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -67,12 +67,6 @@
 #include <linux/module.h>
-#define NDEBUG (0)
-
-#define NDEBUG_ABORT	0x00100000
-#define NDEBUG_TAGS	0x00200000
-#define NDEBUG_MERGING	0x00400000
-
 #define AUTOSENSE
 /* For the Atari version, use only polled IO or REAL_DMA */
 #define REAL_DMA
@@ -90,6 +84,7 @@
 #include <linux/init.h>
 #include <linux/nvram.h>
 #include <linux/bitops.h>
+#include <linux/wait.h>
 #include <asm/setup.h>
 #include <asm/atarihw.h>
@@ -313,7 +308,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
 	dma_stat = tt_scsi_dma.dma_ctrl;
-	INT_PRINTK("scsi%d: NCR5380 interrupt, DMA status = %02x\n",
+	dprintk(NDEBUG_INTR, "scsi%d: NCR5380 interrupt, DMA status = %02x\n",
 		   atari_scsi_host->host_no, dma_stat & 0xff);
 	/* Look if it was the DMA that has interrupted: First possibility
@@ -339,7 +334,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
 	if ((dma_stat & 0x02) && !(dma_stat & 0x40)) {
 		atari_dma_residual = HOSTDATA_DMALEN -
 			(SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr);
-		DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n",
+		dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",
 			   atari_dma_residual);
 		if ((signed int)atari_dma_residual < 0)
@@ -370,7 +365,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
 		 * other command. These shouldn't disconnect anyway.
 		 */
 		if (atari_dma_residual & 0x1ff) {
-			DMA_PRINTK("SCSI DMA: DMA bug corrected, "
+			dprintk(NDEBUG_DMA, "SCSI DMA: DMA bug corrected, "
 				   "difference %ld bytes\n",
 				   512 - (atari_dma_residual & 0x1ff));
 			atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff;
@@ -437,7 +432,7 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy)
 			       "ST-DMA fifo\n", transferred & 15);
 		atari_dma_residual = HOSTDATA_DMALEN - transferred;
-		DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n",
+		dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",
 			   atari_dma_residual);
 	} else
 		atari_dma_residual = 0;
@@ -473,11 +468,11 @@ static void atari_scsi_fetch_restbytes(void)
 		/* there are 'nr' bytes left for the last long address before the
 		   DMA pointer */
 		phys_dst ^= nr;
-		DMA_PRINTK("SCSI DMA: there are %d rest bytes for phys addr 0x%08lx",
+		dprintk(NDEBUG_DMA, "SCSI DMA: there are %d rest bytes for phys addr 0x%08lx",
 			   nr, phys_dst);
 		/* The content of the DMA pointer is a physical address! */
 		dst = phys_to_virt(phys_dst);
-		DMA_PRINTK(" = virt addr %p\n", dst);
+		dprintk(NDEBUG_DMA, " = virt addr %p\n", dst);
 		for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr)
 			*dst++ = *src++;
 	}
@@ -549,8 +544,10 @@ static void falcon_get_lock(void)
 	local_irq_save(flags);
-	while (!in_irq() && falcon_got_lock && stdma_others_waiting())
-		sleep_on(&falcon_fairness_wait);
+	wait_event_cmd(falcon_fairness_wait,
+		in_interrupt() || !falcon_got_lock || !stdma_others_waiting(),
+		local_irq_restore(flags),
+		local_irq_save(flags));
 	while (!falcon_got_lock) {
 		if (in_irq())
@@ -562,7 +559,10 @@ static void falcon_get_lock(void)
 			falcon_trying_lock = 0;
 			wake_up(&falcon_try_wait);
 		} else {
-			sleep_on(&falcon_try_wait);
+			wait_event_cmd(falcon_try_wait,
+				falcon_got_lock && !falcon_trying_lock,
+				local_irq_restore(flags),
+				local_irq_save(flags));
 		}
 	}
@@ -633,7 +633,7 @@ static int __init atari_scsi_detect(struct scsi_host_template *host)
 			       "double buffer\n");
 			return 0;
 		}
-		atari_dma_phys_buffer = virt_to_phys(atari_dma_buffer);
+		atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer);
 		atari_dma_orig_addr = 0;
 	}
 #endif
@@ -821,7 +821,7 @@ static int atari_scsi_bus_reset(Scsi_Cmnd *cmd)
 	} else {
 		atari_turnon_irq(IRQ_MFP_FSCSI);
 	}
-	if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS)
+	if (rv == SUCCESS)
 		falcon_release_lock_if_possible(hostdata);
 	return rv;
@@ -877,7 +877,7 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
 {
 	unsigned long addr = virt_to_phys(data);
-	DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
+	dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
 		   "dir = %d\n", instance->host_no, data, addr, count, dir);
 	if (!IS_A_TT() && !STRAM_ADDR(addr)) {
@@ -1057,7 +1057,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
 		possible_len = limit;
 	if (possible_len != wanted_len)
-		DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes "
+		dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes "
 			   "instead of %ld\n", possible_len, wanted_len);
 	return possible_len;
@@ -1100,7 +1100,7 @@ static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value)
 #include "atari_NCR5380.c"
 static struct scsi_host_template driver_template = {
-	.proc_info		= atari_scsi_proc_info,
+	.show_info		= atari_scsi_show_info,
 	.name			= "Atari native SCSI",
 	.detect			= atari_scsi_detect,
 	.release		= atari_scsi_release,
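/*
 * Editor's note: falcon_get_lock() above drops the removed sleep_on() API,
 * which could lose a wakeup arriving between testing the condition and going
 * to sleep. wait_event_cmd(wq, condition, cmd1, cmd2) re-tests the condition
 * under the waitqueue machinery and runs cmd1/cmd2 around each schedule();
 * here they restore and re-save the IRQ flags. Shape of the call:
 *
 *	wait_event_cmd(some_waitqueue, done_condition,
 *		       local_irq_restore(flags),	// run before sleeping
 *		       local_irq_save(flags));		// run after waking
 */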
diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h
index bd52df78b20..3299d91d733 100644
--- a/drivers/scsi/atari_scsi.h
+++ b/drivers/scsi/atari_scsi.h
@@ -47,132 +47,13 @@
 #define NCR5380_intr atari_scsi_intr
 #define NCR5380_queue_command atari_scsi_queue_command
 #define NCR5380_abort atari_scsi_abort
-#define NCR5380_proc_info atari_scsi_proc_info
+#define NCR5380_show_info atari_scsi_show_info
 #define NCR5380_dma_read_setup(inst,d,c) atari_scsi_dma_setup (inst, d, c, 0)
 #define NCR5380_dma_write_setup(inst,d,c) atari_scsi_dma_setup (inst, d, c, 1)
 #define NCR5380_dma_residual(inst) atari_scsi_dma_residual( inst )
 #define NCR5380_dma_xfer_len(i,cmd,phase) \
 	atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1)
-/* former generic SCSI error handling stuff */
-
-#define SCSI_ABORT_SNOOZE 0
-#define SCSI_ABORT_SUCCESS 1
-#define SCSI_ABORT_PENDING 2
-#define SCSI_ABORT_BUSY 3
-#define SCSI_ABORT_NOT_RUNNING 4
-#define SCSI_ABORT_ERROR 5
-
-#define SCSI_RESET_SNOOZE 0
-#define SCSI_RESET_PUNT 1
-#define SCSI_RESET_SUCCESS 2
-#define SCSI_RESET_PENDING 3
-#define SCSI_RESET_WAKEUP 4
-#define SCSI_RESET_NOT_RUNNING 5
-#define SCSI_RESET_ERROR 6
-
-#define SCSI_RESET_SYNCHRONOUS		0x01
-#define SCSI_RESET_ASYNCHRONOUS		0x02
-#define SCSI_RESET_SUGGEST_BUS_RESET	0x04
-#define SCSI_RESET_SUGGEST_HOST_RESET	0x08
-
-#define SCSI_RESET_BUS_RESET 0x100
-#define SCSI_RESET_HOST_RESET 0x200
-#define SCSI_RESET_ACTION 0xff
-
-/* Debugging printk definitions:
- *
- *  ARB  -> arbitration
- *  ASEN -> auto-sense
- *  DMA  -> DMA
- *  HSH  -> PIO handshake
- *  INF  -> information transfer
- *  INI  -> initialization
- *  INT  -> interrupt
- *  LNK  -> linked commands
- *  MAIN -> NCR5380_main() control flow
- *  NDAT -> no data-out phase
- *  NWR  -> no write commands
- *  PIO  -> PIO transfers
- *  PDMA -> pseudo DMA (unused on Atari)
- *  QU   -> queues
- *  RSL  -> reselections
- *  SEL  -> selections
- *  USL  -> usleep cpde (unused on Atari)
- *  LBS  -> last byte sent (unused on Atari)
- *  RSS  -> restarting of selections
- *  EXT  -> extended messages
- *  ABRT -> aborting and resetting
- *  TAG  -> queue tag handling
- *  MER  -> merging of consec. buffers
- *
- */
-
-#define dprint(flg, format...)			\
-({						\
-	if (NDEBUG & (flg))			\
-		printk(KERN_DEBUG format);	\
-})
-
-#define ARB_PRINTK(format, args...) \
-	dprint(NDEBUG_ARBITRATION, format , ## args)
-#define ASEN_PRINTK(format, args...) \
-	dprint(NDEBUG_AUTOSENSE, format , ## args)
-#define DMA_PRINTK(format, args...) \
-	dprint(NDEBUG_DMA, format , ## args)
-#define HSH_PRINTK(format, args...) \
-	dprint(NDEBUG_HANDSHAKE, format , ## args)
-#define INF_PRINTK(format, args...) \
-	dprint(NDEBUG_INFORMATION, format , ## args)
-#define INI_PRINTK(format, args...) \
-	dprint(NDEBUG_INIT, format , ## args)
-#define INT_PRINTK(format, args...) \
-	dprint(NDEBUG_INTR, format , ## args)
-#define LNK_PRINTK(format, args...) \
-	dprint(NDEBUG_LINKED, format , ## args)
-#define MAIN_PRINTK(format, args...) \
-	dprint(NDEBUG_MAIN, format , ## args)
-#define NDAT_PRINTK(format, args...) \
-	dprint(NDEBUG_NO_DATAOUT, format , ## args)
-#define NWR_PRINTK(format, args...) \
-	dprint(NDEBUG_NO_WRITE, format , ## args)
-#define PIO_PRINTK(format, args...) \
-	dprint(NDEBUG_PIO, format , ## args)
-#define PDMA_PRINTK(format, args...) \
-	dprint(NDEBUG_PSEUDO_DMA, format , ## args)
-#define QU_PRINTK(format, args...) \
-	dprint(NDEBUG_QUEUES, format , ## args)
-#define RSL_PRINTK(format, args...) \
-	dprint(NDEBUG_RESELECTION, format , ## args)
-#define SEL_PRINTK(format, args...) \
-	dprint(NDEBUG_SELECTION, format , ## args)
-#define USL_PRINTK(format, args...) \
-	dprint(NDEBUG_USLEEP, format , ## args)
-#define LBS_PRINTK(format, args...) \
-	dprint(NDEBUG_LAST_BYTE_SENT, format , ## args)
-#define RSS_PRINTK(format, args...) \
-	dprint(NDEBUG_RESTART_SELECT, format , ## args)
-#define EXT_PRINTK(format, args...) \
-	dprint(NDEBUG_EXTENDED, format , ## args)
-#define ABRT_PRINTK(format, args...) \
-	dprint(NDEBUG_ABORT, format , ## args)
-#define TAG_PRINTK(format, args...) \
-	dprint(NDEBUG_TAGS, format , ## args)
-#define MER_PRINTK(format, args...) \
-	dprint(NDEBUG_MERGING, format , ## args)
-
-/* conditional macros for NCR5380_print_{,phase,status} */
-
-#define NCR_PRINT(mask) \
-	((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0)
-
-#define NCR_PRINT_PHASE(mask) \
-	((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0)
-
-#define NCR_PRINT_STATUS(mask) \
-	((NDEBUG & (mask)) ? NCR5380_print_status(instance) : (void)0)
-
-
 #endif /* ndef ASM */
 #endif /* ATARI_SCSI_H */
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index cfc73041f10..a795d81ef87 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -3099,38 +3099,14 @@ static const char *atp870u_info(struct Scsi_Host *notused)
 	return buffer;
 }
-#define BLS buffer + len + size
-static int atp870u_proc_info(struct Scsi_Host *HBAptr, char *buffer,
-			     char **start, off_t offset, int length, int inout)
+static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr)
 {
-	static u8 buff[512];
-	int size = 0;
-	int len = 0;
-	off_t begin = 0;
-	off_t pos = 0;
-
-	if (inout)
-		return -EINVAL;
-	if (offset == 0)
-		memset(buff, 0, sizeof(buff));
-	size += sprintf(BLS, "ACARD AEC-671X Driver Version: 2.6+ac\n");
-	len += size;
-	pos = begin + len;
-	size = 0;
-
-	size += sprintf(BLS, "\n");
-	size += sprintf(BLS, "Adapter Configuration:\n");
-	size += sprintf(BLS, "               Base IO: %#.4lx\n", HBAptr->io_port);
-	size += sprintf(BLS, "                   IRQ: %d\n", HBAptr->irq);
-	len += size;
-	pos = begin + len;
-
-	*start = buffer + (offset - begin);	/* Start of wanted data */
-	len -= (offset - begin);	/* Start slop */
-	if (len > length) {
-		len = length;	/* Ending slop */
-	}
-	return (len);
+	seq_printf(m, "ACARD AEC-671X Driver Version: 2.6+ac\n");
+	seq_printf(m, "\n");
+	seq_printf(m, "Adapter Configuration:\n");
+	seq_printf(m, "               Base IO: %#.4lx\n", HBAptr->io_port);
+	seq_printf(m, "                   IRQ: %d\n", HBAptr->irq);
+	return 0;
 }
@@ -3168,8 +3144,6 @@ static void atp870u_remove (struct pci_dev *pdev)
 	atp870u_free_tables(pshost);
 	printk(KERN_INFO "scsi_host_put : %p\n",pshost);
 	scsi_host_put(pshost);
-	printk(KERN_INFO "pci_set_drvdata : %p\n",pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 MODULE_LICENSE("GPL");
@@ -3177,7 +3151,7 @@ static struct scsi_host_template atp870u_template = {
 	.module			= THIS_MODULE,
 	.name			= "atp870u"		/* name */,
 	.proc_name		= "atp870u",
-	.proc_info		= atp870u_proc_info,
+	.show_info		= atp870u_show_info,
 	.info			= atp870u_info		/* info */,
 	.queuecommand		= atp870u_queuecommand	/* queuecommand */,
 	.eh_abort_handler	= atp870u_abort		/* abort */,
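/*
 * Editor's note: the ->proc_info() read/write interface is replaced by
 * ->show_info() on a seq_file, which removes the manual offset/length
 * bookkeeping deleted from atp870u above. Minimal sketch of the new shape:
 */
static int example_show_info(struct seq_file *m, struct Scsi_Host *shost)
{
	seq_printf(m, "Base IO: %#.4lx\n", shost->io_port);
	seq_printf(m, "IRQ: %d\n", shost->irq);
	return 0;	/* seq_file does the buffering and paging */
}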
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index f1733dfa3ae..860f527d8f2 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
@@ -83,9 +83,20 @@ static inline void queue_tail_inc(struct be_queue_info *q)
 /*ISCSI */
+struct be_aic_obj {		/* Adaptive interrupt coalescing (AIC) info */
+	bool enable;
+	u32 min_eqd;		/* in usecs */
+	u32 max_eqd;		/* in usecs */
+	u32 prev_eqd;		/* in usecs */
+	u32 et_eqd;		/* configured val when aic is off */
+	ulong jiffs;
+	u64 eq_prev;		/* Used to calculate eqe */
+};
+
 struct be_eq_obj {
 	bool todo_mcc_cq;
 	bool todo_cq;
+	u32 cq_count;
 	struct be_queue_info q;
 	struct beiscsi_hba *phba;
 	struct be_queue_info *cq;
@@ -98,6 +109,14 @@ struct be_mcc_obj {
 	struct be_queue_info cq;
 };
+struct beiscsi_mcc_tag_state {
+#define MCC_TAG_STATE_COMPLETED 0x00
+#define MCC_TAG_STATE_RUNNING	0x01
+#define MCC_TAG_STATE_TIMEOUT	0x02
+	uint8_t tag_state;
+	struct be_dma_mem tag_mem_state;
+};
+
 struct be_ctrl_info {
 	u8 __iomem *csr;
 	u8 __iomem *db;		/* Door Bell */
@@ -122,13 +141,16 @@ struct be_ctrl_info {
 	unsigned short mcc_alloc_index;
 	unsigned short mcc_free_index;
 	unsigned int mcc_tag_available;
+
+	struct beiscsi_mcc_tag_state ptag_state[MAX_MCC_CMD + 1];
 };
 #include "be_cmds.h"
 #define PAGE_SHIFT_4K 12
 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
-#define mcc_timeout		120000 /* 5s timeout */
+#define mcc_timeout		120000 /* 12s timeout */
+#define BEISCSI_LOGOUT_SYNC_DELAY	250
 /* Returns number of pages spanned by the data starting at the given addr */
 #define PAGES_4K_SPANNED(_address, size)	\
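/*
 * Editor's note: be_aic_obj added above holds the state for adaptive
 * interrupt coalescing: sample the EQ event count, derive an event rate, and
 * reprogram the EQ delay between min_eqd and max_eqd. A hedged sketch of the
 * idea (field names from the struct; the scaling constant is illustrative,
 * not the driver's):
 */
static u32 example_next_eqd(struct be_aic_obj *aic, u64 cq_count,
			    unsigned long now)
{
	unsigned long delta = max(1UL, now - aic->jiffs);
	u32 pps = (u32)div64_u64((cq_count - aic->eq_prev) * HZ, delta);
	u32 eqd = pps / 15000;			/* illustrative scaling */

	aic->jiffs = now;			/* remember sample point */
	aic->eq_prev = cq_count;
	return clamp(eqd, aic->min_eqd, aic->max_eqd);
}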
* @@ -148,17 +148,25 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba) **/ int beiscsi_mccq_compl(struct beiscsi_hba *phba, uint32_t tag, struct be_mcc_wrb **wrb, - void *cmd_hdr) + struct be_dma_mem *mbx_cmd_mem) { int rc = 0; uint32_t mcc_tag_response; uint16_t status = 0, addl_status = 0, wrb_num = 0; struct be_mcc_wrb *temp_wrb; - struct be_cmd_req_hdr *ioctl_hdr; + struct be_cmd_req_hdr *mbx_hdr; + struct be_cmd_resp_hdr *mbx_resp_hdr; struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; - if (beiscsi_error(phba)) - return -EIO; + if (beiscsi_error(phba)) { + free_mcc_tag(&phba->ctrl, tag); + return -EPERM; + } + + /* Set MBX Tag state to Active */ + spin_lock(&phba->ctrl.mbox_lock); + phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING; + spin_unlock(&phba->ctrl.mbox_lock); /* wait for the mccq completion */ rc = wait_event_interruptible_timeout( @@ -168,43 +176,76 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, BEISCSI_HOST_MBX_TIMEOUT)); if (rc <= 0) { + struct be_dma_mem *tag_mem; + /* Set MBX Tag state to timeout */ + spin_lock(&phba->ctrl.mbox_lock); + phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT; + spin_unlock(&phba->ctrl.mbox_lock); + + /* Store resource addr to be freed later */ + tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state; + if (mbx_cmd_mem) { + tag_mem->size = mbx_cmd_mem->size; + tag_mem->va = mbx_cmd_mem->va; + tag_mem->dma = mbx_cmd_mem->dma; + } else + tag_mem->size = 0; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_EH | BEISCSI_LOG_CONFIG, "BC_%d : MBX Cmd Completion timed out\n"); - rc = -EAGAIN; - goto release_mcc_tag; - } else + return -EBUSY; + } else { rc = 0; + /* Set MBX Tag state to completed */ + spin_lock(&phba->ctrl.mbox_lock); + phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED; + spin_unlock(&phba->ctrl.mbox_lock); + } mcc_tag_response = phba->ctrl.mcc_numtag[tag]; status = (mcc_tag_response & CQE_STATUS_MASK); addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT); - if (cmd_hdr) { - ioctl_hdr = (struct be_cmd_req_hdr *)cmd_hdr; + if (mbx_cmd_mem) { + mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va; } else { wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT; temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num); - ioctl_hdr = embedded_payload(temp_wrb); + mbx_hdr = embedded_payload(temp_wrb); if (wrb) *wrb = temp_wrb; } if (status || addl_status) { - beiscsi_log(phba, KERN_ERR, + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT | BEISCSI_LOG_EH | BEISCSI_LOG_CONFIG, "BC_%d : MBX Cmd Failed for " "Subsys : %d Opcode : %d with " "Status : %d and Extd_Status : %d\n", - ioctl_hdr->subsystem, - ioctl_hdr->opcode, + mbx_hdr->subsystem, + mbx_hdr->opcode, status, addl_status); - rc = -EAGAIN; + + if (status == MCC_STATUS_INSUFFICIENT_BUFFER) { + mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr; + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT | BEISCSI_LOG_EH | + BEISCSI_LOG_CONFIG, + "BC_%d : Insufficent Buffer Error " + "Resp_Len : %d Actual_Resp_Len : %d\n", + mbx_resp_hdr->response_length, + mbx_resp_hdr->actual_resp_len); + + rc = -EAGAIN; + goto release_mcc_tag; + } + rc = -EIO; } release_mcc_tag: @@ -267,6 +308,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl, struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); struct be_cmd_req_hdr *hdr = embedded_payload(wrb); + struct be_cmd_resp_hdr *resp_hdr; be_dws_le_to_cpu(compl, 4); @@ -284,6 +326,11 @@ 
@@ -267,6 +308,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
+	struct be_cmd_resp_hdr *resp_hdr;
 	be_dws_le_to_cpu(compl, 4);
@@ -284,6 +326,11 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
 			    hdr->subsystem, hdr->opcode,
 			    compl_status, extd_status);
+		if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+			resp_hdr = (struct be_cmd_resp_hdr *) hdr;
+			if (resp_hdr->response_length)
+				return 0;
+		}
 		return -EBUSY;
 	}
 	return 0;
@@ -292,6 +339,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
 int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
 				    struct be_mcc_compl *compl)
 {
+	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 	u16 compl_status, extd_status;
 	unsigned short tag;
@@ -311,7 +359,32 @@ int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
 	ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
 	ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
 	ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
-	wake_up_interruptible(&ctrl->mcc_wait[tag]);
+
+	if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) {
+		wake_up_interruptible(&ctrl->mcc_wait[tag]);
+	} else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) {
+		struct be_dma_mem *tag_mem;
+		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+
+		beiscsi_log(phba, KERN_WARNING,
+			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
+			    BEISCSI_LOG_CONFIG,
+			    "BC_%d : MBX Completion for timeout Command "
+			    "from FW\n");
+		/* Check if memory needs to be freed */
+		if (tag_mem->size)
+			pci_free_consistent(ctrl->pdev, tag_mem->size,
+					    tag_mem->va, tag_mem->dma);
+
+		/* Change tag state */
+		spin_lock(&phba->ctrl.mbox_lock);
+		ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
+		spin_unlock(&phba->ctrl.mbox_lock);
+
+		/* Free MCC Tag */
+		free_mcc_tag(ctrl, tag);
+	}
+
 	return 0;
 }
@@ -327,54 +400,53 @@ static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
 	return NULL;
 }
-static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
+/**
+ * be2iscsi_fail_session(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ *
+ * Depending on adapter state appropriate error flag is passed.
+ **/
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
 {
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	uint32_t iscsi_err_flag;
+
+	if (phba->state & BE_ADAPTER_STATE_SHUTDOWN)
+		iscsi_err_flag = ISCSI_ERR_INVALID_HOST;
+	else
+		iscsi_err_flag = ISCSI_ERR_CONN_FAILED;
+
-	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+	iscsi_session_failure(cls_session->dd_data, iscsi_err_flag);
 }
 void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
 		struct be_async_event_link_state *evt)
 {
-	switch (evt->port_link_status) {
-	case ASYNC_EVENT_LINK_DOWN:
+	if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
+	    ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+	     (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
+		phba->state = BE_ADAPTER_LINK_DOWN;
+
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-			    "BC_%d : Link Down on Physical Port %d\n",
+			    "BC_%d : Link Down on Port %d\n",
 			    evt->physical_port);
-		phba->state |= BE_ADAPTER_LINK_DOWN;
 		iscsi_host_for_each_session(phba->shost,
 					    be2iscsi_fail_session);
-		break;
-	case ASYNC_EVENT_LINK_UP:
-		phba->state = BE_ADAPTER_UP;
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-			    "BC_%d : Link UP on Physical Port %d\n",
-			    evt->physical_port);
-		break;
-	default:
+	} else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
+		   ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+		    (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
+		phba->state = BE_ADAPTER_LINK_UP;
+
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-			    "BC_%d : Unexpected Async Notification %d on"
-			    "Physical Port %d\n",
-			    evt->port_link_status,
+			    "BC_%d : Link UP on Port %d\n",
 			    evt->physical_port);
 	}
 }
-static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
-			      u16 num_popped)
-{
-	u32 val = 0;
-	val |= qid & DB_CQ_RING_ID_MASK;
-	if (arm)
-		val |= 1 << DB_CQ_REARM_SHIFT;
-	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
-	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
-}
-
 int beiscsi_process_mcc(struct beiscsi_hba *phba)
 {
 	struct be_mcc_compl *compl;
@@ -405,7 +477,7 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
 	}
 	if (num)
-		beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);
+		hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);
 	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
 	return status;
@@ -477,33 +549,47 @@ int be_mcc_notify_wait(struct beiscsi_hba *phba)
 **/
 static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
 {
+#define BEISCSI_MBX_RDY_BIT_TIMEOUT	4000	/* 4sec */
 	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-	int wait = 0;
+	unsigned long timeout;
+	bool read_flag = false;
+	int ret = 0, i;
 	u32 ready;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);
-	do {
+	if (beiscsi_error(phba))
+		return -EIO;
-		if (beiscsi_error(phba))
-			return -EIO;
+	timeout = jiffies + (HZ * 110);
-		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
-		if (ready)
-			break;
+	do {
+		for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
+			ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
+			if (ready) {
+				read_flag = true;
+				break;
+			}
+			mdelay(1);
+		}
-		if (wait > BEISCSI_HOST_MBX_TIMEOUT) {
-			beiscsi_log(phba, KERN_ERR,
-				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-				    "BC_%d : FW Timed Out\n");
+		if (!read_flag) {
+			wait_event_timeout(rdybit_check_q,
+					  (read_flag != true),
+					   HZ * 5);
+		}
+	} while ((time_before(jiffies, timeout)) && !read_flag);
+
+	if (!read_flag) {
+		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+			    "BC_%d : FW Timed Out\n");
 		phba->fw_timeout = true;
 		beiscsi_ue_detect(phba);
-		return -EBUSY;
-		}
+		ret = -EBUSY;
+	}
-		mdelay(1);
-		wait++;
-	} while (true);
-	return 0;
+	return ret;
 }
 /*
@@ -527,6 +613,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
 	struct be_mcc_compl *compl = &mbox->compl;
 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+	status = be_mbox_db_ready_wait(ctrl);
+	if (status)
+		return status;
+
 	val &= ~MPU_MAILBOX_DB_RDY_MASK;
 	val |= MPU_MAILBOX_DB_HI_MASK;
 	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -580,6 +670,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
 	struct be_mcc_compl *compl = &mbox->compl;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
+	status = be_mbox_db_ready_wait(ctrl);
+	if (status)
+		return status;
+
 	val |= MPU_MAILBOX_DB_HI_MASK;
 	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
 	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -682,7 +776,7 @@ struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 	struct be_mcc_wrb *wrb;
-	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
+	WARN_ON(atomic_read(&mccq->used) >= mccq->len);
 	wrb = queue_head_node(mccq);
 	memset(wrb, 0, sizeof(*wrb));
 	wrb->tag0 = (mccq->head & 0x000000FF) << 16;
@@ -732,6 +826,16 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
 	return status;
 }
+/**
+ * be_cmd_fw_initialize()- Initialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW initialize pattern for the function.
+ *
+ * return
+ *	Success: 0
+ *	Failure: Non-Zero value
+ **/
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
 {
 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -762,6 +866,47 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
 	return status;
 }
+/**
+ * be_cmd_fw_uninit()- Uninitialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW uninitialize pattern for the function
+ *
+ * return
+ *	Success: 0
+ *	Failure: Non-Zero value
+ **/
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
+{
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+	int status;
+	u8 *endian_check;
+
+	spin_lock(&ctrl->mbox_lock);
+	memset(wrb, 0, sizeof(*wrb));
+
+	endian_check = (u8 *) wrb;
+	*endian_check++ = 0xFF;
+	*endian_check++ = 0xAA;
+	*endian_check++ = 0xBB;
+	*endian_check++ = 0xFF;
+	*endian_check++ = 0xFF;
+	*endian_check++ = 0xCC;
+	*endian_check++ = 0xDD;
+	*endian_check = 0xFF;
+
+	be_dws_cpu_to_le(wrb, sizeof(*wrb));
+
+	status = be_mbox_notify(ctrl);
+	if (status)
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BC_%d : be_cmd_fw_uninit Failed\n");
+
+	spin_unlock(&ctrl->mbox_lock);
+	return status;
+}
+
 int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
 			  struct be_queue_info *cq, struct be_queue_info *eq,
 			  bool sol_evts, bool no_delay, int coalesce_wm)
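/*
 * Editor's note: be_mbox_db_ready_wait() above replaces an unbounded
 * poll-forever loop with a bounded inner mdelay() poll under an overall
 * jiffies deadline, and both mailbox notify paths now check it before
 * ringing the doorbell. Reduced shape of the pattern (bounds illustrative):
 */
static int example_ready_wait(void __iomem *db)
{
	unsigned long deadline = jiffies + 4 * HZ;	/* illustrative bound */

	do {
		if (ioread32(db) & MPU_MAILBOX_DB_RDY_MASK)
			return 0;
		mdelay(1);		/* busy-wait; mbox lock is held */
	} while (time_before(jiffies, deadline));
	return -EBUSY;			/* treat as a firmware timeout */
}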
@@ -783,20 +928,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
 			OPCODE_COMMON_CQ_CREATE, sizeof(*req));
 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
-	if (chip_skh_r(ctrl->pdev)) {
-		req->hdr.version = MBX_CMD_VER2;
-		req->page_size = 1;
-		AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
-			      ctxt, coalesce_wm);
-		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
-			      ctxt, no_delay);
-		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
-			      __ilog2_u32(cq->len / 256));
-		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
-		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
-		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
-		AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
-	} else {
+	if (is_chip_be2_be3r(phba)) {
 		AMAP_SET_BITS(struct amap_cq_context, coalescwm,
 			      ctxt, coalesce_wm);
 		AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
@@ -809,6 +941,19 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
 		AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
 		AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
 			      PCI_FUNC(ctrl->pdev->devfn));
+	} else {
+		req->hdr.version = MBX_CMD_VER2;
+		req->page_size = 1;
+		AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+			      ctxt, coalesce_wm);
+		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
+			      ctxt, no_delay);
+		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+			      __ilog2_u32(cq->len / 256));
+		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
+		AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
 	}
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -941,14 +1086,34 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
 	return status;
 }
+/**
+ * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
+ * @ctrl: ptr to ctrl_info
+ * @cq: Completion Queue
+ * @dq: Default Queue
+ * @length: ring size
+ * @entry_size: size of each entry in DEFQ
+ * @is_header: Header or Data DEFQ
+ * @ulp_num: Bind to which ULP
+ *
+ * Create HDR/Data DEFQ for the passed ULP. Unsolicited PDUs are posted
+ * on this queue by the FW
+ *
+ * return
+ *	Success: 0
+ *	Failure: Non-Zero Value
+ *
+ **/
 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 				    struct be_queue_info *cq,
 				    struct be_queue_info *dq, int length,
-				    int entry_size)
+				    int entry_size, uint8_t is_header,
+				    uint8_t ulp_num)
 {
 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 	struct be_defq_create_req *req = embedded_payload(wrb);
 	struct be_dma_mem *q_mem = &dq->dma_mem;
+	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 	void *ctxt = &req->context;
 	int status;
@@ -961,17 +1126,41 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 			OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
-	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
-	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
-		      1);
-	AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
-		      PCI_FUNC(ctrl->pdev->devfn));
-	AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
-		      be_encoded_q_len(length / sizeof(struct phys_addr)));
-	AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
-		      ctxt, entry_size);
-	AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
-		      cq->id);
+	if (phba->fw_config.dual_ulp_aware) {
+		req->ulp_num = ulp_num;
+		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+	}
+
+	if (is_chip_be2_be3r(phba)) {
+		AMAP_SET_BITS(struct amap_be_default_pdu_context,
+			      rx_pdid, ctxt, 0);
+		AMAP_SET_BITS(struct amap_be_default_pdu_context,
+			      rx_pdid_valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_be_default_pdu_context,
+			      pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
+		AMAP_SET_BITS(struct amap_be_default_pdu_context,
+			      ring_size, ctxt,
+			      be_encoded_q_len(length /
+			      sizeof(struct phys_addr)));
+		AMAP_SET_BITS(struct amap_be_default_pdu_context,
+			      default_buffer_size,
			      ctxt, entry_size);
+		AMAP_SET_BITS(struct amap_be_default_pdu_context,
+			      cq_id_recv, ctxt, cq->id);
+	} else {
+		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+			      rx_pdid, ctxt, 0);
+		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+			      rx_pdid_valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+			      ring_size, ctxt,
+			      be_encoded_q_len(length /
+			      sizeof(struct phys_addr)));
+		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+			      default_buffer_size, ctxt, entry_size);
+		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+			      cq_id_recv, ctxt, cq->id);
+	}
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -979,22 +1168,53 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 	status = be_mbox_notify(ctrl);
 	if (!status) {
+		struct be_ring *defq_ring;
 		struct be_defq_create_resp *resp = embedded_payload(wrb);
 		dq->id = le16_to_cpu(resp->id);
 		dq->created = true;
+		if (is_header)
+			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
+		else
+			defq_ring = &phba->phwi_ctrlr->
+				    default_pdu_data[ulp_num];
+
+		defq_ring->id = dq->id;
+
+		if (!phba->fw_config.dual_ulp_aware) {
+			defq_ring->ulp_num = BEISCSI_ULP0;
+			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
+		} else {
+			defq_ring->ulp_num = resp->ulp_num;
+			defq_ring->doorbell_offset = resp->doorbell_offset;
+		}
 	}
 	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
-int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
-		       struct be_queue_info *wrbq)
+/**
+ * be_cmd_wrbq_create()- Create WRBQ
+ * @ctrl: ptr to ctrl_info
+ * @q_mem: memory details for the queue
+ * @wrbq: queue info
+ * @pwrb_context: ptr to wrb_context
+ * @ulp_num: ULP on which the WRBQ is to be created
+ *
+ * Create WRBQ on the passed ULP_NUM.
+ *
+ **/
+int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
+			struct be_dma_mem *q_mem,
+			struct be_queue_info *wrbq,
+			struct hwi_wrb_context *pwrb_context,
+			uint8_t ulp_num)
 {
 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 	struct be_wrbq_create_req *req = embedded_payload(wrb);
 	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
+	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 	int status;
 	spin_lock(&ctrl->mbox_lock);
@@ -1005,17 +1225,78 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
 		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+	if (phba->fw_config.dual_ulp_aware) {
+		req->ulp_num = ulp_num;
+		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
+		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
+	}
+
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
 	status = be_mbox_notify(ctrl);
 	if (!status) {
 		wrbq->id = le16_to_cpu(resp->cid);
 		wrbq->created = true;
+
+		pwrb_context->cid = wrbq->id;
+		if (!phba->fw_config.dual_ulp_aware) {
+			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
+			pwrb_context->ulp_num = BEISCSI_ULP0;
+		} else {
+			pwrb_context->ulp_num = resp->ulp_num;
+			pwrb_context->doorbell_offset = resp->doorbell_offset;
+		}
 	}
 	spin_unlock(&ctrl->mbox_lock);
 	return status;
 }
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+				    struct be_dma_mem *q_mem)
+{
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_post_template_pages_req *req = embedded_payload(wrb);
+	int status;
+
+	spin_lock(&ctrl->mbox_lock);
+
+	memset(wrb, 0, sizeof(*wrb));
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));
+
+	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+	status = be_mbox_notify(ctrl);
+	spin_unlock(&ctrl->mbox_lock);
+	return status;
+}
+
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
+{
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_remove_template_pages_req *req = embedded_payload(wrb);
+	int status;
+
+	spin_lock(&ctrl->mbox_lock);
+
+	memset(wrb, 0, sizeof(*wrb));
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			   OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
+			   sizeof(*req));
+
+	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
+
+	status = be_mbox_notify(ctrl);
+	spin_unlock(&ctrl->mbox_lock);
+	return status;
+}
+
 int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
 				struct be_dma_mem *q_mem,
 				u32 page_offset, u32 num_pages)
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 23397d51ac5..cc7405c0eca 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
@@ -40,6 +40,7 @@ struct be_mcc_wrb {
 	u32 tag1;		/* dword 3 */
 	u32 rsvd;		/* dword 4 */
 	union {
+#define EMBED_MBX_MAX_PAYLOAD_SIZE	220
 		u8 embedded_payload[236]; /* used by embedded cmds */
 		struct be_sge sgl[19];    /* used by non-embedded cmds */
 	} payload;
@@ -52,6 +53,10 @@ struct be_mcc_wrb {
 /* Completion Status */
 #define MCC_STATUS_SUCCESS 0x0
+#define MCC_STATUS_FAILED 0x1
+#define MCC_STATUS_ILLEGAL_REQUEST 0x2
+#define MCC_STATUS_ILLEGAL_FIELD 0x3
+#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
 #define CQE_STATUS_COMPL_MASK 0xFFFF
 #define CQE_STATUS_COMPL_SHIFT 0	/* bits 0 - 15 */
@@ -66,6 +71,7 @@ struct be_mcc_wrb {
 #define BEISCSI_FW_MBX_TIMEOUT	100
 /* MBOX Command VER */
+#define MBX_CMD_VER1	0x01
 #define MBX_CMD_VER2	0x02
 struct be_mcc_compl {
@@ -98,7 +104,7 @@ struct be_mcc_compl {
/********** MCC door bell ************/
 #define DB_MCCQ_OFFSET 0x140
-#define DB_MCCQ_RING_ID_MASK 0x7FF		/* bits 0 - 10 */
+#define DB_MCCQ_RING_ID_MASK 0xFFFF		/* bits 0 - 15 */
/* Number of entries posted */
 #define DB_MCCQ_NUM_POSTED_SHIFT 16		/* bits 16 - 29 */
@@ -118,7 +124,8 @@ struct be_async_event_trailer {
 enum {
 	ASYNC_EVENT_LINK_DOWN = 0x0,
-	ASYNC_EVENT_LINK_UP = 0x1
+	ASYNC_EVENT_LINK_UP = 0x1,
+	ASYNC_EVENT_LOGICAL = 0x2
 };
/**
@@ -130,6 +137,9 @@ struct be_async_event_link_state {
 	u8 port_link_status;
 	u8 port_duplex;
 	u8 port_speed;
+#define BEISCSI_PHY_LINK_FAULT_NONE	0x00
+#define BEISCSI_PHY_LINK_FAULT_LOCAL	0x01
+#define BEISCSI_PHY_LINK_FAULT_REMOTE	0x02
 	u8 port_fault;
 	u8 rsvd0[7];
 	struct be_async_event_trailer trailer;
@@ -154,6 +164,8 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_CQ_CREATE				12
 #define OPCODE_COMMON_EQ_CREATE				13
 #define OPCODE_COMMON_MCC_CREATE			21
+#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS	24
+#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS	25
 #define OPCODE_COMMON_GET_CNTL_ATTRIBUTES		32
 #define OPCODE_COMMON_GET_FW_VERSION			35
 #define OPCODE_COMMON_MODIFY_EQ_DELAY			41
@@ -209,6 +221,10 @@ struct phys_addr {
 	u32 hi;
 };
+struct virt_addr {
+	u32 lo;
+	u32 hi;
+};
/**************************
* BE Command definitions *
**************************/
@@ -256,6 +272,12 @@ struct be_cmd_resp_eq_create {
 	u16 rsvd0;		/* sword */
 } __packed;
+struct be_set_eqd {
+	u32 eq_id;
+	u32 phase;
+	u32 delay_multiplier;
+} __packed;
+
 struct mgmt_chap_format {
 	u32 flags;
 	u8  intr_chap_name[256];
@@ -607,7 +629,7 @@ struct be_cmd_req_modify_eq_delay {
 		u32 eq_id;
 		u32 phase;
 		u32 delay_multiplier;
-	} delay[8];
+	} delay[MAX_CPUS];
 } __packed;
/******************** Get MAC ADDR *******************/
@@ -693,10 +715,14 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
 void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
+int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
+			    int num);
 int beiscsi_mccq_compl(struct beiscsi_hba *phba,
-		uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
+		uint32_t tag, struct be_mcc_wrb **wrb,
+		struct be_dma_mem *mbx_cmd_mem);
/*ISCSI Functuions */
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
 struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
 struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
@@ -713,7 +739,13 @@ int be_mbox_notify(struct be_ctrl_info *ctrl);
 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 				    struct be_queue_info *cq,
 				    struct be_queue_info *dq, int length,
-				    int entry_size);
+				    int entry_size, uint8_t is_header,
+				    uint8_t ulp_num);
+
+int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
+				    struct be_dma_mem *q_mem);
+
+int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl);
 int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
 				struct be_dma_mem *q_mem, u32 page_offset,
@@ -722,7 +754,9 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
 int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
 int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
-		       struct be_queue_info *wrbq);
+		       struct be_queue_info *wrbq,
+		       struct hwi_wrb_context *pwrb_context,
+		       uint8_t ulp_num);
 bool is_link_state_evt(u32 trailer);
@@ -751,11 +785,25 @@ struct amap_be_default_pdu_context {
 	u8 rsvd4[32];		/* dword 3 */
 } __packed;
+struct amap_default_pdu_context_ext {
+	u8 rsvd0[16];		/* dword 0 */
+	u8 ring_size[4];	/* dword 0 */
+	u8 rsvd1[12];		/* dword 0 */
+	u8 rsvd2[22];		/* dword 1 */
+	u8 rx_pdid[9];		/* dword 1 */
+	u8 rx_pdid_valid;	/* dword 1 */
+	u8 default_buffer_size[16];	/* dword 2 */
+	u8 cq_id_recv[16];	/* dword 2 */
+	u8 rsvd3[32];		/* dword 3 */
+} __packed;
+
 struct be_defq_create_req {
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
 	u8 ulp_num;
-	u8 rsvd0;
+#define BEISCSI_DUAL_ULP_AWARE_BIT	0	/* Byte 3 - Bit 0 */
+#define BEISCSI_BIND_Q_TO_ULP_BIT	1	/* Byte 3 - Bit 1 */
+	u8 dua_feature;
 	struct be_default_pdu_context context;
 	struct phys_addr pages[8];
 } __packed;
@@ -763,6 +811,27 @@ struct be_defq_create_req {
 struct be_defq_create_resp {
 	struct be_cmd_req_hdr hdr;
 	u16 id;
+	u8 rsvd0;
+	u8 ulp_num;
+	u32 doorbell_offset;
+	u16 register_set;
+	u16 doorbell_format;
+} __packed;
+
+struct be_post_template_pages_req {
+	struct be_cmd_req_hdr hdr;
+	u16 num_pages;
+#define BEISCSI_TEMPLATE_HDR_TYPE_ISCSI	0x1
+	u16 type;
+	struct phys_addr scratch_pa;
+	struct virt_addr scratch_va;
+	struct virt_addr pages_va;
+	struct phys_addr pages[16];
+} __packed;
+
+struct be_remove_template_pages_req {
+	struct be_cmd_req_hdr hdr;
+	u16 type;
 	u16 rsvd0;
 } __packed;
@@ -779,14 +848,18 @@ struct be_wrbq_create_req {
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
 	u8 ulp_num;
-	u8 rsvd0;
+	u8 dua_feature;
 	struct phys_addr pages[8];
 } __packed;
 struct be_wrbq_create_resp {
 	struct be_cmd_resp_hdr resp_hdr;
 	u16 cid;
-	u16 rsvd0;
+	u8 rsvd0;
+	u8 ulp_num;
+	u32 doorbell_offset;
+	u16 register_set;
+	u16 doorbell_format;
 } __packed;
 #define SOL_CID_MASK		0x0000FFC0
@@ -896,7 +969,7 @@ struct amap_it_dmsg_cqe_v2 {
 * stack to notify the
 * controller of a posted Work Request Block
 */
-#define DB_WRB_POST_CID_MASK		0x3FF	/* bits 0 - 9 */
+#define DB_WRB_POST_CID_MASK		0xFFFF	/* bits 0 - 16 */
 #define DB_DEF_PDU_WRB_INDEX_MASK	0xFF	/* bits 0 - 9 */
 #define DB_DEF_PDU_WRB_INDEX_SHIFT	16
@@ -941,6 +1014,26 @@ struct tcp_connect_and_offload_in {
 	u8 rsvd0[3];
 } __packed;
+struct tcp_connect_and_offload_in_v1 {
+	struct be_cmd_req_hdr hdr;
+	struct ip_addr_format ip_address;
+	u16 tcp_port;
+	u16 cid;
+	u16 cq_id;
+	u16 defq_id;
+	struct phys_addr dataout_template_pa;
+	u16 hdr_ring_id;
+	u16 data_ring_id;
+	u8 do_offload;
+	u8 ifd_state;
+	u8 rsvd0[2];
+	u16 tcp_window_size;
+	u8 tcp_window_scale_count;
+	u8 rsvd1;
+	u32 tcp_mss:24;
+	u8 rsvd2;
+} __packed;
+
 struct tcp_connect_and_offload_out {
 	struct be_cmd_resp_hdr hdr;
 	u32 connection_handle;
@@ -954,8 +1047,8 @@ struct be_mcc_wrb_context {
 	int *users_final_status;
 } __packed;
-#define DB_DEF_PDU_RING_ID_MASK	0x3FF	/* bits 0 - 9 */
-#define DB_DEF_PDU_CQPROC_MASK		0x3FFF	/* bits 0 - 9 */
+#define DB_DEF_PDU_RING_ID_MASK	0x3FFF	/* bits 0 - 13 */
+#define DB_DEF_PDU_CQPROC_MASK		0x3FFF	/* bits 16 - 29 */
 #define DB_DEF_PDU_REARM_SHIFT		14
 #define DB_DEF_PDU_EVENT_SHIFT		15
 #define DB_DEF_PDU_CQPROC_SHIFT	16
@@ -981,6 +1074,7 @@ union tcp_upload_params {
 } __packed;
 struct be_ulp_fw_cfg {
+#define BEISCSI_ULP_ISCSI_INI_MODE	0x10
 	u32 ulp_mode;
 	u32 etx_base;
 	u32 etx_count;
@@ -996,14 +1090,26 @@ struct be_ulp_fw_cfg {
 	u32 icd_count;
 };
+struct be_ulp_chain_icd {
+	u32 chain_base;
+	u32 chain_count;
+};
+
 struct be_fw_cfg {
 	struct be_cmd_req_hdr hdr;
 	u32 be_config_number;
 	u32 asic_revision;
 	u32 phys_port;
+#define BEISCSI_FUNC_ISCSI_INI_MODE	0x10
+#define BEISCSI_FUNC_DUA_MODE	0x800
 	u32 function_mode;
 	struct be_ulp_fw_cfg ulp[2];
 	u32 function_caps;
+	u32 cqid_base;
+	u32 cqid_count;
+	u32 eqid_base;
+	u32 eqid_count;
+	struct be_ulp_chain_icd chain_icd[2];
 } __packed;
 struct be_cmd_get_all_if_id_req {
@@ -1241,4 +1347,5 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
 void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
 			u8 subsystem, u8 opcode, int cmd_len);
+void be2iscsi_fail_session(struct iscsi_cls_session *cls_session);
 #endif /* !BEISCSI_CMDS_H */
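/*
 * Editor's note: several doorbell masks above grow (e.g. DB_WRB_POST_CID_MASK
 * from 0x3FF to 0xFFFF) so newer adapters can address more CIDs and rings.
 * Composing a WRB-post doorbell with the widened CID field (mask and shift
 * names from this header; the posted-count field position is illustrative):
 */
static inline u32 example_wrb_doorbell(u16 cid, u8 wrb_index)
{
	u32 val = cid & DB_WRB_POST_CID_MASK;		/* now 16 bits */

	val |= (wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
		<< DB_DEF_PDU_WRB_INDEX_SHIFT;
	val |= 1 << 24;		/* one WRB posted; field position illustrative */
	return val;
}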
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 214d691adb5..fd284ff36ec 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
@@ -58,10 +58,15 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
 	}
 	beiscsi_ep = ep->dd_data;
 	phba = beiscsi_ep->phba;
-	shost = phba->shost;
-	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-		    "BS_%d : In beiscsi_session_create\n");
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : PCI_ERROR Recovery\n");
+		return NULL;
+	} else {
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+			    "BS_%d : In beiscsi_session_create\n");
+	}
 	if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -74,6 +79,7 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
 		cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
 	}
+	shost = phba->shost;
 	cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
 					  shost, cmds_max,
 					  sizeof(*beiscsi_sess),
@@ -161,7 +167,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
 				struct beiscsi_conn *beiscsi_conn,
 				unsigned int cid)
 {
-	if (phba->conn_table[cid]) {
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+	if (phba->conn_table[cri_index]) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
 			    "BS_%d : Connection table already occupied. Detected clash\n");
@@ -169,9 +177,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
 	} else {
 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
 			    "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
-			    cid, beiscsi_conn);
+			    cri_index, beiscsi_conn);
-		phba->conn_table[cid] = beiscsi_conn;
+		phba->conn_table[cri_index] = beiscsi_conn;
 	}
 	return 0;
 }
@@ -192,6 +200,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
 	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	struct hwi_controller *phwi_ctrlr = phba->phwi_ctrlr;
+	struct hwi_wrb_context *pwrb_context;
 	struct beiscsi_endpoint *beiscsi_ep;
 	struct iscsi_endpoint *ep;
@@ -212,9 +222,13 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
 		return -EEXIST;
 	}
+	pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(
+			beiscsi_ep->ep_cid)];
+
 	beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
 	beiscsi_conn->ep = beiscsi_ep;
 	beiscsi_ep->conn = beiscsi_conn;
+	beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset;
 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
 		    "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
@@ -263,13 +277,17 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
 void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
 {
-	struct be_cmd_get_if_info_resp if_info;
+	struct be_cmd_get_if_info_resp *if_info;
-	if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info))
+	if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) {
 		beiscsi_create_ipv4_iface(phba);
+		kfree(if_info);
+	}
-	if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info))
+	if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) {
 		beiscsi_create_ipv6_iface(phba);
+		kfree(if_info);
+	}
 }
 void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
@@ -369,7 +387,7 @@ beiscsi_set_vlan_tag(struct Scsi_Host *shost,
 		break;
 	default:
 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-			    "BS_%d : Unkown Param Type : %d\n",
+			    "BS_%d : Unknown Param Type : %d\n",
 			    iface_param->param);
 		return -ENOSYS;
 	}
@@ -465,6 +483,12 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
 	uint32_t rm_len = dt_len;
 	int ret = 0 ;
+	if (phba->state & BE_ADAPTER_PCI_ERR) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : In PCI_ERROR Recovery\n"); + return -EBUSY; + } + nla_for_each_attr(attrib, data, dt_len, rm_len) { iface_param = nla_data(attrib); @@ -510,11 +534,9 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba, struct iscsi_iface *iface, int param, char *buf) { - struct be_cmd_get_if_info_resp if_info; + struct be_cmd_get_if_info_resp *if_info; int len, ip_type = BE2_IPV4; - memset(&if_info, 0, sizeof(if_info)); - if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) ip_type = BE2_IPV6; @@ -524,45 +546,46 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba, switch (param) { case ISCSI_NET_PARAM_IPV4_ADDR: - len = sprintf(buf, "%pI4\n", &if_info.ip_addr.addr); + len = sprintf(buf, "%pI4\n", if_info->ip_addr.addr); break; case ISCSI_NET_PARAM_IPV6_ADDR: - len = sprintf(buf, "%pI6\n", &if_info.ip_addr.addr); + len = sprintf(buf, "%pI6\n", if_info->ip_addr.addr); break; case ISCSI_NET_PARAM_IPV4_BOOTPROTO: - if (!if_info.dhcp_state) + if (!if_info->dhcp_state) len = sprintf(buf, "static\n"); else len = sprintf(buf, "dhcp\n"); break; case ISCSI_NET_PARAM_IPV4_SUBNET: - len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask); + len = sprintf(buf, "%pI4\n", if_info->ip_addr.subnet_mask); break; case ISCSI_NET_PARAM_VLAN_ENABLED: len = sprintf(buf, "%s\n", - (if_info.vlan_priority == BEISCSI_VLAN_DISABLE) + (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) ? "Disabled\n" : "Enabled\n"); break; case ISCSI_NET_PARAM_VLAN_ID: - if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE) - return -EINVAL; + if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) + len = -EINVAL; else len = sprintf(buf, "%d\n", - (if_info.vlan_priority & + (if_info->vlan_priority & ISCSI_MAX_VLAN_ID)); break; case ISCSI_NET_PARAM_VLAN_PRIORITY: - if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE) - return -EINVAL; + if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) + len = -EINVAL; else len = sprintf(buf, "%d\n", - ((if_info.vlan_priority >> 13) & + ((if_info->vlan_priority >> 13) & ISCSI_MAX_VLAN_PRIORITY)); break; default: WARN_ON(1); } + kfree(if_info); return len; } @@ -575,6 +598,12 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface, struct be_cmd_get_def_gateway_resp gateway; int len = -ENOSYS; + if (phba->state & BE_ADAPTER_PCI_ERR) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : In PCI_ERROR Recovery\n"); + return -EBUSY; + } + switch (param) { case ISCSI_NET_PARAM_IPV4_ADDR: case ISCSI_NET_PARAM_IPV4_SUBNET: @@ -670,8 +699,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn, session->max_burst = 262144; break; case ISCSI_PARAM_MAX_XMIT_DLENGTH: - if ((conn->max_xmit_dlength > 65536) || - (conn->max_xmit_dlength == 0)) + if (conn->max_xmit_dlength > 65536) conn->max_xmit_dlength = 65536; default: return 0; @@ -725,7 +753,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost) struct beiscsi_hba *phba = iscsi_host_priv(shost); struct iscsi_cls_host *ihost = shost->shost_data; - ihost->port_state = (phba->state == BE_ADAPTER_UP) ? + ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ? 
ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN; } @@ -765,7 +793,7 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost) ihost->port_speed = ISCSI_PORT_SPEED_10MBPS; break; case BE2ISCSI_LINK_SPEED_100MBPS: - ihost->port_speed = BE2ISCSI_LINK_SPEED_100MBPS; + ihost->port_speed = ISCSI_PORT_SPEED_100MBPS; break; case BE2ISCSI_LINK_SPEED_1GBPS: ihost->port_speed = ISCSI_PORT_SPEED_1GBPS; @@ -793,9 +821,16 @@ int beiscsi_get_host_param(struct Scsi_Host *shost, struct beiscsi_hba *phba = iscsi_host_priv(shost); int status = 0; - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, - "BS_%d : In beiscsi_get_host_param," - " param= %d\n", param); + + if (phba->state & BE_ADAPTER_PCI_ERR) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : In PCI_ERROR Recovery\n"); + return -EBUSY; + } else { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_get_host_param," + " param = %d\n", param); + } switch (param) { case ISCSI_HOST_PARAM_HWADDRESS: @@ -838,7 +873,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) struct be_cmd_get_nic_conf_resp resp; int rc; - if (strlen(phba->mac_address)) + if (phba->mac_addr_set) return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); memset(&resp, 0, sizeof(resp)); @@ -846,6 +881,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) if (rc) return rc; + phba->mac_addr_set = true; memcpy(phba->mac_address, resp.mac_address, ETH_ALEN); return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN); } @@ -921,6 +957,10 @@ static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn, session->max_r2t); AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params, (conn->exp_statsn - 1)); + AMAP_SET_BITS(struct amap_beiscsi_offload_params, + max_recv_data_segment_length, params, + conn->max_recv_dlength); + } /** @@ -933,10 +973,19 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn) struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct beiscsi_endpoint *beiscsi_ep; struct beiscsi_offload_params params; + struct beiscsi_hba *phba; - beiscsi_log(beiscsi_conn->phba, KERN_INFO, - BEISCSI_LOG_CONFIG, - "BS_%d : In beiscsi_conn_start\n"); + phba = ((struct beiscsi_conn *)conn->dd_data)->phba; + + if (phba->state & BE_ADAPTER_PCI_ERR) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : In PCI_ERROR Recovery\n"); + return -EBUSY; + } else { + beiscsi_log(beiscsi_conn->phba, KERN_INFO, + BEISCSI_LOG_CONFIG, + "BS_%d : In beiscsi_conn_start\n"); + } memset(¶ms, 0, sizeof(struct beiscsi_offload_params)); beiscsi_ep = beiscsi_conn->ep; @@ -958,15 +1007,31 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn) */ static int beiscsi_get_cid(struct beiscsi_hba *phba) { - unsigned short cid = 0xFFFF; - - if (!phba->avlbl_cids) - return cid; - - cid = phba->cid_array[phba->cid_alloc++]; - if (phba->cid_alloc == phba->params.cxns_per_ctrl) - phba->cid_alloc = 0; - phba->avlbl_cids--; + unsigned short cid = 0xFFFF, cid_from_ulp; + struct ulp_cid_info *cid_info = NULL; + uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1; + + /* Find the ULP which has more CID available */ + cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ? + BEISCSI_ULP0_AVLBL_CID(phba) : 0; + cid_avlbl_ulp1 = (phba->cid_array_info[BEISCSI_ULP1]) ? + BEISCSI_ULP1_AVLBL_CID(phba) : 0; + cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ? 
+ BEISCSI_ULP0 : BEISCSI_ULP1; + + if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) { + cid_info = phba->cid_array_info[cid_from_ulp]; + if (!cid_info->avlbl_cids) + return cid; + + cid = cid_info->cid_array[cid_info->cid_alloc++]; + + if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT( + phba, cid_from_ulp)) + cid_info->cid_alloc = 0; + + cid_info->avlbl_cids--; + } return cid; } @@ -977,10 +1042,22 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba) */ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid) { - phba->avlbl_cids++; - phba->cid_array[phba->cid_free++] = cid; - if (phba->cid_free == phba->params.cxns_per_ctrl) - phba->cid_free = 0; + uint16_t cid_post_ulp; + struct hwi_controller *phwi_ctrlr; + struct hwi_wrb_context *pwrb_context; + struct ulp_cid_info *cid_info = NULL; + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + + phwi_ctrlr = phba->phwi_ctrlr; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + cid_post_ulp = pwrb_context->ulp_num; + + cid_info = phba->cid_array_info[cid_post_ulp]; + cid_info->avlbl_cids++; + + cid_info->cid_array[cid_info->cid_free++] = cid; + if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp)) + cid_info->cid_free = 0; } /** @@ -990,9 +1067,27 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid) static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep) { struct beiscsi_hba *phba = beiscsi_ep->phba; + struct beiscsi_conn *beiscsi_conn; beiscsi_put_cid(phba, beiscsi_ep->ep_cid); beiscsi_ep->phba = NULL; + phba->ep_array[BE_GET_CRI_FROM_CID + (beiscsi_ep->ep_cid)] = NULL; + + /** + * Check if any connection resource allocated by driver + * is to be freed.This case occurs when target redirection + * or connection retry is done. 
+ **/ + if (!beiscsi_ep->conn) + return; + + beiscsi_conn = beiscsi_ep->conn; + if (beiscsi_conn->login_in_progress) { + beiscsi_free_mgmt_task_handles(beiscsi_conn, + beiscsi_conn->task); + beiscsi_conn->login_in_progress = 0; + } } /** @@ -1009,10 +1104,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, { struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; struct beiscsi_hba *phba = beiscsi_ep->phba; - struct be_mcc_wrb *wrb; struct tcp_connect_and_offload_out *ptcpcnct_out; struct be_dma_mem nonemb_cmd; - unsigned int tag; + unsigned int tag, req_memsize; int ret = -ENOMEM; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, @@ -1029,19 +1123,18 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, "BS_%d : In beiscsi_open_conn, ep_cid=%d\n", beiscsi_ep->ep_cid); - phba->ep_array[beiscsi_ep->ep_cid - - phba->fw_config.iscsi_cid_start] = ep; - if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start + - phba->params.cxns_per_ctrl * 2)) { - - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, - "BS_%d : Failed in allocate iscsi cid\n"); - goto free_ep; - } + phba->ep_array[BE_GET_CRI_FROM_CID + (beiscsi_ep->ep_cid)] = ep; beiscsi_ep->cid_vld = 0; + + if (is_chip_be2_be3r(phba)) + req_memsize = sizeof(struct tcp_connect_and_offload_in); + else + req_memsize = sizeof(struct tcp_connect_and_offload_in_v1); + nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, - sizeof(struct tcp_connect_and_offload_in), + req_memsize, &nonemb_cmd.dma); if (nonemb_cmd.va == NULL) { @@ -1049,35 +1142,38 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, "BS_%d : Failed to allocate memory for" " mgmt_open_connection\n"); - beiscsi_put_cid(phba, beiscsi_ep->ep_cid); + beiscsi_free_ep(beiscsi_ep); return -ENOMEM; } - nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in); + nonemb_cmd.size = req_memsize; memset(nonemb_cmd.va, 0, nonemb_cmd.size); tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); - if (!tag) { + if (tag <= 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : mgmt_open_connection Failed for cid=%d\n", beiscsi_ep->ep_cid); - beiscsi_put_cid(phba, beiscsi_ep->ep_cid); pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); + beiscsi_free_ep(beiscsi_ep); return -EAGAIN; } - ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL); + ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BS_%d : mgmt_open_connection Failed"); - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, - nonemb_cmd.va, nonemb_cmd.dma); - goto free_ep; + if (ret != -EBUSY) + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + + beiscsi_free_ep(beiscsi_ep); + return ret; } - ptcpcnct_out = embedded_payload(wrb); + ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va; beiscsi_ep = ep->dd_data; beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; beiscsi_ep->cid_vld = 1; @@ -1087,10 +1183,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); return 0; - -free_ep: - beiscsi_free_ep(beiscsi_ep); - return -EBUSY; } /** @@ -1119,7 +1211,19 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, return ERR_PTR(ret); } - if (phba->state != BE_ADAPTER_UP) { + if (beiscsi_error(phba)) { + ret = -EIO; + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BS_%d : The FW state Not Stable!!!\n"); + return ERR_PTR(ret); + } 
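beiscsi_open_conn() above now sizes its non-embedded DMA buffer by chip family, using the larger tcp_connect_and_offload_in_v1 layout when is_chip_be2_be3r() is false. A minimal userspace model of that dispatch follows; the struct bodies, the asic_gen parameter, and the predicate's internals are assumptions standing in for the real definitions, which this hunk does not show.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the two request layouts; only their differing
 * sizes matter here (the real fields are in be_cmds.h). */
struct tcp_connect_and_offload_in    { unsigned char body[40]; };
struct tcp_connect_and_offload_in_v1 { unsigned char body[56]; };

/* Model of is_chip_be2_be3r(): true for the older BE2/BE3 families. */
static bool is_be2_be3r(int asic_gen)
{
        return asic_gen <= 3;
}

/* Choose req_memsize before allocating, as beiscsi_open_conn() does. */
static size_t offload_req_size(int asic_gen)
{
        return is_be2_be3r(asic_gen) ?
                sizeof(struct tcp_connect_and_offload_in) :
                sizeof(struct tcp_connect_and_offload_in_v1);
}

int main(void)
{
        printf("BE3: %zu bytes, SkyHawk: %zu bytes\n",
               offload_req_size(3), offload_req_size(4));
        return 0;
}

Sizing the request by family keeps a single open-connection path for both generations while letting the v1 command carry the extra TCP window and MSS fields added earlier in this diff.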
+ + if (phba->state & BE_ADAPTER_PCI_ERR) { + ret = -EBUSY; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : In PCI_ERROR Recovery\n"); + return ERR_PTR(ret); + } else if (phba->state & BE_ADAPTER_LINK_DOWN) { ret = -EBUSY; beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BS_%d : The Adapter Port state is Down!!!\n"); @@ -1201,8 +1305,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag) static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba, unsigned int cid) { - if (phba->conn_table[cid]) - phba->conn_table[cid] = NULL; + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + + if (phba->conn_table[cri_index]) + phba->conn_table[cri_index] = NULL; else { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : Connection table Not occupied.\n"); @@ -1242,6 +1348,12 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep) tcp_upload_flag = CONNECTION_UPLOAD_ABORT; } + if (phba->state & BE_ADAPTER_PCI_ERR) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, + "BS_%d : PCI_ERROR Recovery\n"); + goto free_ep; + } + tag = mgmt_invalidate_connection(phba, beiscsi_ep, beiscsi_ep->ep_cid, mgmt_invalidate_flag, @@ -1254,6 +1366,8 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep) beiscsi_mccq_compl(phba, tag, NULL, NULL); beiscsi_close_conn(beiscsi_ep, tcp_upload_flag); +free_ep: + msleep(BEISCSI_LOGOUT_SYNC_DELAY); beiscsi_free_ep(beiscsi_ep); beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep); diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h index 38eab723215..31ddc849439 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.h +++ b/drivers/scsi/be2iscsi/be_iscsi.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 4e2733d2300..56467df3d6d 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
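From this point in the diff onward, conn_table[] and ep_array[] are indexed through BE_GET_CRI_FROM_CID() rather than by raw CID. With two ULPs the firmware's CID ranges are no longer contiguous, so the driver needs a dense 0..cxns_per_ctrl-1 index for its per-connection tables. The sketch below models that translation with a lookup table; the table shape and the init routine are illustrative assumptions, since the macro's real definition lives in the driver headers, outside this diff.

#include <stdint.h>

#define MAX_HW_CID 1024                 /* illustrative bound only */

/* cid -> cri table, filled once the per-ULP CID ranges are known;
 * models what BE_GET_CRI_FROM_CID() resolves to. */
static uint16_t cid_to_cri[MAX_HW_CID];

static void init_cri_map(uint16_t cid_base, uint16_t cid_count,
                         uint16_t cri_base)
{
        for (uint16_t i = 0; i < cid_count; i++)
                cid_to_cri[cid_base + i] = cri_base + i;
}

static uint16_t get_cri_from_cid(uint16_t cid)
{
        return cid_to_cri[cid];
}

int main(void)
{
        init_cri_map(64, 128, 0);       /* ULP0: CIDs 64..191  -> CRIs 0..127   */
        init_cri_map(512, 128, 128);    /* ULP1: CIDs 512..639 -> CRIs 128..255 */
        return get_cri_from_cid(513) == 129 ? 0 : 1;
}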
* * This program is free software; you can redistribute it and/or @@ -149,14 +149,25 @@ BEISCSI_RW_ATTR(log_enable, 0x00, "\t\t\t\tMiscellaneous Events : 0x04\n" "\t\t\t\tError Handling : 0x08\n" "\t\t\t\tIO Path Events : 0x10\n" - "\t\t\t\tConfiguration Path : 0x20\n"); + "\t\t\t\tConfiguration Path : 0x20\n" + "\t\t\t\tiSCSI Protocol : 0x40\n"); DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); +DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); +DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL); +DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO, + beiscsi_active_session_disp, NULL); +DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO, + beiscsi_free_session_disp, NULL); struct device_attribute *beiscsi_attrs[] = { &dev_attr_beiscsi_log_enable, &dev_attr_beiscsi_drvr_ver, &dev_attr_beiscsi_adapter_family, + &dev_attr_beiscsi_fw_ver, + &dev_attr_beiscsi_active_session_count, + &dev_attr_beiscsi_free_session_count, + &dev_attr_beiscsi_phys_port, NULL, }; @@ -217,24 +228,30 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc) struct invalidate_command_table *inv_tbl; struct be_dma_mem nonemb_cmd; unsigned int cid, tag, num_invalidate; + int rc; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); if (!aborted_task || !aborted_task->sc) { /* we raced */ - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); return SUCCESS; } aborted_io_task = aborted_task->dd_data; if (!aborted_io_task->scsi_cmnd) { /* raced or invalid command */ - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); return SUCCESS; } - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); + /* Invalidate WRB Posted for this Task */ + AMAP_SET_BITS(struct amap_iscsi_wrb, invld, + aborted_io_task->pwrb_handle->pwrb, + 1); + conn = aborted_task->conn; beiscsi_conn = conn->dd_data; phba = beiscsi_conn->phba; @@ -269,9 +286,11 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc) return FAILED; } - beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, - nonemb_cmd.va, nonemb_cmd.dma); + rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); + if (rc != -EBUSY) + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + return iscsi_eh_abort(sc); } @@ -287,13 +306,14 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) struct invalidate_command_table *inv_tbl; struct be_dma_mem nonemb_cmd; unsigned int cid, tag, i, num_invalidate; + int rc; /* invalidate iocbs */ cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) { - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); return FAILED; } conn = session->leadconn; @@ -309,15 +329,20 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) continue; - if (abrt_task->sc->device->lun != abrt_task->sc->device->lun) + if (sc->device->lun != abrt_task->sc->device->lun) continue; + /* Invalidate WRB Posted for this Task */ + AMAP_SET_BITS(struct amap_iscsi_wrb, invld, + abrt_io_task->pwrb_handle->pwrb, + 1); + inv_tbl->cid = cid; inv_tbl->icd = 
abrt_io_task->psgl_handle->sgl_index; num_invalidate++; inv_tbl++; } - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); inv_tbl = phba->inv_tbl; nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, @@ -342,9 +367,10 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) return FAILED; } - beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, - nonemb_cmd.va, nonemb_cmd.dma); + rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); + if (rc != -EBUSY) + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); return iscsi_eh_device_reset(sc); } @@ -573,15 +599,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) pci_set_drvdata(pcidev, phba); phba->interface_handle = 0xFFFFFFFF; - if (iscsi_host_add(shost, &phba->pcidev->dev)) - goto free_devices; - return phba; - -free_devices: - pci_dev_put(phba->pcidev); - iscsi_host_free(phba->shost); - return NULL; } static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) @@ -653,8 +671,19 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev) } pci_set_master(pcidev); - if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) { - ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); + ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); + if (ret) { + ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); + pci_disable_device(pcidev); + return ret; + } else { + ret = pci_set_consistent_dma_mask(pcidev, + DMA_BIT_MASK(32)); + } + } else { + ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64)); if (ret) { dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); pci_disable_device(pcidev); @@ -695,30 +724,85 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) return status; } +/** + * beiscsi_get_params()- Set the config paramters + * @phba: ptr device priv structure + **/ static void beiscsi_get_params(struct beiscsi_hba *phba) { - phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count - - (phba->fw_config.iscsi_cid_count - + BE2_TMFS - + BE2_NOPOUT_REQ)); - phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; - phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2; - phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count; + uint32_t total_cid_count = 0; + uint32_t total_icd_count = 0; + uint8_t ulp_num = 0; + + total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) + + BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1); + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + uint32_t align_mask = 0; + uint32_t icd_post_per_page = 0; + uint32_t icd_count_unavailable = 0; + uint32_t icd_start = 0, icd_count = 0; + uint32_t icd_start_align = 0, icd_count_align = 0; + + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; + icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; + + /* Get ICD count that can be posted on each page */ + icd_post_per_page = (PAGE_SIZE / (BE2_SGE * + sizeof(struct iscsi_sge))); + align_mask = (icd_post_per_page - 1); + + /* Check if icd_start is aligned ICD per page posting */ + if (icd_start % icd_post_per_page) { + icd_start_align = ((icd_start + + icd_post_per_page) & + ~(align_mask)); + phba->fw_config. 
+ iscsi_icd_start[ulp_num] = + icd_start_align; + } + + icd_count_align = (icd_count & ~align_mask); + + /* ICD discarded in the process of alignment */ + if (icd_start_align) + icd_count_unavailable = ((icd_start_align - + icd_start) + + (icd_count - + icd_count_align)); + + /* Updated ICD count available */ + phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count - + icd_count_unavailable); + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : Aligned ICD values\n" + "\t ICD Start : %d\n" + "\t ICD Count : %d\n" + "\t ICD Discarded : %d\n", + phba->fw_config. + iscsi_icd_start[ulp_num], + phba->fw_config. + iscsi_icd_count[ulp_num], + icd_count_unavailable); + break; + } + } + + total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; + phba->params.ios_per_ctrl = (total_icd_count - + (total_cid_count + + BE2_TMFS + BE2_NOPOUT_REQ)); + phba->params.cxns_per_ctrl = total_cid_count; + phba->params.asyncpdus_per_ctrl = total_cid_count; + phba->params.icds_per_ctrl = total_icd_count; phba->params.num_sge_per_io = BE2_SGE; phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; phba->params.eq_timer = 64; - phba->params.num_eq_entries = - (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 - + BE2_TMFS) / 512) + 1) * 512; - phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024) - ? 1024 : phba->params.num_eq_entries; - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : phba->params.num_eq_entries=%d\n", - phba->params.num_eq_entries); - phba->params.num_cq_entries = - (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 - + BE2_TMFS) / 512) + 1) * 512; + phba->params.num_eq_entries = 1024; + phba->params.num_cq_entries = 1024; phba->params.wrbs_per_cxn = 256; } @@ -728,14 +812,23 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba, unsigned char rearm, unsigned char event) { u32 val = 0; - val |= id & DB_EQ_RING_ID_MASK; + if (rearm) val |= 1 << DB_EQ_REARM_SHIFT; if (clr_interrupt) val |= 1 << DB_EQ_CLR_SHIFT; if (event) val |= 1 << DB_EQ_EVNT_SHIFT; + val |= num_processed << DB_EQ_NUM_POPPED_SHIFT; + /* Setting lower order EQ_ID Bits */ + val |= (id & DB_EQ_RING_ID_LOW_MASK); + + /* Setting Higher order EQ_ID Bits */ + val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) & + DB_EQ_RING_ID_HIGH_MASK) + << DB_EQ_HIGH_SET_SHIFT); + iowrite32(val, phba->db_va + DB_EQ_OFFSET); } @@ -797,7 +890,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id) struct be_queue_info *cq; unsigned int num_eq_processed; struct be_eq_obj *pbe_eq; - unsigned long flags; pbe_eq = dev_id; eq = &pbe_eq->q; @@ -806,31 +898,15 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id) phba = pbe_eq->phba; num_eq_processed = 0; - if (blk_iopoll_enabled) { - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) - blk_iopoll_sched(&pbe_eq->iopoll); - - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); - eqe = queue_tail_node(eq); - num_eq_processed++; - } - } else { - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - spin_lock_irqsave(&phba->isr_lock, flags); - pbe_eq->todo_cq = true; - spin_unlock_irqrestore(&phba->isr_lock, flags); - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); - eqe = queue_tail_node(eq); - num_eq_processed++; - } + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] + & EQE_VALID_MASK) { + if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) + 
blk_iopoll_sched(&pbe_eq->iopoll); - if (pbe_eq->todo_cq) - queue_work(phba->wq, &pbe_eq->work_cqs); + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + num_eq_processed++; } if (num_eq_processed) @@ -851,7 +927,6 @@ static irqreturn_t be_isr(int irq, void *dev_id) struct hwi_context_memory *phwi_context; struct be_eq_entry *eqe = NULL; struct be_queue_info *eq; - struct be_queue_info *cq; struct be_queue_info *mcc; unsigned long flags, index; unsigned int num_mcceq_processed, num_ioeq_processed; @@ -877,72 +952,40 @@ static irqreturn_t be_isr(int irq, void *dev_id) num_ioeq_processed = 0; num_mcceq_processed = 0; - if (blk_iopoll_enabled) { - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - if (((eqe->dw[offsetof(struct amap_eq_entry, - resource_id) / 32] & - EQE_RESID_MASK) >> 16) == mcc->id) { - spin_lock_irqsave(&phba->isr_lock, flags); - pbe_eq->todo_mcc_cq = true; - spin_unlock_irqrestore(&phba->isr_lock, flags); - num_mcceq_processed++; - } else { - if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) - blk_iopoll_sched(&pbe_eq->iopoll); - num_ioeq_processed++; - } - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); - eqe = queue_tail_node(eq); - } - if (num_ioeq_processed || num_mcceq_processed) { - if (pbe_eq->todo_mcc_cq) - queue_work(phba->wq, &pbe_eq->work_cqs); - - if ((num_mcceq_processed) && (!num_ioeq_processed)) - hwi_ring_eq_db(phba, eq->id, 0, - (num_ioeq_processed + - num_mcceq_processed) , 1, 1); - else - hwi_ring_eq_db(phba, eq->id, 0, - (num_ioeq_processed + - num_mcceq_processed), 0, 1); - - return IRQ_HANDLED; - } else - return IRQ_NONE; - } else { - cq = &phwi_context->be_cq[0]; - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - - if (((eqe->dw[offsetof(struct amap_eq_entry, - resource_id) / 32] & - EQE_RESID_MASK) >> 16) != cq->id) { - spin_lock_irqsave(&phba->isr_lock, flags); - pbe_eq->todo_mcc_cq = true; - spin_unlock_irqrestore(&phba->isr_lock, flags); - } else { - spin_lock_irqsave(&phba->isr_lock, flags); - pbe_eq->todo_cq = true; - spin_unlock_irqrestore(&phba->isr_lock, flags); - } - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); - eqe = queue_tail_node(eq); + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] + & EQE_VALID_MASK) { + if (((eqe->dw[offsetof(struct amap_eq_entry, + resource_id) / 32] & + EQE_RESID_MASK) >> 16) == mcc->id) { + spin_lock_irqsave(&phba->isr_lock, flags); + pbe_eq->todo_mcc_cq = true; + spin_unlock_irqrestore(&phba->isr_lock, flags); + num_mcceq_processed++; + } else { + if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) + blk_iopoll_sched(&pbe_eq->iopoll); num_ioeq_processed++; } - if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq) + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + } + if (num_ioeq_processed || num_mcceq_processed) { + if (pbe_eq->todo_mcc_cq) queue_work(phba->wq, &pbe_eq->work_cqs); - if (num_ioeq_processed) { + if ((num_mcceq_processed) && (!num_ioeq_processed)) hwi_ring_eq_db(phba, eq->id, 0, - num_ioeq_processed, 1, 1); - return IRQ_HANDLED; - } else - return IRQ_NONE; - } + (num_ioeq_processed + + num_mcceq_processed) , 1, 1); + else + hwi_ring_eq_db(phba, eq->id, 0, + (num_ioeq_processed + + num_mcceq_processed), 0, 1); + + return IRQ_HANDLED; + } else + return IRQ_NONE; } static int beiscsi_init_irqs(struct beiscsi_hba *phba) @@ -1017,22 +1060,31 @@ free_msix_irqs: return ret; } -static 
void hwi_ring_cq_db(struct beiscsi_hba *phba, +void hwi_ring_cq_db(struct beiscsi_hba *phba, unsigned int id, unsigned int num_processed, unsigned char rearm, unsigned char event) { u32 val = 0; - val |= id & DB_CQ_RING_ID_MASK; + if (rearm) val |= 1 << DB_CQ_REARM_SHIFT; + val |= num_processed << DB_CQ_NUM_POPPED_SHIFT; + + /* Setting lower order CQ_ID Bits */ + val |= (id & DB_CQ_RING_ID_LOW_MASK); + + /* Setting Higher order CQ_ID Bits */ + val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) & + DB_CQ_RING_ID_HIGH_MASK) + << DB_CQ_HIGH_SET_SHIFT); + iowrite32(val, phba->db_va + DB_CQ_OFFSET); } static unsigned int beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, struct beiscsi_hba *phba, - unsigned short cid, struct pdu_base *ppdu, unsigned long pdu_len, void *pbuffer, unsigned long buf_len) @@ -1075,9 +1127,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, return 1; } - spin_lock_bh(&session->lock); + spin_lock_bh(&session->back_lock); __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len); - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->back_lock); return 0; } @@ -1144,9 +1196,10 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid) struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; struct wrb_handle *pwrb_handle, *pwrb_handle_tmp; + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[cid]; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (pwrb_context->wrb_handles_available >= 2) { pwrb_handle = pwrb_context->pwrb_handle_base[ pwrb_context->alloc_index]; @@ -1266,8 +1319,10 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, resid = csol_cqe->res_cnt; if (!task->sc) { - if (io_task->scsi_cmnd) + if (io_task->scsi_cmnd) { scsi_dma_unmap(io_task->scsi_cmnd); + io_task->scsi_cmnd = NULL; + } return; } @@ -1304,6 +1359,7 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, conn->rxdata_octets += resid; unmap: scsi_dma_unmap(io_task->scsi_cmnd); + io_task->scsi_cmnd = NULL; iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn); } @@ -1322,8 +1378,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn, hdr->t2retain = 0; hdr->flags = csol_cqe->i_flags; hdr->response = csol_cqe->i_resp; - hdr->exp_cmdsn = csol_cqe->exp_cmdsn; - hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1); + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->dlength[0] = 0; hdr->dlength[1] = 0; @@ -1346,9 +1403,9 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn, hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; hdr->flags = csol_cqe->i_flags; hdr->response = csol_cqe->i_resp; - hdr->exp_cmdsn = csol_cqe->exp_cmdsn; - hdr->max_cmdsn = (csol_cqe->exp_cmdsn + - csol_cqe->cmd_wnd - 1); + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->itt = io_task->libiscsi_itt; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); @@ -1363,35 +1420,29 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, struct hwi_controller *phwi_ctrlr; struct iscsi_task *task; struct beiscsi_io_task *io_task; - struct iscsi_conn *conn = beiscsi_conn->conn; - struct iscsi_session *session = conn->session; - uint16_t wrb_index, cid; + uint16_t wrb_index, cid, cri_index; phwi_ctrlr = phba->phwi_ctrlr; - if (chip_skh_r(phba->pcidev)) { - wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + 
if (is_chip_be2_be3r(phba)) { + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, wrb_idx, psol); - cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, cid, psol); } else { - wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, wrb_idx, psol); - cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, cid, psol); } - pwrb_context = &phwi_ctrlr->wrb_context[ - cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; task = pwrb_handle->pio_handle; io_task = task->dd_data; - spin_lock_bh(&phba->mgmt_sgl_lock); - free_mgmt_sgl_handle(phba, io_task->psgl_handle); - spin_unlock_bh(&phba->mgmt_sgl_lock); - spin_lock_bh(&session->lock); - free_wrb_handle(phba, pwrb_context, pwrb_handle); - spin_unlock_bh(&session->lock); + memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb)); + iscsi_put_task(task); } static void @@ -1406,8 +1457,8 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, hdr = (struct iscsi_nopin *)task->hdr; hdr->flags = csol_cqe->i_flags; hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); - hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn + - csol_cqe->cmd_wnd - 1); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->opcode = ISCSI_OP_NOOP_IN; hdr->itt = io_task->libiscsi_itt; @@ -1418,7 +1469,26 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, struct sol_cqe *psol, struct common_sol_cqe *csol_cqe) { - if (chip_skh_r(phba->pcidev)) { + if (is_chip_be2_be3r(phba)) { + csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, + i_exp_cmd_sn, psol); + csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, + i_res_cnt, psol); + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, + i_cmd_wnd, psol); + csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, + wrb_index, psol); + csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, + cid, psol); + csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, + hw_sts, psol); + csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, + i_resp, psol); + csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, + i_sts, psol); + csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, + i_flags, psol); + } else { csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_exp_cmd_sn, psol); csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, @@ -1429,7 +1499,7 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, cid, psol); csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, hw_sts, psol); - csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_cmd_wnd, psol); if (AMAP_GET_BITS(struct amap_sol_cqe_v2, cmd_cmpl, psol)) @@ -1445,25 +1515,6 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, if (AMAP_GET_BITS(struct amap_sol_cqe_v2, o, psol)) csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; - } else { - csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, - i_exp_cmd_sn, psol); - csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, - i_res_cnt, psol); - csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, - i_cmd_wnd, psol); - csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, - wrb_index, psol); - csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, - cid, psol); - csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, - hw_sts, psol); 
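The adapter_get_sol_cqe() rework above normalizes both completion-entry layouts (amap_sol_cqe for BE2/BE3, amap_sol_cqe_v2 for SkyHawk) into one common_sol_cqe at the CQ-processing boundary, so the completion handlers never test the chip family again. The sketch below illustrates that pattern with flat stand-in structs; the real entries are bit-packed AMAP layouts, and the field lists here are trimmed for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Flat stand-ins for the bit-packed AMAP layouts. */
struct sol_cqe_m    { uint32_t exp_cmdsn, res_cnt, cmd_wnd, cid; };
struct sol_cqe_v2_m { uint32_t cid, exp_cmdsn, cmd_wnd, res_cnt; };

/* Driver-side common view, as in struct common_sol_cqe. */
struct common_sol_m { uint32_t exp_cmdsn, res_cnt, cmd_wnd, cid; };

/* Normalize once per CQE, as adapter_get_sol_cqe() does. */
static void get_sol_cqe(bool be2_be3r, const void *raw,
                        struct common_sol_m *out)
{
        if (be2_be3r) {
                const struct sol_cqe_m *c = raw;
                out->exp_cmdsn = c->exp_cmdsn;
                out->res_cnt   = c->res_cnt;
                out->cmd_wnd   = c->cmd_wnd;
                out->cid       = c->cid;
        } else {
                const struct sol_cqe_v2_m *c = raw;
                out->exp_cmdsn = c->exp_cmdsn;
                out->res_cnt   = c->res_cnt;
                out->cmd_wnd   = c->cmd_wnd;
                out->cid       = c->cid;
        }
}

int main(void)
{
        struct sol_cqe_v2_m cqe = { .cid = 7, .exp_cmdsn = 42,
                                    .cmd_wnd = 8, .res_cnt = 0 };
        struct common_sol_m out;

        get_sol_cqe(false, &cqe, &out);
        printf("cid=%u exp_cmdsn=%u\n", out.cid, out.exp_cmdsn);
        return 0;
}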
- csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, - i_resp, psol); - csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, - i_sts, psol); - csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, - i_flags, psol); } } @@ -1480,14 +1531,15 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, struct iscsi_conn *conn = beiscsi_conn->conn; struct iscsi_session *session = conn->session; struct common_sol_cqe csol_cqe = {0}; + uint16_t cri_index = 0; phwi_ctrlr = phba->phwi_ctrlr; /* Copy the elements to a common structure */ adapter_get_sol_cqe(phba, psol, &csol_cqe); - pwrb_context = &phwi_ctrlr->wrb_context[ - csol_cqe.cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; pwrb_handle = pwrb_context->pwrb_handle_basestd[ csol_cqe.wrb_index]; @@ -1496,7 +1548,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, pwrb = pwrb_handle->pwrb; type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->back_lock); switch (type) { case HWH_TYPE_IO: case HWH_TYPE_IO_RD: @@ -1535,7 +1587,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, break; } - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->back_lock); } static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context @@ -1561,15 +1613,15 @@ hwi_get_async_handle(struct beiscsi_hba *phba, unsigned char is_header = 0; unsigned int index, dpl; - if (chip_skh_r(phba->pcidev)) { - dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + if (is_chip_be2_be3r(phba)) { + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, dpl, pdpdu_cqe); - index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, index, pdpdu_cqe); } else { - dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, dpl, pdpdu_cqe); - index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, index, pdpdu_cqe); } @@ -1613,8 +1665,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba, WARN_ON(!pasync_handle); - pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start; + pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); pasync_handle->is_header = is_header; pasync_handle->buffer_len = dpl; *pcq_index = index; @@ -1674,18 +1726,13 @@ hwi_update_async_writables(struct beiscsi_hba *phba, } static void hwi_free_async_msg(struct beiscsi_hba *phba, - unsigned int cri) + struct hwi_async_pdu_context *pasync_ctx, + unsigned int cri) { - struct hwi_controller *phwi_ctrlr; - struct hwi_async_pdu_context *pasync_ctx; struct async_pdu_handle *pasync_handle, *tmp_handle; struct list_head *plist; - phwi_ctrlr = phba->phwi_ctrlr; - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); - plist = &pasync_ctx->async_entry[cri].wait_queue.list; - list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) { list_del(&pasync_handle->link); @@ -1720,7 +1767,7 @@ hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx, } static void hwi_post_async_buffers(struct beiscsi_hba *phba, - unsigned int is_header) + unsigned int is_header, uint8_t ulp_num) { struct hwi_controller *phwi_ctrlr; struct hwi_async_pdu_context *pasync_ctx; @@ -1728,13 +1775,13 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba, struct list_head *pfree_link, *pbusy_list; struct phys_addr *pasync_sge; unsigned int ring_id, 
num_entries; - unsigned int host_write_num; + unsigned int host_write_num, doorbell_offset; unsigned int writables; unsigned int i = 0; u32 doorbell = 0; phwi_ctrlr = phba->phwi_ctrlr; - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); num_entries = pasync_ctx->num_entries; if (is_header) { @@ -1742,13 +1789,17 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba, pasync_ctx->async_header.free_entries); pfree_link = pasync_ctx->async_header.free_list.next; host_write_num = pasync_ctx->async_header.host_write_ptr; - ring_id = phwi_ctrlr->default_pdu_hdr.id; + ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id; + doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num]. + doorbell_offset; } else { writables = min(pasync_ctx->async_data.writables, pasync_ctx->async_data.free_entries); pfree_link = pasync_ctx->async_data.free_list.next; host_write_num = pasync_ctx->async_data.host_write_ptr; - ring_id = phwi_ctrlr->default_pdu_data.id; + ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id; + doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num]. + doorbell_offset; } writables = (writables / 8) * 8; @@ -1796,7 +1847,7 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba, doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT; - iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET); + iowrite32(doorbell, phba->db_va + doorbell_offset); } } @@ -1808,9 +1859,13 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba, struct hwi_async_pdu_context *pasync_ctx; struct async_pdu_handle *pasync_handle = NULL; unsigned int cq_index = -1; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); phwi_ctrlr = phba->phwi_ctrlr; - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, + BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, + cri_index)); pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, pdpdu_cqe, &cq_index); @@ -1819,8 +1874,10 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba, hwi_update_async_writables(phba, pasync_ctx, pasync_handle->is_header, cq_index); - hwi_free_async_msg(phba, pasync_handle->cri); - hwi_post_async_buffers(phba, pasync_handle->is_header); + hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri); + hwi_post_async_buffers(phba, pasync_handle->is_header, + BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, + cri_index)); } static unsigned int @@ -1856,12 +1913,10 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn, } status = beiscsi_process_async_pdu(beiscsi_conn, phba, - (beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start), phdr, hdr_len, pfirst_buffer, offset); - hwi_free_async_msg(phba, cri); + hwi_free_async_msg(phba, pasync_ctx, cri); return 0; } @@ -1877,13 +1932,16 @@ hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn, struct pdu_base *ppdu; phwi_ctrlr = phba->phwi_ctrlr; - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, + BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, + BE_GET_CRI_FROM_CID(beiscsi_conn-> + beiscsi_conn_cid))); list_del(&pasync_handle->link); if (pasync_handle->is_header) { pasync_ctx->async_header.busy_entries--; if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) { - hwi_free_async_msg(phba, cri); + hwi_free_async_msg(phba, pasync_ctx, cri); BUG(); } @@ -1938,9 +1996,14 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, struct hwi_async_pdu_context *pasync_ctx; struct 
async_pdu_handle *pasync_handle = NULL; unsigned int cq_index = -1; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); phwi_ctrlr = phba->phwi_ctrlr; - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, + BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, + cri_index)); + pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, pdpdu_cqe, &cq_index); @@ -1949,7 +2012,9 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, pasync_handle->is_header, cq_index); hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle); - hwi_post_async_buffers(phba, pasync_handle->is_header); + hwi_post_async_buffers(phba, pasync_handle->is_header, + BEISCSI_GET_ULP_FROM_CRI( + phwi_ctrlr, cri_index)); } static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) @@ -2011,6 +2076,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) unsigned int num_processed = 0; unsigned int tot_nump = 0; unsigned short code = 0, cid = 0; + uint16_t cri_index = 0; struct beiscsi_conn *beiscsi_conn; struct beiscsi_endpoint *beiscsi_ep; struct iscsi_endpoint *ep; @@ -2028,7 +2094,9 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) 32] & CQE_CODE_MASK); /* Get the CID */ - if (chip_skh_r(phba->pcidev)) { + if (is_chip_be2_be3r(phba)) { + cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); + } else { if ((code == DRIVERMSG_NOTIFY) || (code == UNSOL_HDR_NOTIFY) || (code == UNSOL_DATA_NOTIFY)) @@ -2038,10 +2106,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) else cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, cid, sol); - } else - cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); + } - ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(cid); + ep = phba->ep_array[cri_index]; beiscsi_ep = ep->dd_data; beiscsi_conn = beiscsi_ep->conn; @@ -2071,8 +2139,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) "BM_%d : Received %s[%d] on CID : %d\n", cqe_desc[code], code, cid); + spin_lock_bh(&phba->async_pdu_lock); hwi_process_default_pdu_ring(beiscsi_conn, phba, (struct i_t_dpdu_cqe *)sol); + spin_unlock_bh(&phba->async_pdu_lock); break; case UNSOL_DATA_NOTIFY: beiscsi_log(phba, KERN_INFO, @@ -2080,8 +2150,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) "BM_%d : Received %s[%d] on CID : %d\n", cqe_desc[code], code, cid); + spin_lock_bh(&phba->async_pdu_lock); hwi_process_default_pdu_ring(beiscsi_conn, phba, (struct i_t_dpdu_cqe *)sol); + spin_unlock_bh(&phba->async_pdu_lock); break; case CXN_INVALIDATE_INDEX_NOTIFY: case CMD_INVALIDATED_NOTIFY: @@ -2109,8 +2181,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", cqe_desc[code], code, cid); + spin_lock_bh(&phba->async_pdu_lock); hwi_flush_default_pdu_buffer(phba, beiscsi_conn, (struct i_t_dpdu_cqe *) sol); + spin_unlock_bh(&phba->async_pdu_lock); break; case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: case CXN_KILLED_BURST_LEN_MISMATCH: @@ -2191,12 +2265,13 @@ void beiscsi_process_all_cqs(struct work_struct *work) static int be_iopoll(struct blk_iopoll *iop, int budget) { - static unsigned int ret; + unsigned int ret; struct beiscsi_hba *phba; struct be_eq_obj *pbe_eq; pbe_eq = container_of(iop, struct be_eq_obj, iopoll); ret = beiscsi_process_cq(pbe_eq); + pbe_eq->cq_count += ret; if (ret < budget) { phba = pbe_eq->phba; blk_iopoll_complete(iop); @@ 
-2416,11 +2491,11 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) /* Check for the data_count */ dsp_value = (task->data_count) ? 1 : 0; - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, dsp_value); else - AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, dsp_value); /* Map addr only if there is data_count */ @@ -2475,26 +2550,19 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); } +/** + * beiscsi_find_mem_req()- Find mem needed + * @phba: ptr to HBA struct + **/ static void beiscsi_find_mem_req(struct beiscsi_hba *phba) { + uint8_t mem_descr_index, ulp_num; unsigned int num_cq_pages, num_async_pdu_buf_pages; unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ sizeof(struct sol_cqe)); - num_async_pdu_buf_pages = - PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ - phba->params.defpdu_hdr_sz); - num_async_pdu_buf_sgl_pages = - PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ - sizeof(struct phys_addr)); - num_async_pdu_data_pages = - PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ - phba->params.defpdu_data_sz); - num_async_pdu_data_sgl_pages = - PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ - sizeof(struct phys_addr)); phba->params.hwi_ws_sz = sizeof(struct hwi_controller); @@ -2516,30 +2584,86 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba) phba->params.icds_per_ctrl; phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * phba->params.icds_per_ctrl; - - phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] = - num_async_pdu_buf_pages * PAGE_SIZE; - phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] = - num_async_pdu_data_pages * PAGE_SIZE; - phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] = - num_async_pdu_buf_sgl_pages * PAGE_SIZE; - phba->mem_req[HWI_MEM_ASYNC_DATA_RING] = - num_async_pdu_data_sgl_pages * PAGE_SIZE; - phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] = - phba->params.asyncpdus_per_ctrl * - sizeof(struct async_pdu_handle); - phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] = - phba->params.asyncpdus_per_ctrl * - sizeof(struct async_pdu_handle); - phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] = - sizeof(struct hwi_async_pdu_context) + - (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry)); + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + + num_async_pdu_buf_sgl_pages = + PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( + phba, ulp_num) * + sizeof(struct phys_addr)); + + num_async_pdu_buf_pages = + PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( + phba, ulp_num) * + phba->params.defpdu_hdr_sz); + + num_async_pdu_data_pages = + PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( + phba, ulp_num) * + phba->params.defpdu_data_sz); + + num_async_pdu_data_sgl_pages = + PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( + phba, ulp_num) * + sizeof(struct phys_addr)); + + mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + BEISCSI_GET_CID_COUNT(phba, ulp_num) * + BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + num_async_pdu_buf_pages * + PAGE_SIZE; + + 
mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + num_async_pdu_data_pages * + PAGE_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + num_async_pdu_buf_sgl_pages * + PAGE_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + num_async_pdu_data_sgl_pages * + PAGE_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + BEISCSI_GET_CID_COUNT(phba, ulp_num) * + sizeof(struct async_pdu_handle); + + mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + BEISCSI_GET_CID_COUNT(phba, ulp_num) * + sizeof(struct async_pdu_handle); + + mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + sizeof(struct hwi_async_pdu_context) + + (BEISCSI_GET_CID_COUNT(phba, ulp_num) * + sizeof(struct hwi_async_entry)); + } + } } static int beiscsi_alloc_mem(struct beiscsi_hba *phba) { - struct be_mem_descriptor *mem_descr; dma_addr_t bus_add; + struct hwi_controller *phwi_ctrlr; + struct be_mem_descriptor *mem_descr; struct mem_array *mem_arr, *mem_arr_orig; unsigned int i, j, alloc_size, curr_alloc_size; @@ -2547,9 +2671,18 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) if (!phba->phwi_ctrlr) return -ENOMEM; + /* Allocate memory for wrb_context */ + phwi_ctrlr = phba->phwi_ctrlr; + phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) * + phba->params.cxns_per_ctrl, + GFP_KERNEL); + if (!phwi_ctrlr->wrb_context) + return -ENOMEM; + phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), GFP_KERNEL); if (!phba->init_mem) { + kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2558,12 +2691,19 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) GFP_KERNEL); if (!mem_arr_orig) { kfree(phba->init_mem); + kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } mem_descr = phba->init_mem; for (i = 0; i < SE_MEM_MAX; i++) { + if (!phba->mem_req[i]) { + mem_descr->mem_array = NULL; + mem_descr++; + continue; + } + j = 0; mem_arr = mem_arr_orig; alloc_size = phba->mem_req[i]; @@ -2628,6 +2768,7 @@ free_mem: } kfree(mem_arr_orig); kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2666,6 +2807,7 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba) static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) { struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; + struct hwi_context_memory *phwi_ctxt; struct wrb_handle *pwrb_handle = NULL; struct hwi_controller *phwi_ctrlr; struct hwi_wrb_context *pwrb_context; @@ -2680,7 +2822,18 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) mem_descr_wrb += HWI_MEM_WRB; phwi_ctrlr = phba->phwi_ctrlr; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + /* Allocate memory for WRBQ */ + phwi_ctxt = phwi_ctrlr->phwi_ctxt; + phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * + phba->params.cxns_per_ctrl, + GFP_KERNEL); + if (!phwi_ctxt->be_wrbq) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : WRBQ Mem Alloc Failed\n"); + return -ENOMEM; + } + + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; 
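The beiscsi_find_mem_req() rework above addresses every per-ULP resource as a base slot plus ulp_num * MEM_DESCR_OFFSET, which is what lets one loop size the async-PDU memory for both ULPs. That arithmetic is easy to lose in the diff noise, so the sketch below spells it out; the base-slot and stride values are assumed for illustration, as the real enum lives in be_main.h.

#include <stdio.h>

#define MEM_DESCR_OFFSET               8   /* assumed stride    */
#define HWI_MEM_ASYNC_HEADER_BUF_ULP0 10   /* assumed base slot */
#define BEISCSI_ULP_COUNT              2

int main(void)
{
        /* The same resource for each ULP lands a fixed stride apart,
         * so a single formula indexes phba->mem_req[] for both. */
        for (int ulp = 0; ulp < BEISCSI_ULP_COUNT; ulp++)
                printf("ULP%d header-buf descriptor -> mem_req[%d]\n", ulp,
                       HWI_MEM_ASYNC_HEADER_BUF_ULP0 + ulp * MEM_DESCR_OFFSET);
        return 0;
}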
pwrb_context->pwrb_handle_base = kzalloc(sizeof(struct wrb_handle *) * @@ -2723,7 +2876,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) } } idx = 0; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; if (!num_cxn_wrb) { pwrb = mem_descr_wrb->mem_array[idx].virtual_address; @@ -2752,8 +2905,9 @@ init_wrb_hndl_failed: return -ENOMEM; } -static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) +static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) { + uint8_t ulp_num; struct hwi_controller *phwi_ctrlr; struct hba_parameters *p = &phba->params; struct hwi_async_pdu_context *pasync_ctx; @@ -2761,146 +2915,150 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) unsigned int index, idx, num_per_mem, num_async_data; struct be_mem_descriptor *mem_descr; - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT; - - phwi_ctrlr = phba->phwi_ctrlr; - phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *) + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = + (struct hwi_async_pdu_context *) + mem_descr->mem_array[0].virtual_address; + + pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; + memset(pasync_ctx, 0, sizeof(*pasync_ctx)); + + pasync_ctx->async_entry = + (struct hwi_async_entry *) + ((long unsigned int)pasync_ctx + + sizeof(struct hwi_async_pdu_context)); + + pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba, + ulp_num); + pasync_ctx->buffer_size = p->defpdu_hdr_sz; + + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. 
+ virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + pasync_ctx->async_header.va_base = mem_descr->mem_array[0].virtual_address; - pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; - memset(pasync_ctx, 0, sizeof(*pasync_ctx)); - - pasync_ctx->num_entries = p->asyncpdus_per_ctrl; - pasync_ctx->buffer_size = p->defpdu_hdr_sz; - - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_HEADER_BUF; - if (mem_descr->mem_array[0].virtual_address) { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : hwi_init_async_pdu_ctx" - " HWI_MEM_ASYNC_HEADER_BUF va=%p\n", - mem_descr->mem_array[0].virtual_address); - } else - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : No Virtual address\n"); - - pasync_ctx->async_header.va_base = - mem_descr->mem_array[0].virtual_address; - - pasync_ctx->async_header.pa_base.u.a64.address = - mem_descr->mem_array[0].bus_address.u.a64.address; - - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_HEADER_RING; - if (mem_descr->mem_array[0].virtual_address) { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : hwi_init_async_pdu_ctx" - " HWI_MEM_ASYNC_HEADER_RING va=%p\n", - mem_descr->mem_array[0].virtual_address); - } else - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : No Virtual address\n"); - - pasync_ctx->async_header.ring_base = - mem_descr->mem_array[0].virtual_address; - - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE; - if (mem_descr->mem_array[0].virtual_address) { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : hwi_init_async_pdu_ctx" - " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n", - mem_descr->mem_array[0].virtual_address); - } else - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : No Virtual address\n"); - - pasync_ctx->async_header.handle_base = - mem_descr->mem_array[0].virtual_address; - pasync_ctx->async_header.writables = 0; - INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); - - - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_DATA_RING; - if (mem_descr->mem_array[0].virtual_address) { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : hwi_init_async_pdu_ctx" - " HWI_MEM_ASYNC_DATA_RING va=%p\n", - mem_descr->mem_array[0].virtual_address); - } else - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : No Virtual address\n"); - - pasync_ctx->async_data.ring_base = - mem_descr->mem_array[0].virtual_address; - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_DATA_HANDLE; - if (!mem_descr->mem_array[0].virtual_address) - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : No Virtual address\n"); + pasync_ctx->async_header.pa_base.u.a64.address = + mem_descr->mem_array[0]. + bus_address.u.a64.address; - pasync_ctx->async_data.handle_base = - mem_descr->mem_array[0].virtual_address; - pasync_ctx->async_data.writables = 0; - INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. 
+ virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + pasync_ctx->async_header.ring_base = + mem_descr->mem_array[0].virtual_address; - pasync_header_h = - (struct async_pdu_handle *)pasync_ctx->async_header.handle_base; - pasync_data_h = - (struct async_pdu_handle *)pasync_ctx->async_data.handle_base; + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. + virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + pasync_ctx->async_header.handle_base = + mem_descr->mem_array[0].virtual_address; + pasync_ctx->async_header.writables = 0; + INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); + + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. + virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + pasync_ctx->async_data.ring_base = + mem_descr->mem_array[0].virtual_address; - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_DATA_BUF; - if (mem_descr->mem_array[0].virtual_address) { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : hwi_init_async_pdu_ctx" - " HWI_MEM_ASYNC_DATA_BUF va=%p\n", - mem_descr->mem_array[0].virtual_address); - } else - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : No Virtual address\n"); + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (!mem_descr->mem_array[0].virtual_address) + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); - idx = 0; - pasync_ctx->async_data.va_base = - mem_descr->mem_array[idx].virtual_address; - pasync_ctx->async_data.pa_base.u.a64.address = - mem_descr->mem_array[idx].bus_address.u.a64.address; - - num_async_data = ((mem_descr->mem_array[idx].size) / - phba->params.defpdu_data_sz); - num_per_mem = 0; - - for (index = 0; index < p->asyncpdus_per_ctrl; index++) { - pasync_header_h->cri = -1; - pasync_header_h->index = (char)index; - INIT_LIST_HEAD(&pasync_header_h->link); - pasync_header_h->pbuffer = - (void *)((unsigned long) - (pasync_ctx->async_header.va_base) + - (p->defpdu_hdr_sz * index)); - - pasync_header_h->pa.u.a64.address = - pasync_ctx->async_header.pa_base.u.a64.address + - (p->defpdu_hdr_sz * index); - - list_add_tail(&pasync_header_h->link, - &pasync_ctx->async_header.free_list); - pasync_header_h++; - pasync_ctx->async_header.free_entries++; - pasync_ctx->async_header.writables++; - - INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list); - INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 
- header_busy_list); - pasync_data_h->cri = -1; - pasync_data_h->index = (char)index; - INIT_LIST_HEAD(&pasync_data_h->link); - - if (!num_async_data) { - num_per_mem = 0; - idx++; + pasync_ctx->async_data.handle_base = + mem_descr->mem_array[0].virtual_address; + pasync_ctx->async_data.writables = 0; + INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); + + pasync_header_h = + (struct async_pdu_handle *) + pasync_ctx->async_header.handle_base; + pasync_data_h = + (struct async_pdu_handle *) + pasync_ctx->async_data.handle_base; + + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. + virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + idx = 0; pasync_ctx->async_data.va_base = mem_descr->mem_array[idx].virtual_address; pasync_ctx->async_data.pa_base.u.a64.address = @@ -2909,31 +3067,84 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) num_async_data = ((mem_descr->mem_array[idx].size) / phba->params.defpdu_data_sz); + num_per_mem = 0; + + for (index = 0; index < BEISCSI_GET_CID_COUNT + (phba, ulp_num); index++) { + pasync_header_h->cri = -1; + pasync_header_h->index = (char)index; + INIT_LIST_HEAD(&pasync_header_h->link); + pasync_header_h->pbuffer = + (void *)((unsigned long) + (pasync_ctx-> + async_header.va_base) + + (p->defpdu_hdr_sz * index)); + + pasync_header_h->pa.u.a64.address = + pasync_ctx->async_header.pa_base.u.a64. + address + (p->defpdu_hdr_sz * index); + + list_add_tail(&pasync_header_h->link, + &pasync_ctx->async_header. + free_list); + pasync_header_h++; + pasync_ctx->async_header.free_entries++; + pasync_ctx->async_header.writables++; + + INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. + wait_queue.list); + INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. + header_busy_list); + pasync_data_h->cri = -1; + pasync_data_h->index = (char)index; + INIT_LIST_HEAD(&pasync_data_h->link); + + if (!num_async_data) { + num_per_mem = 0; + idx++; + pasync_ctx->async_data.va_base = + mem_descr->mem_array[idx]. + virtual_address; + pasync_ctx->async_data.pa_base.u. + a64.address = + mem_descr->mem_array[idx]. + bus_address.u.a64.address; + num_async_data = + ((mem_descr->mem_array[idx]. + size) / + phba->params.defpdu_data_sz); + } + pasync_data_h->pbuffer = + (void *)((unsigned long) + (pasync_ctx->async_data.va_base) + + (p->defpdu_data_sz * num_per_mem)); + + pasync_data_h->pa.u.a64.address = + pasync_ctx->async_data.pa_base.u.a64. + address + (p->defpdu_data_sz * + num_per_mem); + num_per_mem++; + num_async_data--; + + list_add_tail(&pasync_data_h->link, + &pasync_ctx->async_data. + free_list); + pasync_data_h++; + pasync_ctx->async_data.free_entries++; + pasync_ctx->async_data.writables++; + + INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 
+ data_busy_list); + } + + pasync_ctx->async_header.host_write_ptr = 0; + pasync_ctx->async_header.ep_read_ptr = -1; + pasync_ctx->async_data.host_write_ptr = 0; + pasync_ctx->async_data.ep_read_ptr = -1; } - pasync_data_h->pbuffer = - (void *)((unsigned long) - (pasync_ctx->async_data.va_base) + - (p->defpdu_data_sz * num_per_mem)); - - pasync_data_h->pa.u.a64.address = - pasync_ctx->async_data.pa_base.u.a64.address + - (p->defpdu_data_sz * num_per_mem); - num_per_mem++; - num_async_data--; - - list_add_tail(&pasync_data_h->link, - &pasync_ctx->async_data.free_list); - pasync_data_h++; - pasync_ctx->async_data.free_entries++; - pasync_ctx->async_data.writables++; - - INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list); - } - - pasync_ctx->async_header.host_write_ptr = 0; - pasync_ctx->async_header.ep_read_ptr = -1; - pasync_ctx->async_data.host_write_ptr = 0; - pasync_ctx->async_data.ep_read_ptr = -1; + } + + return 0; } static int @@ -3128,7 +3339,7 @@ static int beiscsi_create_def_hdr(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context, struct hwi_controller *phwi_ctrlr, - unsigned int def_pdu_ring_sz) + unsigned int def_pdu_ring_sz, uint8_t ulp_num) { unsigned int idx; int ret; @@ -3138,36 +3349,42 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba, void *dq_vaddress; idx = 0; - dq = &phwi_context->be_def_hdrq; + dq = &phwi_context->be_def_hdrq[ulp_num]; cq = &phwi_context->be_cq[0]; mem = &dq->dma_mem; mem_descr = phba->init_mem; - mem_descr += HWI_MEM_ASYNC_HEADER_RING; + mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); dq_vaddress = mem_descr->mem_array[idx].virtual_address; ret = be_fill_queue(dq, mem_descr->mem_array[0].size / sizeof(struct phys_addr), sizeof(struct phys_addr), dq_vaddress); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : be_fill_queue Failed for DEF PDU HDR\n"); + "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", + ulp_num); + return ret; } mem->dma = (unsigned long)mem_descr->mem_array[idx]. 
bus_address.u.a64.address; ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, def_pdu_ring_sz, - phba->params.defpdu_hdr_sz); + phba->params.defpdu_hdr_sz, + BEISCSI_DEFQ_HDR, ulp_num); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n"); + "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", + ulp_num); + return ret; } - phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id; - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : iscsi def pdu id is %d\n", - phwi_context->be_def_hdrq.id); - hwi_post_async_buffers(phba, 1); + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", + ulp_num, + phwi_context->be_def_hdrq[ulp_num].id); + hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num); return 0; } @@ -3175,7 +3392,7 @@ static int beiscsi_create_def_data(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context, struct hwi_controller *phwi_ctrlr, - unsigned int def_pdu_ring_sz) + unsigned int def_pdu_ring_sz, uint8_t ulp_num) { unsigned int idx; int ret; @@ -3185,40 +3402,83 @@ beiscsi_create_def_data(struct beiscsi_hba *phba, void *dq_vaddress; idx = 0; - dataq = &phwi_context->be_def_dataq; + dataq = &phwi_context->be_def_dataq[ulp_num]; cq = &phwi_context->be_cq[0]; mem = &dataq->dma_mem; mem_descr = phba->init_mem; - mem_descr += HWI_MEM_ASYNC_DATA_RING; + mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); dq_vaddress = mem_descr->mem_array[idx].virtual_address; ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / sizeof(struct phys_addr), sizeof(struct phys_addr), dq_vaddress); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : be_fill_queue Failed for DEF PDU DATA\n"); + "BM_%d : be_fill_queue Failed for DEF PDU " + "DATA on ULP : %d\n", + ulp_num); + return ret; } mem->dma = (unsigned long)mem_descr->mem_array[idx]. 
bus_address.u.a64.address; ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, def_pdu_ring_sz, - phba->params.defpdu_data_sz); + phba->params.defpdu_data_sz, + BEISCSI_DEFQ_DATA, ulp_num); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d be_cmd_create_default_pdu_queue" - " Failed for DEF PDU DATA\n"); + " Failed for DEF PDU DATA on ULP : %d\n", + ulp_num); return ret; } - phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : iscsi def data id is %d\n", - phwi_context->be_def_dataq.id); + "BM_%d : iscsi def data id on ULP : %d is %d\n", + ulp_num, + phwi_context->be_def_dataq[ulp_num].id); - hwi_post_async_buffers(phba, 0); + hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : DEFAULT PDU DATA RING CREATED\n"); + "BM_%d : DEFAULT PDU DATA RING CREATED" + "on ULP : %d\n", ulp_num); + + return 0; +} + + +static int +beiscsi_post_template_hdr(struct beiscsi_hba *phba) +{ + struct be_mem_descriptor *mem_descr; + struct mem_array *pm_arr; + struct be_dma_mem sgl; + int status, ulp_num; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + pm_arr = mem_descr->mem_array; + hwi_build_be_sgl_arr(phba, pm_arr, &sgl); + status = be_cmd_iscsi_post_template_hdr( + &phba->ctrl, &sgl); + + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Post Template HDR Failed for" + "ULP_%d\n", ulp_num); + return status; + } + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : Template HDR Pages Posted for" + "ULP_%d\n", ulp_num); + } + } return 0; } @@ -3229,14 +3489,18 @@ beiscsi_post_pages(struct beiscsi_hba *phba) struct mem_array *pm_arr; unsigned int page_offset, i; struct be_dma_mem sgl; - int status; + int status, ulp_num = 0; mem_descr = phba->init_mem; mem_descr += HWI_MEM_SGE; pm_arr = mem_descr->mem_array; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) + break; + page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * - phba->fw_config.iscsi_icd_start) / PAGE_SIZE; + phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; for (i = 0; i < mem_descr->num_elements; i++) { hwi_build_be_sgl_arr(phba, pm_arr, &sgl); status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, @@ -3288,12 +3552,15 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, { unsigned int wrb_mem_index, offset, size, num_wrb_rings; u64 pa_addr_lo; - unsigned int idx, num, i; + unsigned int idx, num, i, ulp_num; struct mem_array *pwrb_arr; void *wrb_vaddr; struct be_dma_mem sgl; struct be_mem_descriptor *mem_descr; + struct hwi_wrb_context *pwrb_context; int status; + uint8_t ulp_count = 0, ulp_base_num = 0; + uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; idx = 0; mem_descr = phba->init_mem; @@ -3337,22 +3604,45 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, num_wrb_rings--; } } + + /* Get the ULP Count */ + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + ulp_count++; + ulp_base_num = ulp_num; + cid_count_ulp[ulp_num] = + BEISCSI_GET_CID_COUNT(phba, ulp_num); + } + for (i = 0; i < phba->params.cxns_per_ctrl; i++) { wrb_mem_index = 0; offset = 0; size = 0; + if (ulp_count > 1) { + 
ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; + + if (!cid_count_ulp[ulp_base_num]) + ulp_base_num = (ulp_base_num + 1) % + BEISCSI_ULP_COUNT; + + cid_count_ulp[ulp_base_num]--; + } + + hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); status = be_cmd_wrbq_create(&phba->ctrl, &sgl, - &phwi_context->be_wrbq[i]); + &phwi_context->be_wrbq[i], + &phwi_ctrlr->wrb_context[i], + ulp_base_num); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : wrbq create failed."); kfree(pwrb_arr); return status; } - phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i]. - id; + pwrb_context = &phwi_ctrlr->wrb_context[i]; + BE_SET_CID_TO_CRI(i, pwrb_context->cid); } kfree(pwrb_arr); return 0; @@ -3365,7 +3655,7 @@ static void free_wrb_handles(struct beiscsi_hba *phba) struct hwi_wrb_context *pwrb_context; phwi_ctrlr = phba->phwi_ctrlr; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; kfree(pwrb_context->pwrb_handle_base); kfree(pwrb_context->pwrb_handle_basestd); @@ -3394,24 +3684,36 @@ static void hwi_cleanup(struct beiscsi_hba *phba) struct be_ctrl_info *ctrl = &phba->ctrl; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; - int i, eq_num; + struct hwi_async_pdu_context *pasync_ctx; + int i, eq_for_mcc, ulp_num; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; + + be_cmd_iscsi_remove_template_hdr(ctrl); + for (i = 0; i < phba->params.cxns_per_ctrl; i++) { q = &phwi_context->be_wrbq[i]; if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); } + kfree(phwi_context->be_wrbq); free_wrb_handles(phba); - q = &phwi_context->be_def_hdrq; - if (q->created) - beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { - q = &phwi_context->be_def_dataq; - if (q->created) - beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + q = &phwi_context->be_def_hdrq[ulp_num]; + if (q->created) + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + + q = &phwi_context->be_def_dataq[ulp_num]; + if (q->created) + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + + pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; + } + } beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); @@ -3420,16 +3722,18 @@ static void hwi_cleanup(struct beiscsi_hba *phba) if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); } + + be_mcc_queues_destroy(phba); if (phba->msix_enabled) - eq_num = 1; + eq_for_mcc = 1; else - eq_num = 0; - for (i = 0; i < (phba->num_cpus + eq_num); i++) { + eq_for_mcc = 0; + for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { q = &phwi_context->be_eq[i].q; if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); } - be_mcc_queues_destroy(phba); + be_cmd_fw_uninit(ctrl); } static int be_mcc_queues_create(struct beiscsi_hba *phba, @@ -3494,8 +3798,19 @@ static void find_num_cpus(struct beiscsi_hba *phba) BEISCSI_MAX_NUM_CPUS : num_cpus; break; case BE_GEN4: - phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ? - OC_SKH_MAX_NUM_CPUS : num_cpus; + /* + * If eqid_count == 1 fall back to + * INTX mechanism + **/ + if (phba->fw_config.eqid_count == 1) { + enable_msix = 0; + phba->num_cpus = 1; + return; + } + + phba->num_cpus = + (num_cpus > (phba->fw_config.eqid_count - 1)) ? 
+ (phba->fw_config.eqid_count - 1) : num_cpus; break; default: phba->num_cpus = 1; @@ -3508,15 +3823,13 @@ static int hwi_init_port(struct beiscsi_hba *phba) struct hwi_context_memory *phwi_context; unsigned int def_pdu_ring_sz; struct be_ctrl_info *ctrl = &phba->ctrl; - int status; + int status, ulp_num; - def_pdu_ring_sz = - phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr); phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - phwi_context->max_eqd = 0; + phwi_context->max_eqd = 128; phwi_context->min_eqd = 0; - phwi_context->cur_eqd = 64; + phwi_context->cur_eqd = 0; be_cmd_fw_initialize(&phba->ctrl); status = beiscsi_create_eqs(phba, phwi_context); @@ -3544,27 +3857,48 @@ static int hwi_init_port(struct beiscsi_hba *phba) goto error; } - status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr, - def_pdu_ring_sz); - if (status != 0) { - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Default Header not created\n"); - goto error; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + + def_pdu_ring_sz = + BEISCSI_GET_CID_COUNT(phba, ulp_num) * + sizeof(struct phys_addr); + + status = beiscsi_create_def_hdr(phba, phwi_context, + phwi_ctrlr, + def_pdu_ring_sz, + ulp_num); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Default Header not created for ULP : %d\n", + ulp_num); + goto error; + } + + status = beiscsi_create_def_data(phba, phwi_context, + phwi_ctrlr, + def_pdu_ring_sz, + ulp_num); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Default Data not created for ULP : %d\n", + ulp_num); + goto error; + } + } } - status = beiscsi_create_def_data(phba, phwi_context, - phwi_ctrlr, def_pdu_ring_sz); + status = beiscsi_post_pages(phba); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Default Data not created\n"); + "BM_%d : Post SGL Pages Failed\n"); goto error; } - status = beiscsi_post_pages(phba); + status = beiscsi_post_template_hdr(phba); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Post SGL Pages Failed\n"); - goto error; + "BM_%d : Template HDR Posting for CXN Failed\n"); } status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); @@ -3574,6 +3908,26 @@ static int hwi_init_port(struct beiscsi_hba *phba) goto error; } + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + uint16_t async_arr_idx = 0; + + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + uint16_t cri = 0; + struct hwi_async_pdu_context *pasync_ctx; + + pasync_ctx = HWI_GET_ASYNC_PDU_CTX( + phwi_ctrlr, ulp_num); + for (cri = 0; cri < + phba->params.cxns_per_ctrl; cri++) { + if (ulp_num == BEISCSI_GET_ULP_FROM_CRI + (phwi_ctrlr, cri)) + pasync_ctx->cid_to_async_cri_map[ + phwi_ctrlr->wrb_context[cri].cid] = + async_arr_idx++; + } + } + } + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : hwi_init_port success\n"); return 0; @@ -3607,7 +3961,12 @@ static int hwi_init_controller(struct beiscsi_hba *phba) if (beiscsi_init_wrb_handle(phba)) return -ENOMEM; - hwi_init_async_pdu_ctx(phba); + if (hwi_init_async_pdu_ctx(phba)) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx failed\n"); + return -ENOMEM; + } + if (hwi_init_port(phba) != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : hwi_init_controller failed\n"); @@ -3633,10 +3992,12 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba) (unsigned 
long)mem_descr->mem_array[j - 1]. bus_address.u.a64.address); } + kfree(mem_descr->mem_array); mem_descr++; } kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); } @@ -3671,6 +4032,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) struct sgl_handle *psgl_handle; struct iscsi_sge *pfrag; unsigned int arr_index, i, idx; + unsigned int ulp_icd_start, ulp_num = 0; phba->io_sgl_hndl_avbl = 0; phba->eh_sgl_hndl_avbl = 0; @@ -3737,6 +4099,12 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) "\n BM_%d : mem_descr_sg->num_elements=%d\n", mem_descr_sg->num_elements); + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) + break; + + ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; + arr_index = 0; idx = 0; while (idx < mem_descr_sg->num_elements) { @@ -3755,8 +4123,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); pfrag += phba->params.num_sge_per_io; - psgl_handle->sgl_index = - phba->fw_config.iscsi_icd_start + arr_index++; + psgl_handle->sgl_index = ulp_icd_start + arr_index++; } idx++; } @@ -3769,32 +4136,105 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) static int hba_setup_cid_tbls(struct beiscsi_hba *phba) { - int i, new_cid; + int ret; + uint16_t i, ulp_num; + struct ulp_cid_info *ptr_cid_info = NULL; - phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, - GFP_KERNEL); - if (!phba->cid_array) { - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Failed to allocate memory in " - "hba_setup_cid_tbls\n"); - return -ENOMEM; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), + GFP_KERNEL); + + if (!ptr_cid_info) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to allocate memory" + "for ULP_CID_INFO for ULP : %d\n", + ulp_num); + ret = -ENOMEM; + goto free_memory; + + } + + /* Allocate memory for CID array */ + ptr_cid_info->cid_array = kzalloc(sizeof(void *) * + BEISCSI_GET_CID_COUNT(phba, + ulp_num), GFP_KERNEL); + if (!ptr_cid_info->cid_array) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to allocate memory" + "for CID_ARRAY for ULP : %d\n", + ulp_num); + kfree(ptr_cid_info); + ptr_cid_info = NULL; + ret = -ENOMEM; + + goto free_memory; + } + ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( + phba, ulp_num); + + /* Save the cid_info_array ptr */ + phba->cid_array_info[ulp_num] = ptr_cid_info; + } } phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * - phba->params.cxns_per_ctrl * 2, GFP_KERNEL); + phba->params.cxns_per_ctrl, GFP_KERNEL); if (!phba->ep_array) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Failed to allocate memory in " "hba_setup_cid_tbls\n"); - kfree(phba->cid_array); - return -ENOMEM; + ret = -ENOMEM; + + goto free_memory; + } + + phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * + phba->params.cxns_per_ctrl, GFP_KERNEL); + if (!phba->conn_table) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to allocate memory in" + "hba_setup_cid_tbls\n"); + + kfree(phba->ep_array); + phba->ep_array = NULL; + ret = -ENOMEM; + + goto free_memory; } - new_cid = phba->fw_config.iscsi_cid_start; + for (i = 0; i < phba->params.cxns_per_ctrl; i++) { - phba->cid_array[i] = 
new_cid; - new_cid += 2; + ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; + + ptr_cid_info = phba->cid_array_info[ulp_num]; + ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = + phba->phwi_ctrlr->wrb_context[i].cid; + + } + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = phba->cid_array_info[ulp_num]; + + ptr_cid_info->cid_alloc = 0; + ptr_cid_info->cid_free = 0; + } } - phba->avlbl_cids = phba->params.cxns_per_ctrl; return 0; + +free_memory: + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = phba->cid_array_info[ulp_num]; + + if (ptr_cid_info) { + kfree(ptr_cid_info->cid_array); + kfree(ptr_cid_info); + phba->cid_array_info[ulp_num] = NULL; + } + } + } + + return ret; } static void hwi_enable_intr(struct beiscsi_hba *phba) @@ -3904,12 +4344,16 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba) goto boot_freemem; } - ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); + ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, "BM_%d : beiscsi_get_session_info Failed"); - goto boot_freemem; + + if (ret != -EBUSY) + goto boot_freemem; + else + return ret; } session_resp = nonemb_cmd.va ; @@ -4049,19 +4493,85 @@ static void hwi_purge_eq(struct beiscsi_hba *phba) static void beiscsi_clean_port(struct beiscsi_hba *phba) { - int mgmt_status; - - mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0); - if (mgmt_status) - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : mgmt_epfw_cleanup FAILED\n"); + int mgmt_status, ulp_num; + struct ulp_cid_info *ptr_cid_info = NULL; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + mgmt_status = mgmt_epfw_cleanup(phba, ulp_num); + if (mgmt_status) + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : mgmt_epfw_cleanup FAILED" + " for ULP_%d\n", ulp_num); + } + } hwi_purge_eq(phba); hwi_cleanup(phba); kfree(phba->io_sgl_hndl_base); kfree(phba->eh_sgl_hndl_base); - kfree(phba->cid_array); kfree(phba->ep_array); + kfree(phba->conn_table); + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = phba->cid_array_info[ulp_num]; + + if (ptr_cid_info) { + kfree(ptr_cid_info->cid_array); + kfree(ptr_cid_info); + phba->cid_array_info[ulp_num] = NULL; + } + } + } + +} + +/** + * beiscsi_free_mgmt_task_handles()- Free driver CXN resources + * @beiscsi_conn: ptr to the conn to be cleaned up + * @task: ptr to iscsi_task resource to be freed. + * + * Free driver mgmt resources binded to CXN. 
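+ *
+ * The teardown order mirrors allocation in reverse: the WRB is
+ * zeroed and returned first, the mgmt SGL handle is then freed
+ * under mgmt_sgl_lock, and finally any mapped mgmt-task DMA buffer
+ * is unmapped. The expected call pattern (used by
+ * beiscsi_cleanup_task() below) applies only to non-login tasks:
+ *
+ *	if (!beiscsi_conn->login_in_progress)
+ *		beiscsi_free_mgmt_task_handles(beiscsi_conn, task);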
+ **/ +void +beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task) +{ + struct beiscsi_io_task *io_task; + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct hwi_wrb_context *pwrb_context; + struct hwi_controller *phwi_ctrlr; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + + phwi_ctrlr = phba->phwi_ctrlr; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + + io_task = task->dd_data; + + if (io_task->pwrb_handle) { + memset(io_task->pwrb_handle->pwrb, 0, + sizeof(struct iscsi_wrb)); + free_wrb_handle(phba, pwrb_context, + io_task->pwrb_handle); + io_task->pwrb_handle = NULL; + } + + if (io_task->psgl_handle) { + spin_lock_bh(&phba->mgmt_sgl_lock); + free_mgmt_sgl_handle(phba, + io_task->psgl_handle); + io_task->psgl_handle = NULL; + spin_unlock_bh(&phba->mgmt_sgl_lock); + } + + if (io_task->mtask_addr) + pci_unmap_single(phba->pcidev, + io_task->mtask_addr, + io_task->mtask_data_count, + PCI_DMA_TODEVICE); } /** @@ -4078,10 +4588,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start]; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (io_task->cmd_bhs) { pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, @@ -4102,28 +4613,14 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) spin_unlock(&phba->io_sgl_lock); io_task->psgl_handle = NULL; } - } else { - if (!beiscsi_conn->login_in_progress) { - if (io_task->pwrb_handle) { - free_wrb_handle(phba, pwrb_context, - io_task->pwrb_handle); - io_task->pwrb_handle = NULL; - } - if (io_task->psgl_handle) { - spin_lock(&phba->mgmt_sgl_lock); - free_mgmt_sgl_handle(phba, - io_task->psgl_handle); - spin_unlock(&phba->mgmt_sgl_lock); - io_task->psgl_handle = NULL; - } - if (io_task->mtask_addr) { - pci_unmap_single(phba->pcidev, - io_task->mtask_addr, - io_task->mtask_data_count, - PCI_DMA_TODEVICE); - io_task->mtask_addr = 0; - } + + if (io_task->scsi_cmnd) { + scsi_dma_unmap(io_task->scsi_cmnd); + io_task->scsi_cmnd = NULL; } + } else { + if (!beiscsi_conn->login_in_progress) + beiscsi_free_mgmt_task_handles(beiscsi_conn, task); } } @@ -4142,19 +4639,18 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, * login/startup related tasks. 
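 *
 * Note: the switch below from session->lock to session->back_lock
 * follows libiscsi's split of the old single session lock into a
 * submission-side frwd_lock and a completion-side back_lock;
 * freeing a finished login task is completion-side work, so it is
 * serialized against the response path via back_lock.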
*/ beiscsi_conn->login_in_progress = 0; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->back_lock); beiscsi_cleanup_task(task); - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->back_lock); - pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start)); + pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid); /* Check for the adapter family */ - if (chip_skh_r(phba->pcidev)) - beiscsi_offload_cxn_v2(params, pwrb_handle); - else + if (is_chip_be2_be3r(phba)) beiscsi_offload_cxn_v0(params, pwrb_handle, phba->init_mem); + else + beiscsi_offload_cxn_v2(params, pwrb_handle); be_dws_le_to_cpu(pwrb_handle->pwrb, sizeof(struct iscsi_target_context_update_wrb)); @@ -4163,8 +4659,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; - - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); } static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, @@ -4194,6 +4690,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; itt_t itt; + uint16_t cri_index = 0; struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; dma_addr_t paddr; @@ -4223,8 +4720,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) goto free_hndls; } io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, @@ -4236,6 +4732,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) } else { io_task->scsi_cmnd = NULL; if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { + beiscsi_conn->task = task; if (!beiscsi_conn->login_in_progress) { spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = (struct sgl_handle *) @@ -4257,8 +4754,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) io_task->psgl_handle; io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | @@ -4278,7 +4774,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) io_task->pwrb_handle = beiscsi_conn->plogin_wrb_handle; } - beiscsi_conn->task = task; } else { spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); @@ -4295,8 +4790,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) } io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, @@ -4324,12 +4818,13 @@ free_io_hndls: free_mgmt_hndls: spin_lock(&phba->mgmt_sgl_lock); free_mgmt_sgl_handle(phba, io_task->psgl_handle); + io_task->psgl_handle = NULL; spin_unlock(&phba->mgmt_sgl_lock); free_hndls: phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[ - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + pwrb_context = 
&phwi_ctrlr->wrb_context[cri_index]; if (io_task->pwrb_handle) free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); io_task->pwrb_handle = NULL; @@ -4351,7 +4846,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, unsigned int doorbell = 0; pwrb = io_task->pwrb_handle->pwrb; - memset(pwrb, 0, sizeof(*pwrb)); io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; io_task->bhs_len = sizeof(struct be_cmd_bhs); @@ -4391,7 +4885,8 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); return 0; } @@ -4446,7 +4941,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); return 0; } @@ -4465,19 +4961,7 @@ static int beiscsi_mtask(struct iscsi_task *task) pwrb = io_task->pwrb_handle->pwrb; memset(pwrb, 0, sizeof(*pwrb)); - if (chip_skh_r(phba->pcidev)) { - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, - be32_to_cpu(task->cmdsn)); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, - io_task->pwrb_handle->wrb_index); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, - io_task->psgl_handle->sgl_index); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, - task->data_count); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, - io_task->pwrb_handle->nxt_wrb_index); - pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; - } else { + if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, be32_to_cpu(task->cmdsn)); AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, @@ -4489,6 +4973,18 @@ static int beiscsi_mtask(struct iscsi_task *task) AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, io_task->pwrb_handle->nxt_wrb_index); pwrb_typeoffset = BE_WRB_TYPE_OFFSET; + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, + be32_to_cpu(task->cmdsn)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, + io_task->pwrb_handle->wrb_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, + io_task->psgl_handle->sgl_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, + task->data_count); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, + io_task->pwrb_handle->nxt_wrb_index); + pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; } @@ -4501,19 +4997,19 @@ static int beiscsi_mtask(struct iscsi_task *task) case ISCSI_OP_NOOP_OUT: if (task->hdr->ttt != ISCSI_RESERVED_TAG) { ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); else - AMAP_SET_BITS(struct amap_iscsi_wrb, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dmsg, pwrb, 1); } else { ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); else - AMAP_SET_BITS(struct amap_iscsi_wrb, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dmsg, pwrb, 0); } hwi_write_buffer(pwrb, task); @@ -4540,15 +5036,16 @@ static int beiscsi_mtask(struct iscsi_task 
*task) } /* Set the task type */ - io_task->wrb_type = (chip_skh_r(phba->pcidev)) ? - AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) : - AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb); + io_task->wrb_type = (is_chip_be2_be3r(phba)) ? + AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : + AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); doorbell |= cid & DB_WRB_POST_CID_MASK; doorbell |= (io_task->pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); return 0; } @@ -4573,8 +5070,12 @@ static int beiscsi_task_xmit(struct iscsi_task *task) struct beiscsi_hba *phba = NULL; phba = ((struct beiscsi_conn *)conn->dd_data)->phba; - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO, - "BM_%d : scsi_dma_map Failed\n"); + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, + "BM_%d : scsi_dma_map Failed " + "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", + be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), + io_task->libiscsi_itt, scsi_bufflen(sc)); return num_sg; } @@ -4679,10 +5180,12 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) /* * beiscsi_quiesce()- Cleanup Driver resources * @phba: Instance Priv structure + * @unload_state:i Clean or EEH unload state * * Free the OS and HW resources held by the driver **/ -static void beiscsi_quiesce(struct beiscsi_hba *phba) +static void beiscsi_quiesce(struct beiscsi_hba *phba, + uint32_t unload_state) { struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; @@ -4695,28 +5198,36 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba) if (phba->msix_enabled) { for (i = 0; i <= phba->num_cpus; i++) { msix_vec = phba->msix_entries[i].vector; + synchronize_irq(msix_vec); free_irq(msix_vec, &phwi_context->be_eq[i]); kfree(phba->msi_name[i]); } } else - if (phba->pcidev->irq) + if (phba->pcidev->irq) { + synchronize_irq(phba->pcidev->irq); free_irq(phba->pcidev->irq, phba); - pci_disable_msix(phba->pcidev); - destroy_workqueue(phba->wq); - if (blk_iopoll_enabled) - for (i = 0; i < phba->num_cpus; i++) { - pbe_eq = &phwi_context->be_eq[i]; - blk_iopoll_disable(&pbe_eq->iopoll); } + pci_disable_msix(phba->pcidev); - beiscsi_clean_port(phba); - beiscsi_free_mem(phba); + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + blk_iopoll_disable(&pbe_eq->iopoll); + } - beiscsi_unmap_pci_function(phba); - pci_free_consistent(phba->pcidev, - phba->ctrl.mbox_mem_alloced.size, - phba->ctrl.mbox_mem_alloced.va, - phba->ctrl.mbox_mem_alloced.dma); + if (unload_state == BEISCSI_CLEAN_UNLOAD) { + destroy_workqueue(phba->wq); + beiscsi_clean_port(phba); + beiscsi_free_mem(phba); + + beiscsi_unmap_pci_function(phba); + pci_free_consistent(phba->pcidev, + phba->ctrl.mbox_mem_alloced.size, + phba->ctrl.mbox_mem_alloced.va, + phba->ctrl.mbox_mem_alloced.dma); + } else { + hwi_purge_eq(phba); + hwi_cleanup(phba); + } cancel_delayed_work_sync(&phba->beiscsi_hw_check_task); } @@ -4733,11 +5244,13 @@ static void beiscsi_remove(struct pci_dev *pcidev) } beiscsi_destroy_def_ifaces(phba); - beiscsi_quiesce(phba); + beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); iscsi_boot_destroy_kset(phba->boot_kset); iscsi_host_remove(phba->shost); pci_dev_put(phba->pcidev); iscsi_host_free(phba->shost); + pci_disable_pcie_error_reporting(pcidev); + pci_set_drvdata(pcidev, NULL); pci_disable_device(pcidev); } @@ -4752,7 +5265,9 @@ static void 
beiscsi_shutdown(struct pci_dev *pcidev) return; } - beiscsi_quiesce(phba); + phba->state = BE_ADAPTER_STATE_SHUTDOWN; + iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session); + beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); pci_disable_device(pcidev); } @@ -4771,6 +5286,57 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba) return; } +static void be_eqd_update(struct beiscsi_hba *phba) +{ + struct be_set_eqd set_eqd[MAX_CPUS]; + struct be_aic_obj *aic; + struct be_eq_obj *pbe_eq; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + int eqd, i, num = 0; + ulong now; + u32 pps, delta; + unsigned int tag; + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + for (i = 0; i <= phba->num_cpus; i++) { + aic = &phba->aic_obj[i]; + pbe_eq = &phwi_context->be_eq[i]; + now = jiffies; + if (!aic->jiffs || time_before(now, aic->jiffs) || + pbe_eq->cq_count < aic->eq_prev) { + aic->jiffs = now; + aic->eq_prev = pbe_eq->cq_count; + continue; + } + delta = jiffies_to_msecs(now - aic->jiffs); + pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); + eqd = (pps / 1500) << 2; + + if (eqd < 8) + eqd = 0; + eqd = min_t(u32, eqd, phwi_context->max_eqd); + eqd = max_t(u32, eqd, phwi_context->min_eqd); + + aic->jiffs = now; + aic->eq_prev = pbe_eq->cq_count; + + if (eqd != aic->prev_eqd) { + set_eqd[num].delay_multiplier = (eqd * 65)/100; + set_eqd[num].eq_id = pbe_eq->q.id; + aic->prev_eqd = eqd; + num++; + } + } + if (num) { + tag = be_cmd_modify_eq_delay(phba, set_eqd, num); + if (tag) + beiscsi_mccq_compl(phba, tag, NULL, NULL); + } +} + /* * beiscsi_hw_health_check()- Check adapter health * @work: work item to check HW health @@ -4784,12 +5350,161 @@ beiscsi_hw_health_check(struct work_struct *work) container_of(work, struct beiscsi_hba, beiscsi_hw_check_task.work); + be_eqd_update(phba); + beiscsi_ue_detect(phba); schedule_delayed_work(&phba->beiscsi_hw_check_task, msecs_to_jiffies(1000)); } + +static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct beiscsi_hba *phba = NULL; + + phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); + phba->state |= BE_ADAPTER_PCI_ERR; + + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : EEH error detected\n"); + + beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD); + + if (state == pci_channel_io_perm_failure) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : EEH : State PERM Failure"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_disable_device(pdev); + + /* The error could cause the FW to trigger a flash debug dump. + * Resetting the card while flash dump is in progress + * can cause it not to recover; wait for it to finish. + * Wait only for first function as it is needed only once per + * adapter. 
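+ *
+ * The 30 second sleep below is a fixed upper-bound wait, not a
+ * handshake with the firmware. Returning
+ * PCI_ERS_RESULT_NEED_RESET afterwards asks the PCI error-recovery
+ * core to reset the slot and then invoke the driver's ->slot_reset
+ * handler, beiscsi_eeh_reset().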
+ **/ + if (pdev->devfn == 0) + ssleep(30); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) +{ + struct beiscsi_hba *phba = NULL; + int status = 0; + + phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); + + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : EEH Reset\n"); + + status = pci_enable_device(pdev); + if (status) + return PCI_ERS_RESULT_DISCONNECT; + + pci_set_master(pdev); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + /* Wait for the CHIP Reset to complete */ + status = be_chk_reset_complete(phba); + if (!status) { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, + "BM_%d : EEH Reset Completed\n"); + } else { + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, + "BM_%d : EEH Reset Completion Failure\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_cleanup_aer_uncorrect_error_status(pdev); + return PCI_ERS_RESULT_RECOVERED; +} + +static void beiscsi_eeh_resume(struct pci_dev *pdev) +{ + int ret = 0, i; + struct be_eq_obj *pbe_eq; + struct beiscsi_hba *phba = NULL; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + + phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); + pci_save_state(pdev); + + if (enable_msix) + find_num_cpus(phba); + else + phba->num_cpus = 1; + + if (enable_msix) { + beiscsi_msix_enable(phba); + if (!phba->msix_enabled) + phba->num_cpus = 1; + } + + ret = beiscsi_cmd_reset_function(phba); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Reset Failed\n"); + goto ret_err; + } + + ret = be_chk_reset_complete(phba); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to get out of reset.\n"); + goto ret_err; + } + + beiscsi_get_params(phba); + phba->shost->max_id = phba->params.cxns_per_ctrl; + phba->shost->can_queue = phba->params.ios_per_ctrl; + ret = hwi_init_controller(phba); + + for (i = 0; i < MAX_MCC_CMD; i++) { + init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); + phba->ctrl.mcc_tag[i] = i + 1; + phba->ctrl.mcc_numtag[i + 1] = 0; + phba->ctrl.mcc_tag_available++; + } + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, + be_iopoll); + blk_iopoll_enable(&pbe_eq->iopoll); + } + + i = (phba->msix_enabled) ? 
i : 0; + /* Work item for MCC handling */ + pbe_eq = &phwi_context->be_eq[i]; + INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); + + ret = beiscsi_init_irqs(phba); + if (ret < 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : beiscsi_eeh_resume - " + "Failed to beiscsi_init_irqs\n"); + goto ret_err; + } + + hwi_enable_intr(phba); + phba->state &= ~BE_ADAPTER_PCI_ERR; + + return; +ret_err: + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : AER EEH Resume Failed\n"); +} + static int beiscsi_dev_probe(struct pci_dev *pcidev, const struct pci_device_id *id) { @@ -4797,7 +5512,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; struct be_eq_obj *pbe_eq; - int ret, i; + int ret = 0, i; ret = beiscsi_enable_pci(pcidev); if (ret < 0) { @@ -4813,10 +5528,20 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, goto disable_pci; } + /* Enable EEH reporting */ + ret = pci_enable_pcie_error_reporting(pcidev); + if (ret) + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, + "BM_%d : PCIe Error Reporting " + "Enabling Failed\n"); + + pci_save_state(pcidev); + /* Initialize Driver configuration Paramters */ beiscsi_hba_attrs_init(phba); phba->fw_timeout = false; + phba->mac_addr_set = false; switch (pcidev->device) { @@ -4834,24 +5559,11 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, case OC_SKH_ID1: phba->generation = BE_GEN4; phba->iotask_fn = beiscsi_iotask_v2; + break; default: phba->generation = 0; } - if (enable_msix) - find_num_cpus(phba); - else - phba->num_cpus = 1; - - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : num_cpus = %d\n", - phba->num_cpus); - - if (enable_msix) { - beiscsi_msix_enable(phba); - if (!phba->msix_enabled) - phba->num_cpus = 1; - } ret = be_ctrl_init(phba, pcidev); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, @@ -4863,27 +5575,43 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, ret = beiscsi_cmd_reset_function(phba); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Reset Failed. Aborting Crashdump\n"); + "BM_%d : Reset Failed\n"); goto hba_free; } ret = be_chk_reset_complete(phba); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Failed to get out of reset." 
- "Aborting Crashdump\n"); + "BM_%d : Failed to get out of reset.\n"); goto hba_free; } spin_lock_init(&phba->io_sgl_lock); spin_lock_init(&phba->mgmt_sgl_lock); spin_lock_init(&phba->isr_lock); + spin_lock_init(&phba->async_pdu_lock); ret = mgmt_get_fw_config(&phba->ctrl, phba); if (ret != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Error getting fw config\n"); goto free_port; } - phba->shost->max_id = phba->fw_config.iscsi_cid_count; + + if (enable_msix) + find_num_cpus(phba); + else + phba->num_cpus = 1; + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : num_cpus = %d\n", + phba->num_cpus); + + if (enable_msix) { + beiscsi_msix_enable(phba); + if (!phba->msix_enabled) + phba->num_cpus = 1; + } + + phba->shost->max_id = phba->params.cxns_per_ctrl; beiscsi_get_params(phba); phba->shost->can_queue = phba->params.ios_per_ctrl; ret = beiscsi_init_port(phba); @@ -4894,18 +5622,20 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, goto free_port; } - for (i = 0; i < MAX_MCC_CMD ; i++) { + for (i = 0; i < MAX_MCC_CMD; i++) { init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); phba->ctrl.mcc_tag[i] = i + 1; phba->ctrl.mcc_numtag[i + 1] = 0; phba->ctrl.mcc_tag_available++; + memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, + sizeof(struct be_dma_mem)); } phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq", phba->shost->host_no); - phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1); + phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name); if (!phba->wq) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : beiscsi_dev_probe-" @@ -4919,32 +5649,18 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - if (blk_iopoll_enabled) { - for (i = 0; i < phba->num_cpus; i++) { - pbe_eq = &phwi_context->be_eq[i]; - blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, - be_iopoll); - blk_iopoll_enable(&pbe_eq->iopoll); - } - - i = (phba->msix_enabled) ? i : 0; - /* Work item for MCC handling */ + for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; - INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); - } else { - if (phba->msix_enabled) { - for (i = 0; i <= phba->num_cpus; i++) { - pbe_eq = &phwi_context->be_eq[i]; - INIT_WORK(&pbe_eq->work_cqs, - beiscsi_process_all_cqs); - } - } else { - pbe_eq = &phwi_context->be_eq[0]; - INIT_WORK(&pbe_eq->work_cqs, - beiscsi_process_all_cqs); - } + blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, + be_iopoll); + blk_iopoll_enable(&pbe_eq->iopoll); } + i = (phba->msix_enabled) ? 
i : 0; + /* Work item for MCC handling */ + pbe_eq = &phwi_context->be_eq[i]; + INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); + ret = beiscsi_init_irqs(phba); if (ret < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, @@ -4954,6 +5670,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, } hwi_enable_intr(phba); + if (iscsi_host_add(phba->shost, &phba->pcidev->dev)) + goto free_blkenbld; + if (beiscsi_setup_boot_info(phba)) /* * log error but continue, because we may not be using @@ -4973,11 +5692,10 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, free_blkenbld: destroy_workqueue(phba->wq); - if (blk_iopoll_enabled) - for (i = 0; i < phba->num_cpus; i++) { - pbe_eq = &phwi_context->be_eq[i]; - blk_iopoll_disable(&pbe_eq->iopoll); - } + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + blk_iopoll_disable(&pbe_eq->iopoll); + } free_twq: beiscsi_clean_port(phba); beiscsi_free_mem(phba); @@ -4998,6 +5716,12 @@ disable_pci: return ret; } +static struct pci_error_handlers beiscsi_eeh_handlers = { + .error_detected = beiscsi_eeh_err_detected, + .slot_reset = beiscsi_eeh_reset, + .resume = beiscsi_eeh_resume, +}; + struct iscsi_transport beiscsi_iscsi_transport = { .owner = THIS_MODULE, .name = DRV_NAME, @@ -5036,7 +5760,8 @@ static struct pci_driver beiscsi_pci_driver = { .probe = beiscsi_dev_probe, .remove = beiscsi_remove, .shutdown = beiscsi_shutdown, - .id_table = beiscsi_pci_id_table + .id_table = beiscsi_pci_id_table, + .err_handler = &beiscsi_eeh_handlers }; diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 5946577d79d..9ceab426eec 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -26,6 +26,7 @@ #include <linux/in.h> #include <linux/ctype.h> #include <linux/module.h> +#include <linux/aer.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> @@ -34,9 +35,8 @@ #include <scsi/libiscsi.h> #include <scsi/scsi_transport_iscsi.h> -#include "be.h" #define DRV_NAME "be2iscsi" -#define BUILD_STR "10.0.272.0" +#define BUILD_STR "10.2.273.0" #define BE_NAME "Emulex OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" @@ -66,13 +66,14 @@ #define MAX_CPUS 64 #define BEISCSI_MAX_NUM_CPUS 7 -#define OC_SKH_MAX_NUM_CPUS 63 +#define BEISCSI_VER_STRLEN 32 #define BEISCSI_SGLIST_ELEMENTS 30 -#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ -#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */ +#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ +#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */ +#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */ #define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ #define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */ @@ -96,14 +97,24 @@ #define INVALID_SESS_HANDLE 0xFFFFFFFF -#define BE_ADAPTER_UP 0x00000000 -#define BE_ADAPTER_LINK_DOWN 0x00000001 +/** + * Adapter States + **/ +#define BE_ADAPTER_LINK_UP 0x001 +#define BE_ADAPTER_LINK_DOWN 0x002 +#define BE_ADAPTER_PCI_ERR 0x004 +#define BE_ADAPTER_STATE_SHUTDOWN 0x008 + + +#define BEISCSI_CLEAN_UNLOAD 0x01 +#define BEISCSI_EEH_UNLOAD 0x02 /** * hardware needs the async PDU buffers to be posted in multiples of 8 * So have atleast 8 of them by default */ -#define HWI_GET_ASYNC_PDU_CTX(phwi) (phwi->phwi_ctxt->pasync_ctx) +#define HWI_GET_ASYNC_PDU_CTX(phwi, ulp_num) \ + (phwi->phwi_ctxt->pasync_ctx[ulp_num]) /********* Memory BAR register ************/ #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc @@ -129,11 +140,15 @@ #define DB_RXULP0_OFFSET 0xA0 /********* Event Q door bell *************/ #define DB_EQ_OFFSET DB_CQ_OFFSET -#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */ +#define DB_EQ_RING_ID_LOW_MASK 0x1FF /* bits 0 - 8 */ /* Clear the interrupt for this eq */ #define DB_EQ_CLR_SHIFT (9) /* bit 9 */ /* Must be 1 */ #define DB_EQ_EVNT_SHIFT (10) /* bit 10 */ +/* Higher Order EQ_ID bit */ +#define DB_EQ_RING_ID_HIGH_MASK 0x1F /* bits 11 - 15 */ +#define DB_EQ_HIGH_SET_SHIFT 11 +#define DB_EQ_HIGH_FEILD_SHIFT 9 /* Number of event entries processed */ #define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ /* Rearm bit */ @@ -141,36 +156,53 @@ /********* Compl Q door bell *************/ #define DB_CQ_OFFSET 0x120 -#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ +#define DB_CQ_RING_ID_LOW_MASK 0x3FF /* bits 0 - 9 */ +/* Higher Order CQ_ID bit */ +#define DB_CQ_RING_ID_HIGH_MASK 0x1F /* bits 11 - 15 */ +#define DB_CQ_HIGH_SET_SHIFT 11 +#define DB_CQ_HIGH_FEILD_SHIFT 10 + /* Number of event entries processed */ #define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ /* Rearm bit */ #define DB_CQ_REARM_SHIFT (29) /* bit 29 */ #define GET_HWI_CONTROLLER_WS(pc) (pc->phwi_ctrlr) -#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\ - (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data.id) -#define HWI_GET_DEF_HDRQ_ID(pc) (((struct hwi_controller *)\ - (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr.id) +#define HWI_GET_DEF_BUFQ_ID(pc, ulp_num) (((struct hwi_controller *)\ + (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data[ulp_num].id) +#define HWI_GET_DEF_HDRQ_ID(pc, ulp_num) (((struct hwi_controller 
*)\ + (GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr[ulp_num].id) #define PAGES_REQUIRED(x) \ ((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE)) #define BEISCSI_MSI_NAME 20 /* size of msi_name string */ +#define MEM_DESCR_OFFSET 8 +#define BEISCSI_DEFQ_HDR 1 +#define BEISCSI_DEFQ_DATA 0 enum be_mem_enum { HWI_MEM_ADDN_CONTEXT, HWI_MEM_WRB, HWI_MEM_WRBH, HWI_MEM_SGLH, HWI_MEM_SGE, - HWI_MEM_ASYNC_HEADER_BUF, /* 5 */ - HWI_MEM_ASYNC_DATA_BUF, - HWI_MEM_ASYNC_HEADER_RING, - HWI_MEM_ASYNC_DATA_RING, - HWI_MEM_ASYNC_HEADER_HANDLE, - HWI_MEM_ASYNC_DATA_HANDLE, /* 10 */ - HWI_MEM_ASYNC_PDU_CONTEXT, + HWI_MEM_TEMPLATE_HDR_ULP0, + HWI_MEM_ASYNC_HEADER_BUF_ULP0, /* 6 */ + HWI_MEM_ASYNC_DATA_BUF_ULP0, + HWI_MEM_ASYNC_HEADER_RING_ULP0, + HWI_MEM_ASYNC_DATA_RING_ULP0, + HWI_MEM_ASYNC_HEADER_HANDLE_ULP0, + HWI_MEM_ASYNC_DATA_HANDLE_ULP0, /* 11 */ + HWI_MEM_ASYNC_PDU_CONTEXT_ULP0, + HWI_MEM_TEMPLATE_HDR_ULP1, + HWI_MEM_ASYNC_HEADER_BUF_ULP1, /* 14 */ + HWI_MEM_ASYNC_DATA_BUF_ULP1, + HWI_MEM_ASYNC_HEADER_RING_ULP1, + HWI_MEM_ASYNC_DATA_RING_ULP1, + HWI_MEM_ASYNC_HEADER_HANDLE_ULP1, + HWI_MEM_ASYNC_DATA_HANDLE_ULP1, /* 19 */ + HWI_MEM_ASYNC_PDU_CONTEXT_ULP1, ISCSI_MEM_GLOBAL_HEADER, SE_MEM_MAX }; @@ -265,7 +297,49 @@ struct invalidate_command_table { unsigned short cid; } __packed; -#define chip_skh_r(pdev) (pdev->device == OC_SKH_ID1) +#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \ + (phwi_ctrlr->wrb_context[cri].ulp_num) +struct hwi_wrb_context { + struct list_head wrb_handle_list; + struct list_head wrb_handle_drvr_list; + struct wrb_handle **pwrb_handle_base; + struct wrb_handle **pwrb_handle_basestd; + struct iscsi_wrb *plast_wrb; + unsigned short alloc_index; + unsigned short free_index; + unsigned short wrb_handles_available; + unsigned short cid; + uint8_t ulp_num; /* ULP to which CID binded */ + uint16_t register_set; + uint16_t doorbell_format; + uint32_t doorbell_offset; +}; + +struct ulp_cid_info { + unsigned short *cid_array; + unsigned short avlbl_cids; + unsigned short cid_alloc; + unsigned short cid_free; +}; + +#include "be.h" +#define chip_be2(phba) (phba->generation == BE_GEN2) +#define chip_be3_r(phba) (phba->generation == BE_GEN3) +#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba))) + +#define BEISCSI_ULP0 0 +#define BEISCSI_ULP1 1 +#define BEISCSI_ULP_COUNT 2 +#define BEISCSI_ULP0_LOADED 0x01 +#define BEISCSI_ULP1_LOADED 0x02 + +#define BEISCSI_ULP_AVLBL_CID(phba, ulp_num) \ + (((struct ulp_cid_info *)phba->cid_array_info[ulp_num])->avlbl_cids) +#define BEISCSI_ULP0_AVLBL_CID(phba) \ + BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP0) +#define BEISCSI_ULP1_AVLBL_CID(phba) \ + BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP1) + struct beiscsi_hba { struct hba_parameters params; struct hwi_controller *phwi_ctrlr; @@ -300,14 +374,17 @@ struct beiscsi_hba { spinlock_t io_sgl_lock; spinlock_t mgmt_sgl_lock; spinlock_t isr_lock; + spinlock_t async_pdu_lock; unsigned int age; - unsigned short avlbl_cids; - unsigned short cid_alloc; - unsigned short cid_free; - struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2]; struct list_head hba_queue; - unsigned short *cid_array; +#define BE_MAX_SESSION 2048 +#define BE_SET_CID_TO_CRI(cri_index, cid) \ + (phba->cid_to_cri_map[cid] = cri_index) +#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid]) + unsigned short cid_to_cri_map[BE_MAX_SESSION]; + struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT]; struct iscsi_endpoint **ep_array; + struct beiscsi_conn **conn_table; struct iscsi_boot_kset *boot_kset; struct Scsi_Host *shost; 
struct iscsi_iface *ipv4_iface; @@ -317,20 +394,21 @@ struct beiscsi_hba { * group together since they are used most frequently * for cid to cri conversion */ - unsigned int iscsi_cid_start; unsigned int phys_port; + unsigned int eqid_count; + unsigned int cqid_count; + unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT]; +#define BEISCSI_GET_CID_COUNT(phba, ulp_num) \ + (phba->fw_config.iscsi_cid_count[ulp_num]) + unsigned int iscsi_cid_count[BEISCSI_ULP_COUNT]; + unsigned int iscsi_icd_count[BEISCSI_ULP_COUNT]; + unsigned int iscsi_icd_start[BEISCSI_ULP_COUNT]; + unsigned int iscsi_chain_start[BEISCSI_ULP_COUNT]; + unsigned int iscsi_chain_count[BEISCSI_ULP_COUNT]; - unsigned int isr_offset; - unsigned int iscsi_icd_start; - unsigned int iscsi_cid_count; - unsigned int iscsi_icd_count; - unsigned int pci_function; - - unsigned short cid_alloc; - unsigned short cid_free; - unsigned short avlbl_cids; unsigned short iscsi_features; - spinlock_t cid_lock; + uint16_t dual_ulp_aware; + unsigned long ulp_supported; } fw_config; unsigned int state; @@ -338,7 +416,9 @@ struct beiscsi_hba { bool ue_detected; struct delayed_work beiscsi_hw_check_task; + bool mac_addr_set; u8 mac_address[ETH_ALEN]; + char fw_ver_str[BEISCSI_VER_STRLEN]; char wq_name[20]; struct workqueue_struct *wq; /* The actuak work queue */ struct be_ctrl_info ctrl; @@ -347,6 +427,7 @@ struct beiscsi_hba { struct mgmt_session_info boot_sess; struct invalidate_command_table inv_tbl[128]; + struct be_aic_obj aic_obj[MAX_CPUS]; unsigned int attr_log_enable; int (*iotask_fn)(struct iscsi_task *, struct scatterlist *sg, @@ -365,6 +446,7 @@ struct beiscsi_conn { struct iscsi_conn *conn; struct beiscsi_hba *phba; u32 exp_statsn; + u32 doorbell_offset; u32 beiscsi_conn_cid; struct beiscsi_endpoint *ep; unsigned short login_in_progress; @@ -465,7 +547,7 @@ struct amap_iscsi_sge { }; struct beiscsi_offload_params { - u32 dw[5]; + u32 dw[6]; }; #define OFFLD_PARAMS_ERL 0x00000003 @@ -495,6 +577,7 @@ struct amap_beiscsi_offload_params { u8 max_r2t[16]; u8 pad[8]; u8 exp_statsn[32]; + u8 max_recv_data_segment_length[32]; }; /* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, @@ -558,12 +641,13 @@ struct hwi_async_pdu_context { unsigned int buffer_size; unsigned int num_entries; - +#define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid]) + unsigned short cid_to_async_cri_map[BE_MAX_SESSION]; /** * This is a varying size list! Do not add anything * after this entry!! 
*/ - struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2]; + struct hwi_async_entry *async_entry; }; #define PDUCQE_CODE_MASK 0x0000003F @@ -749,7 +833,12 @@ void free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle); void beiscsi_process_all_cqs(struct work_struct *work); +void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task); +void hwi_ring_cq_db(struct beiscsi_hba *phba, + unsigned int id, unsigned int num_processed, + unsigned char rearm, unsigned char event); static inline bool beiscsi_error(struct beiscsi_hba *phba) { return phba->ue_detected || phba->fw_timeout; @@ -874,30 +963,32 @@ struct amap_iscsi_target_context_update_wrb_v2 { u8 first_burst_length[24]; /* DWORD 3 */ u8 rsvd3[8]; /* DOWRD 3 */ u8 max_r2t[16]; /* DWORD 4 */ - u8 rsvd4[10]; /* DWORD 4 */ + u8 rsvd4; /* DWORD 4 */ u8 hde; /* DWORD 4 */ u8 dde; /* DWORD 4 */ u8 erl[2]; /* DWORD 4 */ + u8 rsvd5[6]; /* DWORD 4 */ u8 imd; /* DWORD 4 */ u8 ir2t; /* DWORD 4 */ + u8 rsvd6[3]; /* DWORD 4 */ u8 stat_sn[32]; /* DWORD 5 */ - u8 rsvd5[32]; /* DWORD 6 */ - u8 rsvd6[32]; /* DWORD 7 */ + u8 rsvd7[32]; /* DWORD 6 */ + u8 rsvd8[32]; /* DWORD 7 */ u8 max_recv_dataseg_len[24]; /* DWORD 8 */ - u8 rsvd7[8]; /* DWORD 8 */ - u8 rsvd8[32]; /* DWORD 9 */ - u8 rsvd9[32]; /* DWORD 10 */ + u8 rsvd9[8]; /* DWORD 8 */ + u8 rsvd10[32]; /* DWORD 9 */ + u8 rsvd11[32]; /* DWORD 10 */ u8 max_cxns[16]; /* DWORD 11 */ - u8 rsvd10[11]; /* DWORD 11*/ + u8 rsvd12[11]; /* DWORD 11*/ u8 invld; /* DWORD 11 */ - u8 rsvd11;/* DWORD 11*/ + u8 rsvd13;/* DWORD 11*/ u8 dmsg; /* DWORD 11 */ u8 data_seq_inorder; /* DWORD 11 */ u8 pdu_seq_inorder; /* DWORD 11 */ - u8 rsvd12[32]; /*DWORD 12 */ - u8 rsvd13[32]; /* DWORD 13 */ - u8 rsvd14[32]; /* DWORD 14 */ - u8 rsvd15[32]; /* DWORD 15 */ + u8 rsvd14[32]; /*DWORD 12 */ + u8 rsvd15[32]; /* DWORD 13 */ + u8 rsvd16[32]; /* DWORD 14 */ + u8 rsvd17[32]; /* DWORD 15 */ } __packed; @@ -908,6 +999,10 @@ struct be_ring { u32 cidx; /* consumer index */ u32 pidx; /* producer index -- not used by most rings */ u32 item_size; /* size in bytes of one object */ + u8 ulp_num; /* ULP to which CID binded */ + u16 register_set; + u16 doorbell_format; + u32 doorbell_offset; void *va; /* The virtual address of the ring. 
This * should be last to allow 32 & 64 bit debugger @@ -915,28 +1010,16 @@ struct be_ring { */ }; -struct hwi_wrb_context { - struct list_head wrb_handle_list; - struct list_head wrb_handle_drvr_list; - struct wrb_handle **pwrb_handle_base; - struct wrb_handle **pwrb_handle_basestd; - struct iscsi_wrb *plast_wrb; - unsigned short alloc_index; - unsigned short free_index; - unsigned short wrb_handles_available; - unsigned short cid; -}; - struct hwi_controller { struct list_head io_sgl_list; struct list_head eh_sgl_list; struct sgl_handle *psgl_handle_base; unsigned int wrb_mem_index; - struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2]; + struct hwi_wrb_context *wrb_context; struct mcc_wrb *pmcc_wrb_base; - struct be_ring default_pdu_hdr; - struct be_ring default_pdu_data; + struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT]; + struct be_ring default_pdu_data[BEISCSI_ULP_COUNT]; struct hwi_context_memory *phwi_ctxt; }; @@ -967,13 +1050,10 @@ struct hwi_context_memory { struct be_eq_obj be_eq[MAX_CPUS]; struct be_queue_info be_cq[MAX_CPUS - 1]; - struct be_queue_info be_def_hdrq; - struct be_queue_info be_def_dataq; - - struct be_queue_info be_wrbq[BE2_MAX_SESSIONS]; - struct be_mcc_wrb_context *pbe_mcc_context; - - struct hwi_async_pdu_context *pasync_ctx; + struct be_queue_info *be_wrbq; + struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT]; + struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT]; + struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT]; }; /* Logging related definitions */ @@ -983,6 +1063,7 @@ struct hwi_context_memory { #define BEISCSI_LOG_EH 0x0008 /* Error Handler */ #define BEISCSI_LOG_IO 0x0010 /* IO Code Path */ #define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */ +#define BEISCSI_LOG_ISCSI 0x0040 /* SCSI/iSCSI Protocol related Logs */ #define beiscsi_log(phba, level, mask, fmt, arg...) \ do { \ diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index a6c2fe4b4d6..07934b0b9ee 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -155,6 +155,43 @@ void beiscsi_ue_detect(struct beiscsi_hba *phba) } } +int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, + struct be_set_eqd *set_eqd, int num) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + struct be_cmd_req_modify_eq_delay *req; + unsigned int tag = 0; + int i; + + spin_lock(&ctrl->mbox_lock); + tag = alloc_mcc_tag(phba); + if (!tag) { + spin_unlock(&ctrl->mbox_lock); + return tag; + } + + wrb = wrb_from_mccq(phba); + req = embedded_payload(wrb); + + wrb->tag0 |= tag; + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); + + req->num_eq = cpu_to_le32(num); + for (i = 0; i < num; i++) { + req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); + req->delay[i].phase = 0; + req->delay[i].delay_multiplier = + cpu_to_le32(set_eqd[i].delay_multiplier); + } + + be_mcc_notify(phba); + spin_unlock(&ctrl->mbox_lock); + return tag; +} + /** * mgmt_reopen_session()- Reopen a session based on reopen_type * @phba: Device priv structure instance @@ -278,6 +315,18 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba, return tag; } +/** + * mgmt_get_fw_config()- Get the FW config for the function + * @ctrl: ptr to Ctrl Info + * @phba: ptr to the dev priv structure + * + * Get the FW config and resources available for the function. + * The resources are created based on the count received here. + * + * return + * Success: 0 + * Failure: Non-Zero Value + **/ int mgmt_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba) { @@ -291,31 +340,79 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl, be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); + OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, + EMBED_MBX_MAX_PAYLOAD_SIZE); status = be_mbox_notify(ctrl); if (!status) { + uint8_t ulp_num = 0; struct be_fw_cfg *pfw_cfg; pfw_cfg = req; + + if (!is_chip_be2_be3r(phba)) { + phba->fw_config.eqid_count = pfw_cfg->eqid_count; + phba->fw_config.cqid_count = pfw_cfg->cqid_count; + + beiscsi_log(phba, KERN_INFO, + BEISCSI_LOG_INIT, + "BG_%d : EQ_Count : %d CQ_Count : %d\n", + phba->fw_config.eqid_count, + phba->fw_config.cqid_count); + } + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (pfw_cfg->ulp[ulp_num].ulp_mode & + BEISCSI_ULP_ISCSI_INI_MODE) + set_bit(ulp_num, + &phba->fw_config.ulp_supported); + phba->fw_config.phys_port = pfw_cfg->phys_port; - phba->fw_config.iscsi_icd_start = - pfw_cfg->ulp[0].icd_base; - phba->fw_config.iscsi_icd_count = - pfw_cfg->ulp[0].icd_count; - phba->fw_config.iscsi_cid_start = - pfw_cfg->ulp[0].sq_base; - phba->fw_config.iscsi_cid_count = - pfw_cfg->ulp[0].sq_count; - if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BG_%d : FW reported MAX CXNS as %d\t" - "Max Supported = %d.\n", - phba->fw_config.iscsi_cid_count, - BE2_MAX_SESSIONS); - phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + + phba->fw_config.iscsi_cid_start[ulp_num] = + pfw_cfg->ulp[ulp_num].sq_base; + phba->fw_config.iscsi_cid_count[ulp_num] = + pfw_cfg->ulp[ulp_num].sq_count; + + phba->fw_config.iscsi_icd_start[ulp_num] = + pfw_cfg->ulp[ulp_num].icd_base; + 
phba->fw_config.iscsi_icd_count[ulp_num] = + pfw_cfg->ulp[ulp_num].icd_count; + + phba->fw_config.iscsi_chain_start[ulp_num] = + pfw_cfg->chain_icd[ulp_num].chain_base; + phba->fw_config.iscsi_chain_count[ulp_num] = + pfw_cfg->chain_icd[ulp_num].chain_count; + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : Function loaded on ULP : %d\n" + "\tiscsi_cid_count : %d\n" + "\tiscsi_cid_start : %d\n" + "\t iscsi_icd_count : %d\n" + "\t iscsi_icd_start : %d\n", + ulp_num, + phba->fw_config. + iscsi_cid_count[ulp_num], + phba->fw_config. + iscsi_cid_start[ulp_num], + phba->fw_config. + iscsi_icd_count[ulp_num], + phba->fw_config. + iscsi_icd_start[ulp_num]); + } } + + phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode & + BEISCSI_FUNC_DUA_MODE); + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BG_%d : DUA Mode : 0x%x\n", + phba->fw_config.dual_ulp_aware); + } else { - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : Failed in mgmt_get_fw_config\n"); + status = -EINVAL; } spin_unlock(&ctrl->mbox_lock); @@ -368,6 +465,8 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl, beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : phba->fw_config.iscsi_features = %d\n", phba->fw_config.iscsi_features); + memcpy(phba->fw_ver_str, resp->params.hba_attribs. + firmware_version_string, BEISCSI_VER_STRLEN); } else beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : Failed in mgmt_check_supported_fw\n"); @@ -385,8 +484,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd) { struct be_cmd_resp_hdr *resp; - struct be_mcc_wrb *wrb = wrb_from_mccq(phba); - struct be_sge *mcc_sge = nonembedded_sgl(wrb); + struct be_mcc_wrb *wrb; + struct be_sge *mcc_sge; unsigned int tag = 0; struct iscsi_bsg_request *bsg_req = job->request; struct be_bsg_vendor_cmd *req = nonemb_cmd->va; @@ -403,7 +502,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, req->sector = sector; req->offset = offset; spin_lock(&ctrl->mbox_lock); - memset(wrb, 0, sizeof(*wrb)); switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { case BEISCSI_WRITE_FLASH: @@ -433,6 +531,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, return tag; } + wrb = wrb_from_mccq(phba); + mcc_sge = nonembedded_sgl(wrb); be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, job->request_payload.sg_cnt); mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); @@ -446,7 +546,16 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, return tag; } -int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) +/** + * mgmt_epfw_cleanup()- Inform FW to cleanup data structures. + * @phba: pointer to dev priv structure + * @ulp_num: ULP number. 
+ * + * return + * Success: 0 + * Failure: Non-Zero Value + **/ +int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num) { struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb = wrb_from_mccq(phba); @@ -454,15 +563,14 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) int status = 0; spin_lock(&ctrl->mbox_lock); - memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); - req->chute = chute; - req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba)); - req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba)); + req->chute = (1 << ulp_num); + req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num)); + req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num)); status = be_mcc_notify_wait(phba); if (status) @@ -583,6 +691,16 @@ unsigned int mgmt_upload_connection(struct beiscsi_hba *phba, return tag; } +/** + * mgmt_open_connection()- Establish a TCP CXN + * @dst_addr: Destination Address + * @beiscsi_ep: ptr to device endpoint struct + * @nonemb_cmd: ptr to memory allocated for command + * + * return + * Success: Tag number of the MBX Command issued + * Failure: Error code + **/ int mgmt_open_connection(struct beiscsi_hba *phba, struct sockaddr *dst_addr, struct beiscsi_endpoint *beiscsi_ep, @@ -594,20 +712,23 @@ int mgmt_open_connection(struct beiscsi_hba *phba, struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; - struct tcp_connect_and_offload_in *req; + struct tcp_connect_and_offload_in_v1 *req; unsigned short def_hdr_id; unsigned short def_data_id; struct phys_addr template_address = { 0, 0 }; struct phys_addr *ptemplate_address; unsigned int tag = 0; - unsigned int i; + unsigned int i, ulp_num; unsigned short cid = beiscsi_ep->ep_cid; struct be_sge *sge; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba); - def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba); + + ulp_num = phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(cid)].ulp_num; + + def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba, ulp_num); + def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba, ulp_num); ptemplate_address = &template_address; ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address); @@ -618,17 +739,16 @@ int mgmt_open_connection(struct beiscsi_hba *phba, return tag; } wrb = wrb_from_mccq(phba); - memset(wrb, 0, sizeof(*wrb)); sge = nonembedded_sgl(wrb); req = nonemb_cmd->va; memset(req, 0, sizeof(*req)); wrb->tag0 |= tag; - be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); + be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, - sizeof(*req)); + nonemb_cmd->size); if (dst_addr->sa_family == PF_INET) { __be32 s_addr = daddr_in->sin_addr.s_addr; req->ip_address.ip_type = BE2_IPV4; @@ -674,6 +794,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba, sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd->size); + + if (!is_chip_be2_be3r(phba)) { + req->hdr.version = MBX_CMD_VER1; + req->tcp_window_size = 0; + req->tcp_window_scale_count = 2; + } + be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); return tag; @@ -720,7 +847,7 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba 
*phba, int resp_buf_len) { struct be_ctrl_info *ctrl = &phba->ctrl; - struct be_mcc_wrb *wrb = wrb_from_mccq(phba); + struct be_mcc_wrb *wrb; struct be_sge *sge; unsigned int tag; int rc = 0; @@ -732,7 +859,8 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba, rc = -ENOMEM; goto free_cmd; } - memset(wrb, 0, sizeof(*wrb)); + + wrb = wrb_from_mccq(phba); wrb->tag0 |= tag; sge = nonembedded_sgl(wrb); @@ -744,19 +872,25 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba, be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); - rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va); + rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd); + + if (resp_buf) + memcpy(resp_buf, nonemb_cmd->va, resp_buf_len); + if (rc) { - beiscsi_log(phba, KERN_ERR, + /* Check if the MBX Cmd needs to be re-issued */ + if (rc == -EAGAIN) + return rc; + + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BG_%d : mgmt_exec_nonemb_cmd Failed status\n"); - rc = -EIO; - goto free_cmd; + if (rc != -EBUSY) + goto free_cmd; + else + return rc; } - - if (resp_buf) - memcpy(resp_buf, nonemb_cmd->va, resp_buf_len); - free_cmd: pci_free_consistent(ctrl->pdev, nonemb_cmd->size, nonemb_cmd->va, nonemb_cmd->dma); @@ -859,7 +993,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba, uint32_t boot_proto) { struct be_cmd_get_def_gateway_resp gtway_addr_set; - struct be_cmd_get_if_info_resp if_info; + struct be_cmd_get_if_info_resp *if_info; struct be_cmd_set_dhcp_req *dhcpreq; struct be_cmd_rel_dhcp_req *reldhcp; struct be_dma_mem nonemb_cmd; @@ -870,7 +1004,6 @@ int mgmt_set_ip(struct beiscsi_hba *phba, if (mgmt_get_all_if_id(phba)) return -EIO; - memset(&if_info, 0, sizeof(if_info)); ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ? BE2_IPV6 : BE2_IPV4 ; @@ -879,7 +1012,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba, return rc; if (boot_proto == ISCSI_BOOTPROTO_DHCP) { - if (if_info.dhcp_state) { + if (if_info->dhcp_state) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : DHCP Already Enabled\n"); return 0; @@ -892,9 +1025,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba, IP_V6_LEN : IP_V4_LEN; } else { - if (if_info.dhcp_state) { + if (if_info->dhcp_state) { - memset(&if_info, 0, sizeof(if_info)); + memset(if_info, 0, sizeof(*if_info)); rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR, sizeof(*reldhcp)); @@ -917,8 +1050,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba, } /* Delete the Static IP Set */ - if (if_info.ip_addr.addr[0]) { - rc = mgmt_static_ip_modify(phba, &if_info, ip_param, NULL, + if (if_info->ip_addr.addr[0]) { + rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL, IP_ACTION_DEL); if (rc) return rc; @@ -964,7 +1097,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba, return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); } else { - return mgmt_static_ip_modify(phba, &if_info, ip_param, + return mgmt_static_ip_modify(phba, if_info, ip_param, subnet_param, IP_ACTION_ADD); } @@ -1029,27 +1162,64 @@ int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type, } int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, - struct be_cmd_get_if_info_resp *if_info) + struct be_cmd_get_if_info_resp **if_info) { struct be_cmd_get_if_info_req *req; struct be_dma_mem nonemb_cmd; + uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp); int rc; if (mgmt_get_all_if_id(phba)) return -EIO; - rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, - OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO, - sizeof(*if_info)); - if (rc) - return rc; + do { + rc = 
mgmt_alloc_cmd_data(phba, &nonemb_cmd, + OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO, + ioctl_size); + if (rc) + return rc; - req = nonemb_cmd.va; - req->interface_hndl = phba->interface_handle; - req->ip_type = ip_type; + req = nonemb_cmd.va; + req->interface_hndl = phba->interface_handle; + req->ip_type = ip_type; - return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, if_info, - sizeof(*if_info)); + /* Allocate memory for if_info */ + *if_info = kzalloc(ioctl_size, GFP_KERNEL); + if (!*if_info) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, + "BG_%d : Memory Allocation Failure\n"); + + /* Free the DMA memory for the IOCTL issuing */ + pci_free_consistent(phba->ctrl.pdev, + nonemb_cmd.size, + nonemb_cmd.va, + nonemb_cmd.dma); + return -ENOMEM; + } + + rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, *if_info, + ioctl_size); + + /* Check if the error is because of Insufficent_Buffer */ + if (rc == -EAGAIN) { + + /* Get the new memory size */ + ioctl_size = ((struct be_cmd_resp_hdr *) + nonemb_cmd.va)->actual_resp_len; + ioctl_size += sizeof(struct be_cmd_req_hdr); + + /* Free the previous allocated DMA memory */ + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, + nonemb_cmd.dma); + + /* Free the virtual memory */ + kfree(*if_info); + } else + break; + } while (true); + return rc; } int mgmt_get_nic_conf(struct beiscsi_hba *phba, @@ -1223,7 +1393,6 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, { int rc; unsigned int tag; - struct be_mcc_wrb *wrb = NULL; tag = be_cmd_set_vlan(phba, vlan_tag); if (!tag) { @@ -1233,7 +1402,7 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, return -EBUSY; } - rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL); + rc = beiscsi_mccq_compl(phba, tag, NULL, NULL); if (rc) { beiscsi_log(phba, KERN_ERR, (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX), @@ -1260,6 +1429,87 @@ beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr, } /** + * beiscsi_fw_ver_disp()- Display Firmware Version + * @dev: ptr to device not used. + * @attr: device attribute, not used. + * @buf: contains formatted text Firmware version + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str); +} + +/** + * beiscsi_active_session_disp()- Display Sessions Active + * @dev: ptr to device not used. + * @attr: device attribute, not used. + * @buf: contains formatted text Session Count + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + uint16_t avlbl_cids = 0, ulp_num, len = 0, total_cids = 0; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num); + total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num); + len += snprintf(buf+len, PAGE_SIZE - len, + "ULP%d : %d\n", ulp_num, + (total_cids - avlbl_cids)); + } else + len += snprintf(buf+len, PAGE_SIZE - len, + "ULP%d : %d\n", ulp_num, 0); + } + + return len; +} + +/** + * beiscsi_free_session_disp()- Display Avaliable Session + * @dev: ptr to device not used. + * @attr: device attribute, not used. 
+ * @buf: contains formatted text Session Count + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + uint16_t ulp_num, len = 0; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) + len += snprintf(buf+len, PAGE_SIZE - len, + "ULP%d : %d\n", ulp_num, + BEISCSI_ULP_AVLBL_CID(phba, ulp_num)); + else + len += snprintf(buf+len, PAGE_SIZE - len, + "ULP%d : %d\n", ulp_num, 0); + } + + return len; +} + +/** * beiscsi_adap_family_disp()- Display adapter family. * @dev: ptr to device to get priv structure * @attr: device attribute, not used. @@ -1292,11 +1542,30 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr, break; default: return snprintf(buf, PAGE_SIZE, - "Unkown Adapter Family: 0x%x\n", dev_id); + "Unknown Adapter Family: 0x%x\n", dev_id); break; } } +/** + * beiscsi_phys_port()- Display Physical Port Identifier + * @dev: ptr to device not used. + * @attr: device attribute, not used. + * @buf: contains formatted text port identifier + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n", + phba->fw_config.phys_port); +} void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, struct wrb_handle *pwrb_handle, @@ -1370,10 +1639,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, memset(pwrb, 0, sizeof(*pwrb)); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, - max_burst_length, pwrb, params->dw[offsetof - (struct amap_beiscsi_offload_params, - max_burst_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_burst_length, pwrb, params->dw[offsetof (struct amap_beiscsi_offload_params, @@ -1395,7 +1660,9 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, params->dw[offsetof(struct amap_beiscsi_offload_params, first_burst_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, - max_recv_dataseg_len, pwrb, BEISCSI_MAX_RECV_DATASEG_LEN); + max_recv_dataseg_len, pwrb, + params->dw[offsetof(struct amap_beiscsi_offload_params, + max_recv_data_segment_length) / 32]); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_cxns, pwrb, BEISCSI_MAX_CXNS); AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb, diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 2e4968add79..24a8fc57747 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -156,25 +156,25 @@ union invalidate_commands_params { } __packed; struct mgmt_hba_attributes { - u8 flashrom_version_string[32]; - u8 manufacturer_name[32]; + u8 flashrom_version_string[BEISCSI_VER_STRLEN]; + u8 manufacturer_name[BEISCSI_VER_STRLEN]; u32 supported_modes; u8 seeprom_version_lo; u8 seeprom_version_hi; u8 rsvd0[2]; u32 fw_cmd_data_struct_version; u32 ep_fw_data_struct_version; - u32 future_reserved[12]; + u8 ncsi_version_string[12]; u32 default_extended_timeout; - u8 controller_model_number[32]; + u8 controller_model_number[BEISCSI_VER_STRLEN]; u8 controller_description[64]; - u8 controller_serial_number[32]; - u8 ip_version_string[32]; - u8 firmware_version_string[32]; - u8 bios_version_string[32]; - u8 redboot_version_string[32]; - u8 driver_version_string[32]; - u8 fw_on_flash_version_string[32]; + u8 controller_serial_number[BEISCSI_VER_STRLEN]; + u8 ip_version_string[BEISCSI_VER_STRLEN]; + u8 firmware_version_string[BEISCSI_VER_STRLEN]; + u8 bios_version_string[BEISCSI_VER_STRLEN]; + u8 redboot_version_string[BEISCSI_VER_STRLEN]; + u8 driver_version_string[BEISCSI_VER_STRLEN]; + u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN]; u32 functionalities_supported; u16 max_cdblength; u8 asic_revision; @@ -190,7 +190,8 @@ struct mgmt_hba_attributes { u32 firmware_post_status; u32 hba_mtu[8]; u8 iscsi_features; - u8 future_u8[3]; + u8 asic_generation; + u8 future_u8[2]; u32 future_u32[3]; } __packed; @@ -207,7 +208,7 @@ struct mgmt_controller_attributes { u64 unique_identifier; u8 netfilters; u8 rsvd0[3]; - u8 future_u32[4]; + u32 future_u32[4]; } __packed; struct be_mgmt_controller_attributes { @@ -293,7 +294,7 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba, struct be_cmd_get_nic_conf_resp *mac); int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, - struct be_cmd_get_if_info_resp *if_info); + struct be_cmd_get_if_info_resp **if_info); int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type, struct be_cmd_get_def_gateway_resp *gateway); @@ -311,9 +312,22 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag); ssize_t beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr, char *buf); +ssize_t beiscsi_fw_ver_disp(struct device *dev, + struct device_attribute *attr, char *buf); + +ssize_t beiscsi_active_session_disp(struct device *dev, + struct device_attribute *attr, char *buf); + ssize_t beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr, char *buf); + +ssize_t beiscsi_free_session_disp(struct device *dev, + struct device_attribute *attr, char *buf); + +ssize_t beiscsi_phys_port_disp(struct device *dev, + struct device_attribute *attr, char *buf); + void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, struct wrb_handle *pwrb_handle, struct be_mem_descriptor *mem_descr); @@ -321,5 +335,7 @@ void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, struct wrb_handle *pwrb_handle); void beiscsi_ue_detect(struct beiscsi_hba *phba); +int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, + struct be_set_eqd *, int num); #endif diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 342d7d9c099..e3f67b097a5 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -1367,10 +1367,6 @@ bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, struct bfa_iocfc_s *iocfc = &bfa->iocfc; bfa_status_t status; - 
iocfc->faa_args.faa_attr = attr; - iocfc->faa_args.faa_cb.faa_cbfn = cbfn; - iocfc->faa_args.faa_cb.faa_cbarg = cbarg; - status = bfa_faa_validate_request(bfa); if (status != BFA_STATUS_OK) return status; @@ -1378,6 +1374,10 @@ bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, if (iocfc->faa_args.busy == BFA_TRUE) return BFA_STATUS_DEVBUSY; + iocfc->faa_args.faa_attr = attr; + iocfc->faa_args.faa_cb.faa_cbfn = cbfn; + iocfc->faa_args.faa_cb.faa_cbarg = cbarg; + iocfc->faa_args.busy = BFA_TRUE; memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s)); bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC, @@ -1432,6 +1432,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg) { struct bfa_s *bfa = bfa_arg; + bfa->queue_process = BFA_FALSE; bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED); } @@ -1567,7 +1568,6 @@ bfa_iocfc_start(struct bfa_s *bfa) void bfa_iocfc_stop(struct bfa_s *bfa) { - bfa->queue_process = BFA_FALSE; bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP); } @@ -1674,7 +1674,6 @@ bfa_iocfc_disable(struct bfa_s *bfa) bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, "IOC Disable"); - bfa->queue_process = BFA_FALSE; bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE); } diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h index 0efdf312b42..877b86dd283 100644 --- a/drivers/scsi/bfa/bfa_defs.h +++ b/drivers/scsi/bfa/bfa_defs.h @@ -45,6 +45,7 @@ enum { BFA_MFG_TYPE_PROWLER_C = 1710, /* Prowler CNA only cards */ BFA_MFG_TYPE_PROWLER_D = 1860, /* Prowler Dual cards */ BFA_MFG_TYPE_CHINOOK = 1867, /* Chinook cards */ + BFA_MFG_TYPE_CHINOOK2 = 1869, /*!< Chinook2 cards */ BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */ }; @@ -59,7 +60,8 @@ enum { (type) == BFA_MFG_TYPE_ASTRA || \ (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \ (type) == BFA_MFG_TYPE_LIGHTNING || \ - (type) == BFA_MFG_TYPE_CHINOOK)) + (type) == BFA_MFG_TYPE_CHINOOK || \ + (type) == BFA_MFG_TYPE_CHINOOK2)) /* * Check if the card having old wwn/mac handling @@ -130,6 +132,7 @@ enum bfa_status { BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if persists, * contact support */ BFA_STATUS_EPROTOCOL = 6, /* Protocol error */ + BFA_STATUS_BADFLASH = 9, /* Flash is bad */ BFA_STATUS_SFP_UNSUPP = 10, /* Unsupported SFP - Replace SFP */ BFA_STATUS_UNKNOWN_VFID = 11, /* VF_ID not found */ BFA_STATUS_DATACORRUPTED = 12, /* Diag returned data corrupted */ @@ -185,6 +188,8 @@ enum bfa_status { BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */ BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */ BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */ + BFA_STATUS_BBCR_FC_ONLY = 201, /*!< BBCredit Recovery is supported for * + * FC mode only */ BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */ BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */ BFA_STATUS_TOPOLOGY_LOOP = 230, /* Topology is set to Loop */ @@ -197,7 +202,34 @@ enum bfa_status { BFA_STATUS_DPORT_DISABLED = 236, /* D-port mode is already disabled */ BFA_STATUS_CMD_NOTSUPP_MEZZ = 239, /* Cmd not supported for MEZZ card */ BFA_STATUS_FRU_NOT_PRESENT = 240, /* fru module not present */ + BFA_STATUS_DPORT_NO_SFP = 243, /* SFP is not present.\n D-port will be + * enabled but it will be operational + * only after inserting a valid SFP. 
*/ BFA_STATUS_DPORT_ERR = 245, /* D-port mode is enabled */ + BFA_STATUS_DPORT_ENOSYS = 254, /* Switch has no D_Port functionality */ + BFA_STATUS_DPORT_CANT_PERF = 255, /* Switch port is not D_Port capable + * or D_Port is disabled */ + BFA_STATUS_DPORT_LOGICALERR = 256, /* Switch D_Port fail */ + BFA_STATUS_DPORT_SWBUSY = 257, /* Switch port busy */ + BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT = 258, /*!< BB credit recovery is + * supported at max port speed alone */ + BFA_STATUS_ERROR_BBCR_ENABLED = 259, /*!< BB credit recovery + * is enabled */ + BFA_STATUS_INVALID_BBSCN = 260, /*!< Invalid BBSCN value. + * Valid range is [1-15] */ + BFA_STATUS_DDPORT_ERR = 261, /* Dynamic D_Port mode is active.\n To + * exit dynamic mode, disable D_Port on + * the remote port */ + BFA_STATUS_DPORT_SFPWRAP_ERR = 262, /* Clear e/o_wrap fail, check or + * replace SFP */ + BFA_STATUS_BBCR_CFG_NO_CHANGE = 265, /*!< BBCR is operational. + * Disable BBCR and try this operation again. */ + BFA_STATUS_DPORT_SW_NOTREADY = 268, /* Remote port is not ready to + * start dport test. Check remote + * port status. */ + BFA_STATUS_DPORT_INV_SFP = 271, /* Invalid SFP for D-PORT mode. */ + BFA_STATUS_DPORT_CMD_NOTSUPP = 273, /* Dport is not supported by + * remote port */ BFA_STATUS_MAX_VAL /* Unknown error code */ }; #define bfa_status_t enum bfa_status @@ -234,6 +266,7 @@ enum { BFA_ADAPTER_MFG_NAME_LEN = 8, /* manufacturer name length */ BFA_ADAPTER_SYM_NAME_LEN = 64, /* adapter symbolic name length */ BFA_ADAPTER_OS_TYPE_LEN = 64, /* adapter os type length */ + BFA_ADAPTER_UUID_LEN = 16, /* adapter uuid length */ }; struct bfa_adapter_attr_s { @@ -267,6 +300,7 @@ struct bfa_adapter_attr_s { u8 mfg_month; /* manufacturing month */ u16 mfg_year; /* manufacturing year */ u16 rsvd; + u8 uuid[BFA_ADAPTER_UUID_LEN]; }; /* @@ -380,7 +414,8 @@ struct bfa_ioc_attr_s { u8 port_mode; /* bfa_mode_s */ u8 cap_bm; /* capability */ u8 port_mode_cfg; /* bfa_mode_s */ - u8 rsvd[4]; /* 64bit align */ + u8 def_fn; /* 1 if default fn */ + u8 rsvd[3]; /* 64bit align */ }; /* @@ -517,17 +552,6 @@ struct bfa_ioc_aen_data_s { }; /* - * D-port states - * -*/ -enum bfa_dport_state { - BFA_DPORT_ST_DISABLED = 0, /* D-port is Disabled */ - BFA_DPORT_ST_DISABLING = 1, /* D-port is Disabling */ - BFA_DPORT_ST_ENABLING = 2, /* D-port is Enabling */ - BFA_DPORT_ST_ENABLED = 3, /* D-port is Enabled */ -}; - -/* * ---------------------- mfg definitions ------------ */ @@ -614,6 +638,7 @@ enum { BFA_PCI_DEVICE_ID_CT = 0x14, BFA_PCI_DEVICE_ID_CT_FC = 0x21, BFA_PCI_DEVICE_ID_CT2 = 0x22, + BFA_PCI_DEVICE_ID_CT2_QUAD = 0x23, }; #define bfa_asic_id_cb(__d) \ @@ -622,7 +647,9 @@ enum { #define bfa_asic_id_ct(__d) \ ((__d) == BFA_PCI_DEVICE_ID_CT || \ (__d) == BFA_PCI_DEVICE_ID_CT_FC) -#define bfa_asic_id_ct2(__d) ((__d) == BFA_PCI_DEVICE_ID_CT2) +#define bfa_asic_id_ct2(__d) \ + ((__d) == BFA_PCI_DEVICE_ID_CT2 || \ + (__d) == BFA_PCI_DEVICE_ID_CT2_QUAD) #define bfa_asic_id_ctc(__d) \ (bfa_asic_id_ct(__d) || bfa_asic_id_ct2(__d)) @@ -1126,6 +1153,7 @@ struct bfa_flash_attr_s { #define LB_PATTERN_DEFAULT 0xB5B5B5B5 #define QTEST_CNT_DEFAULT 10 #define QTEST_PAT_DEFAULT LB_PATTERN_DEFAULT +#define DPORT_ENABLE_LOOPCNT_DEFAULT (1024 * 1024) struct bfa_diag_memtest_s { u8 algo; @@ -1154,6 +1182,54 @@ struct bfa_diag_loopback_result_s { u8 rsvd[3]; }; +enum bfa_diag_dport_test_status { + DPORT_TEST_ST_IDLE = 0, /* the test has not started yet. 
*/ + DPORT_TEST_ST_FINAL = 1, /* the test done successfully */ + DPORT_TEST_ST_SKIP = 2, /* the test skipped */ + DPORT_TEST_ST_FAIL = 3, /* the test failed */ + DPORT_TEST_ST_INPRG = 4, /* the testing is in progress */ + DPORT_TEST_ST_RESPONDER = 5, /* test triggered from remote port */ + DPORT_TEST_ST_STOPPED = 6, /* the test stopped by user. */ + DPORT_TEST_ST_MAX +}; + +enum bfa_diag_dport_test_type { + DPORT_TEST_ELOOP = 0, + DPORT_TEST_OLOOP = 1, + DPORT_TEST_ROLOOP = 2, + DPORT_TEST_LINK = 3, + DPORT_TEST_MAX +}; + +enum bfa_diag_dport_test_opmode { + BFA_DPORT_OPMODE_AUTO = 0, + BFA_DPORT_OPMODE_MANU = 1, +}; + +struct bfa_diag_dport_subtest_result_s { + u8 status; /* bfa_diag_dport_test_status */ + u8 rsvd[7]; /* 64bit align */ + u64 start_time; /* timestamp */ +}; + +struct bfa_diag_dport_result_s { + wwn_t rp_pwwn; /* switch port wwn */ + wwn_t rp_nwwn; /* switch node wwn */ + u64 start_time; /* user/sw start time */ + u64 end_time; /* timestamp */ + u8 status; /* bfa_diag_dport_test_status */ + u8 mode; /* bfa_diag_dport_test_opmode */ + u8 rsvd; /* 64bit align */ + u8 speed; /* link speed for buf_reqd */ + u16 buffer_required; + u16 frmsz; /* frame size for buf_reqd */ + u32 lpcnt; /* Frame count */ + u32 pat; /* Pattern */ + u32 roundtrip_latency; /* in nano sec */ + u32 est_cable_distance; /* in meter */ + struct bfa_diag_dport_subtest_result_s subtest[DPORT_TEST_MAX]; +}; + struct bfa_diag_ledtest_s { u32 cmd; /* bfa_led_op_t */ u32 color; /* bfa_led_color_t */ diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h index ec03c8cd8da..638f441ffc3 100644 --- a/drivers/scsi/bfa/bfa_defs_svc.h +++ b/drivers/scsi/bfa/bfa_defs_svc.h @@ -105,6 +105,9 @@ struct bfa_fw_ioim_stats_s { * an error condition*/ u32 wait_for_si; /* FW wait for SI */ u32 rec_rsp_inval; /* REC rsp invalid */ + u32 rec_rsp_xchg_comp; /* REC rsp xchg complete */ + u32 rec_rsp_rd_si_ownd; /* REC rsp read si owned */ + u32 seqr_io_abort; /* target does not know cmd so abort */ u32 seqr_io_retry; /* SEQR failed so retry IO */ @@ -257,8 +260,6 @@ struct bfa_fw_port_lksm_stats_s { u32 nos_tx; /* No. of times NOS tx started */ u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */ u32 hwsm_lr_rx; /* No. 
of times LR rx-ed by HWSM */ - u32 bbsc_lr; /* LKSM LR tx for credit recovery */ - u32 rsvd; }; struct bfa_fw_port_snsm_stats_s { @@ -409,7 +410,7 @@ struct bfa_fw_trunk_stats_s { u32 rsvd; /* padding for 64 bit alignment */ }; -struct bfa_fw_advsm_stats_s { +struct bfa_fw_aport_stats_s { u32 flogi_sent; /* Flogi sent */ u32 flogi_acc_recvd; /* Flogi Acc received */ u32 flogi_rjt_recvd; /* Flogi rejects received */ @@ -419,6 +420,12 @@ struct bfa_fw_advsm_stats_s { u32 elp_accepted; /* ELP Accepted */ u32 elp_rejected; /* ELP rejected */ u32 elp_dropped; /* ELP dropped */ + + u32 bbcr_lr_count; /*!< BBCR Link Resets */ + u32 frame_lost_intrs; /*!< BBCR Frame loss intrs */ + u32 rrdy_lost_intrs; /*!< BBCR Rrdy loss intrs */ + + u32 rsvd; }; /* @@ -479,6 +486,14 @@ struct bfa_fw_ct_mod_stats_s { }; /* + * RDS mod stats + */ +struct bfa_fw_rds_stats_s { + u32 no_fid_drop_err; /* RDS no fid drop error */ + u32 rsvd; /* 64bit align */ +}; + +/* * IOC firmware stats */ struct bfa_fw_stats_s { @@ -489,10 +504,11 @@ struct bfa_fw_stats_s { struct bfa_fw_fcxchg_stats_s fcxchg_stats; struct bfa_fw_lps_stats_s lps_stats; struct bfa_fw_trunk_stats_s trunk_stats; - struct bfa_fw_advsm_stats_s advsm_stats; + struct bfa_fw_aport_stats_s aport_stats; struct bfa_fw_mac_mod_stats_s macmod_stats; struct bfa_fw_ct_mod_stats_s ctmod_stats; struct bfa_fw_eth_sndrcv_stats_s ethsndrcv_stats; + struct bfa_fw_rds_stats_s rds_stats; }; #define BFA_IOCFC_PATHTOV_MAX 60 @@ -545,6 +561,27 @@ struct bfa_qos_attr_s { struct bfa_qos_bw_s qos_bw_op; /* QOS bw operational */ }; +enum bfa_bbcr_state { + BFA_BBCR_DISABLED, /*!< BBCR is disable */ + BFA_BBCR_ONLINE, /*!< BBCR is online */ + BFA_BBCR_OFFLINE, /*!< BBCR is offline */ +}; + +enum bfa_bbcr_err_reason { + BFA_BBCR_ERR_REASON_NONE, /*!< Unknown */ + BFA_BBCR_ERR_REASON_SPEED_UNSUP, /*!< Port speed < max sup_speed */ + BFA_BBCR_ERR_REASON_PEER_UNSUP, /*!< BBCR is disable on peer port */ + BFA_BBCR_ERR_REASON_NON_BRCD_SW, /*!< Connected to non BRCD switch */ + BFA_BBCR_ERR_REASON_FLOGI_RJT, /*!< Login rejected by the switch */ +}; + +struct bfa_bbcr_attr_s { + u8 state; + u8 peer_bb_scn; + u8 reason; + u8 rsvd; +}; + /* * These fields should be displayed only from the CLI. * There will be a separate BFAL API (get_qos_vc_attr ?) 
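
[Reading aid, not part of the patch] The hunks above replace the older bbsc_* credit-recovery reporting with an explicit BB credit recovery (BBCR) status block: enum bfa_bbcr_state, enum bfa_bbcr_err_reason, and struct bfa_bbcr_attr_s, which bfa_port_link_s later carries in its union. Below is a minimal, self-contained userspace sketch of how a management CLI might render that attribute block. The enum values and struct layout are copied from the hunk above; the decode helpers (bbcr_state_str(), bbcr_reason_str()) and the sample main() are hypothetical, illustrative names only.

/*
 * Hedged sketch: assumes the BBCR definitions exactly as introduced in the
 * bfa_defs_svc.h hunk above. Compiles standalone; not driver code.
 */
#include <stdint.h>
#include <stdio.h>

enum bfa_bbcr_state {
	BFA_BBCR_DISABLED,	/* BBCR is disabled */
	BFA_BBCR_ONLINE,	/* BBCR is online */
	BFA_BBCR_OFFLINE,	/* BBCR is offline */
};

enum bfa_bbcr_err_reason {
	BFA_BBCR_ERR_REASON_NONE,		/* unknown */
	BFA_BBCR_ERR_REASON_SPEED_UNSUP,	/* port speed < max sup_speed */
	BFA_BBCR_ERR_REASON_PEER_UNSUP,		/* BBCR disabled on peer port */
	BFA_BBCR_ERR_REASON_NON_BRCD_SW,	/* connected to non-BRCD switch */
	BFA_BBCR_ERR_REASON_FLOGI_RJT,		/* login rejected by the switch */
};

struct bfa_bbcr_attr_s {
	uint8_t state;		/* enum bfa_bbcr_state */
	uint8_t peer_bb_scn;	/* peer BB_SCN from the FLOGI exchange */
	uint8_t reason;		/* enum bfa_bbcr_err_reason */
	uint8_t rsvd;
};

static const char *bbcr_state_str(uint8_t st)
{
	switch (st) {
	case BFA_BBCR_DISABLED:	return "Disabled";
	case BFA_BBCR_ONLINE:	return "Online";
	case BFA_BBCR_OFFLINE:	return "Offline";
	default:		return "Unknown";
	}
}

static const char *bbcr_reason_str(uint8_t rsn)
{
	switch (rsn) {
	case BFA_BBCR_ERR_REASON_SPEED_UNSUP:	return "port below max supported speed";
	case BFA_BBCR_ERR_REASON_PEER_UNSUP:	return "peer port does not support BBCR";
	case BFA_BBCR_ERR_REASON_NON_BRCD_SW:	return "connected to non-Brocade switch";
	case BFA_BBCR_ERR_REASON_FLOGI_RJT:	return "FLOGI rejected by the switch";
	default:				return "none";
	}
}

int main(void)
{
	/* Sample block: BBCR offline because the peer lacks support. */
	struct bfa_bbcr_attr_s attr = {
		.state = BFA_BBCR_OFFLINE,
		.peer_bb_scn = 0,
		.reason = BFA_BBCR_ERR_REASON_PEER_UNSUP,
	};

	printf("BBCR state : %s\n", bbcr_state_str(attr.state));
	printf("Peer BB_SCN: %u\n", (unsigned)attr.peer_bb_scn);
	printf("Reason     : %s\n", bbcr_reason_str(attr.reason));
	return 0;
}

One design point worth noting: because the whole status fits in a packed 4-byte block, the firmware can report it inline in bfa_port_link_s (via the union alongside the loop/QoS attributes) instead of the driver inferring credit-recovery health from scattered bbsc_* counters, which is what the deleted fields used to require.
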
@@ -736,6 +773,7 @@ enum bfa_port_states { BFA_PORT_ST_TOGGLING_QWAIT = 14, BFA_PORT_ST_FAA_MISCONFIG = 15, BFA_PORT_ST_DPORT = 16, + BFA_PORT_ST_DDPORT = 17, BFA_PORT_ST_MAX_STATE, }; @@ -857,6 +895,15 @@ enum bfa_lunmask_state_s { BFA_LUNMASK_UNINITIALIZED = 0xff, }; +/** + * FEC states + */ +enum bfa_fec_state_s { + BFA_FEC_ONLINE = 1, /*!< FEC is online */ + BFA_FEC_OFFLINE = 2, /*!< FEC is offline */ + BFA_FEC_OFFLINE_NOT_16G = 3, /*!< FEC is offline (speed not 16Gig) */ +}; + #pragma pack(1) /* * LUN mask configuration @@ -892,6 +939,9 @@ struct bfa_defs_fcpim_throttle_s { u16 rsvd; }; +#define BFA_BB_SCN_DEF 3 +#define BFA_BB_SCN_MAX 0x0F + /* * Physical port configuration */ @@ -907,8 +957,8 @@ struct bfa_port_cfg_s { u8 tx_bbcredit; /* transmit buffer credits */ u8 ratelimit; /* ratelimit enabled or not */ u8 trl_def_speed; /* ratelimit default speed */ - u8 bb_scn; /* BB_SCN value from FLOGI Exchg */ - u8 bb_scn_state; /* Config state of BB_SCN */ + u8 bb_cr_enabled; /*!< Config state of BB_SCN */ + u8 bb_scn; /*!< BB_SCN value for FLOGI Exchg */ u8 faa_state; /* FAA enabled/disabled */ u8 rsvd1; u16 path_tov; /* device path timeout */ @@ -950,6 +1000,7 @@ struct bfa_port_attr_s { bfa_boolean_t link_e2e_beacon; /* link beacon is on */ bfa_boolean_t bbsc_op_status; /* fc credit recovery oper * state */ + enum bfa_fec_state_s fec_state; /*!< current FEC state */ /* * Dynamic field - info from FCS @@ -961,7 +1012,7 @@ struct bfa_port_attr_s { /* FCoE specific */ u16 fcoe_vlan; - u8 rsvd1[6]; + u8 rsvd1[2]; }; /* @@ -1048,10 +1099,12 @@ struct bfa_port_link_s { u8 speed; /* Link speed (1/2/4/8 G) */ u32 linkstate_opt; /* Linkstate optional data (debug) */ u8 trunked; /* Trunked or not (1 or 0) */ - u8 resvd[7]; + u8 fec_state; /*!< State of FEC */ + u8 resvd[6]; struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ union { struct bfa_fcport_loop_info_s loop_info; + struct bfa_bbcr_attr_s bbcr_attr; union { struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ @@ -1215,9 +1268,11 @@ struct bfa_port_fc_stats_s { u64 bad_os_count; /* Invalid ordered sets */ u64 err_enc_out; /* Encoding err nonframe_8b10b */ u64 err_enc; /* Encoding err frame_8b10b */ - u64 bbsc_frames_lost; /* Credit Recovery-Frames Lost */ - u64 bbsc_credits_lost; /* Credit Recovery-Credits Lost */ - u64 bbsc_link_resets; /* Credit Recovery-Link Resets */ + u64 bbcr_frames_lost; /*!< BBCR Frames Lost */ + u64 bbcr_rrdys_lost; /*!< BBCR RRDYs Lost */ + u64 bbcr_link_resets; /*!< BBCR Link Resets */ + u64 bbcr_frame_lost_intrs; /*!< BBCR Frame loss intrs */ + u64 bbcr_rrdy_lost_intrs; /*!< BBCR Rrdy loss intrs */ u64 loop_timeouts; /* Loop timeouts */ }; diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h index bea821b9803..64069a0a3d0 100644 --- a/drivers/scsi/bfa/bfa_fc.h +++ b/drivers/scsi/bfa/bfa_fc.h @@ -1026,7 +1026,7 @@ struct fc_alpabm_s { #define FC_ED_TOV 2 #define FC_REC_TOV (FC_ED_TOV + 1) #define FC_RA_TOV 10 -#define FC_ELS_TOV ((2 * FC_RA_TOV) + 1) +#define FC_ELS_TOV (2 * FC_RA_TOV) #define FC_FCCT_TOV (3 * FC_RA_TOV) /* @@ -1531,6 +1531,12 @@ enum fdmi_hba_attribute_type { FDMI_HBA_ATTRIB_FW_VERSION, /* 0x0009 */ FDMI_HBA_ATTRIB_OS_NAME, /* 0x000A */ FDMI_HBA_ATTRIB_MAX_CT, /* 0x000B */ + FDMI_HBA_ATTRIB_NODE_SYM_NAME, /* 0x000C */ + FDMI_HBA_ATTRIB_VENDOR_INFO, /* 0x000D */ + FDMI_HBA_ATTRIB_NUM_PORTS, /* 0x000E */ + FDMI_HBA_ATTRIB_FABRIC_NAME, /* 0x000F */ + FDMI_HBA_ATTRIB_BIOS_VER, /* 0x0010 */ + FDMI_HBA_ATTRIB_VENDOR_ID = 0x00E0, FDMI_HBA_ATTRIB_MAX_TYPE }; @@ -1545,6 +1551,15 
@@ enum fdmi_port_attribute_type { FDMI_PORT_ATTRIB_FRAME_SIZE, /* 0x0004 */ FDMI_PORT_ATTRIB_DEV_NAME, /* 0x0005 */ FDMI_PORT_ATTRIB_HOST_NAME, /* 0x0006 */ + FDMI_PORT_ATTRIB_NODE_NAME, /* 0x0007 */ + FDMI_PORT_ATTRIB_PORT_NAME, /* 0x0008 */ + FDMI_PORT_ATTRIB_PORT_SYM_NAME, /* 0x0009 */ + FDMI_PORT_ATTRIB_PORT_TYPE, /* 0x000A */ + FDMI_PORT_ATTRIB_SUPP_COS, /* 0x000B */ + FDMI_PORT_ATTRIB_PORT_FAB_NAME, /* 0x000C */ + FDMI_PORT_ATTRIB_PORT_FC4_TYPE, /* 0x000D */ + FDMI_PORT_ATTRIB_PORT_STATE = 0x101, /* 0x0101 */ + FDMI_PORT_ATTRIB_PORT_NUM_RPRT = 0x102, /* 0x0102 */ FDMI_PORT_ATTR_MAX_TYPE }; diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 27b56096235..d7385d1d9c5 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -2882,7 +2882,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) iotag = be16_to_cpu(rsp->io_tag); ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); - WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag); + WARN_ON(ioim->iotag != iotag); bfa_ioim_cb_profile_comp(fcpim, ioim); diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index d428808fb37..a3ab5cce420 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -240,9 +240,6 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, u32 rsp_len, u32 resid_len, struct fchs_s *rspfchs); -static u8 bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric); -static bfa_boolean_t bfa_fcs_fabric_is_bbscn_enabled( - struct bfa_fcs_fabric_s *fabric); static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); @@ -404,8 +401,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, case BFA_FCS_FABRIC_SM_CONT_OP: bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, - fabric->bb_credit, - bfa_fcs_fabric_oper_bbscn(fabric)); + fabric->bb_credit); fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; if (fabric->auth_reqd && fabric->is_auth) { @@ -433,8 +429,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, case BFA_FCS_FABRIC_SM_NO_FABRIC: fabric->fab_type = BFA_FCS_FABRIC_N2N; bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, - fabric->bb_credit, - bfa_fcs_fabric_oper_bbscn(fabric)); + fabric->bb_credit); bfa_fcs_fabric_notify_online(fabric); bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); break; @@ -602,8 +597,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, case BFA_FCS_FABRIC_SM_NO_FABRIC: bfa_trc(fabric->fcs, fabric->bb_credit); bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, - fabric->bb_credit, - bfa_fcs_fabric_oper_bbscn(fabric)); + fabric->bb_credit); break; case BFA_FCS_FABRIC_SM_RETRY_OP: @@ -965,10 +959,6 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status) case BFA_STATUS_FABRIC_RJT: fabric->stats.flogi_rejects++; - if (fabric->lps->lsrjt_rsn == FC_LS_RJT_RSN_LOGICAL_ERROR && - fabric->lps->lsrjt_expl == FC_LS_RJT_EXP_NO_ADDL_INFO) - fabric->fcs->bbscn_flogi_rjt = BFA_TRUE; - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP); return; @@ -1014,14 +1004,11 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric) { struct bfa_s *bfa = fabric->fcs->bfa; struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg; - u8 alpa = 0, bb_scn = 0; + u8 alpa = 0; - if (bfa_fcs_fabric_is_bbscn_enabled(fabric) && - (!fabric->fcs->bbscn_flogi_rjt)) - bb_scn = BFA_FCS_PORT_DEF_BB_SCN; bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa), - pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd, bb_scn); + pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd); 
fabric->stats.flogi_sent++; } @@ -1102,40 +1089,6 @@ bfa_fcs_fabric_stop(struct bfa_fcs_fabric_s *fabric) } /* - * Computes operating BB_SCN value - */ -static u8 -bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric) -{ - u8 pr_bbscn = fabric->lps->pr_bbscn; - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa); - - if (!(fcport->cfg.bb_scn_state && pr_bbscn)) - return 0; - - /* return max of local/remote bb_scn values */ - return ((pr_bbscn > BFA_FCS_PORT_DEF_BB_SCN) ? - pr_bbscn : BFA_FCS_PORT_DEF_BB_SCN); -} - -/* - * Check if BB_SCN can be enabled. - */ -static bfa_boolean_t -bfa_fcs_fabric_is_bbscn_enabled(struct bfa_fcs_fabric_s *fabric) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa); - - if (bfa_ioc_get_fcmode(&fabric->fcs->bfa->ioc) && - fcport->cfg.bb_scn_state && - !bfa_fcport_is_qos_enabled(fabric->fcs->bfa) && - !bfa_fcport_is_trunk_enabled(fabric->fcs->bfa)) - return BFA_TRUE; - else - return BFA_FALSE; -} - -/* * Delete all vports and wait for vport delete completions. */ static void @@ -1273,7 +1226,6 @@ void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric) { bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn); - fabric->fcs->bbscn_flogi_rjt = BFA_FALSE; bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); } @@ -1480,7 +1432,6 @@ bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, } fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred); - fabric->lps->pr_bbscn = (be16_to_cpu(flogi->csp.rxsz) >> 12); bport->port_topo.pn2n.rem_port_wwn = flogi->port_name; bport->port_topo.pn2n.reply_oxid = fchs->ox_id; @@ -1513,8 +1464,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric) n2n_port->reply_oxid, pcfg->pwwn, pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa), - bfa_fcport_get_rx_bbcredit(bfa), - bfa_fcs_fabric_oper_bbscn(fabric)); + bfa_fcport_get_rx_bbcredit(bfa), 0); bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->bfa_tag, BFA_FALSE, FC_CLASS_3, diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index a449706c6bc..42bcb970445 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -243,24 +243,21 @@ struct bfa_fcs_fabric_s; * Symbolic Name. * * Physical Port's symbolic name Format : (Total 128 bytes) - * Adapter Model number/name : 12 bytes + * Adapter Model number/name : 16 bytes * Driver Version : 10 bytes * Host Machine Name : 30 bytes - * Host OS Info : 48 bytes + * Host OS Info : 44 bytes * Host OS PATCH Info : 16 bytes * ( remaining 12 bytes reserved to be used for separator) */ #define BFA_FCS_PORT_SYMBNAME_SEPARATOR " | " -#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ 12 +#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ 16 #define BFA_FCS_PORT_SYMBNAME_VERSION_SZ 10 #define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ 30 -#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48 +#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 44 #define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16 -/* bb_scn value in 2^bb_scn */ -#define BFA_FCS_PORT_DEF_BB_SCN 3 - /* * Get FC port ID for a logical port. 
*/ @@ -299,6 +296,7 @@ wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn); +void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname); void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, struct bfa_lport_info_s *port_info); void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port, @@ -630,6 +628,9 @@ void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, #define BFA_FCS_FDMI_SUPP_SPEEDS_10G FDMI_TRANS_SPEED_10G +#define BFA_FCS_FDMI_VENDOR_INFO_LEN 8 +#define BFA_FCS_FDMI_FC4_TYPE_LEN 32 + /* * HBA Attribute Block : BFA internal representation. Note : Some variable * sizes have been trimmed to suit BFA For Ex : Model will be "Brocade". Based @@ -640,25 +641,39 @@ struct bfa_fcs_fdmi_hba_attr_s { u8 manufacturer[64]; u8 serial_num[64]; u8 model[16]; - u8 model_desc[256]; + u8 model_desc[128]; u8 hw_version[8]; u8 driver_version[BFA_VERSION_LEN]; u8 option_rom_ver[BFA_VERSION_LEN]; u8 fw_version[BFA_VERSION_LEN]; u8 os_name[256]; __be32 max_ct_pyld; + struct bfa_lport_symname_s node_sym_name; + u8 vendor_info[BFA_FCS_FDMI_VENDOR_INFO_LEN]; + __be32 num_ports; + wwn_t fabric_name; + u8 bios_ver[BFA_VERSION_LEN]; }; /* * Port Attribute Block */ struct bfa_fcs_fdmi_port_attr_s { - u8 supp_fc4_types[32]; /* supported FC4 types */ + u8 supp_fc4_types[BFA_FCS_FDMI_FC4_TYPE_LEN]; __be32 supp_speed; /* supported speed */ __be32 curr_speed; /* current Speed */ __be32 max_frm_size; /* max frame size */ u8 os_device_name[256]; /* OS device Name */ u8 host_name[256]; /* host name */ + wwn_t port_name; + wwn_t node_name; + struct bfa_lport_symname_s port_sym_name; + __be32 port_type; + enum fc_cos scos; + wwn_t port_fabric_name; + u8 port_act_fc4_type[BFA_FCS_FDMI_FC4_TYPE_LEN]; + __be32 port_state; + __be32 num_ports; }; struct bfa_fcs_stats_s { @@ -683,8 +698,6 @@ struct bfa_fcs_s { struct bfa_trc_mod_s *trcmod; /* tracing module */ bfa_boolean_t vf_enabled; /* VF mode is enabled */ bfa_boolean_t fdmi_enabled; /* FDMI is enabled */ - bfa_boolean_t bbscn_enabled; /* Driver Config Parameter */ - bfa_boolean_t bbscn_flogi_rjt;/* FLOGI reject due to BB_SCN */ bfa_boolean_t min_cfg; /* min cfg enabled/disabled */ u16 port_vfid; /* port default VF ID */ struct bfa_fcs_driver_info_s driver_info; diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 1224d0462a4..ff75ef89175 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -773,7 +773,20 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, bfa_trc(lport->fcs, fchs->type); if (!bfa_fcs_lport_is_online(lport)) { - bfa_stats(lport, uf_recv_drops); + /* + * In direct attach topology, it is possible to get a PLOGI + * before the lport is online due to port feature + * (QoS/Trunk/FEC/CR), so send a rjt + */ + if ((fchs->type == FC_TYPE_ELS) && + (els_cmd->els_code == FC_ELS_PLOGI)) { + bfa_fcs_lport_send_ls_rjt(lport, fchs, + FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD, + FC_LS_RJT_EXP_NO_ADDL_INFO); + bfa_stats(lport, plogi_rcvd); + } else + bfa_stats(lport, uf_recv_drops); + return; } @@ -1097,6 +1110,17 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport, bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); } +void +bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, + char *symname) +{ + strcpy(port->port_cfg.sym_name.symname, symname); + + if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online)) + bfa_fcs_lport_ns_util_send_rspn_id( + BFA_FCS_GET_NS_FROM_PORT(port), 
NULL); +} + /* * fcs_lport_api */ @@ -2048,10 +2072,71 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT); templen = sizeof(fcs_hba_attr->max_ct_pyld); memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; len += templen; count++; attr->len = cpu_to_be16(templen + sizeof(attr->type) + sizeof(templen)); + /* + * Send extended attributes ( FOS 7.1 support ) + */ + if (fdmi->retry_cnt == 0) { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODE_SYM_NAME); + templen = sizeof(fcs_hba_attr->node_sym_name); + memcpy(attr->value, &fcs_hba_attr->node_sym_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_VENDOR_ID); + templen = sizeof(fcs_hba_attr->vendor_info); + memcpy(attr->value, &fcs_hba_attr->vendor_info, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NUM_PORTS); + templen = sizeof(fcs_hba_attr->num_ports); + memcpy(attr->value, &fcs_hba_attr->num_ports, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FABRIC_NAME); + templen = sizeof(fcs_hba_attr->fabric_name); + memcpy(attr->value, &fcs_hba_attr->fabric_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_BIOS_VER); + templen = sizeof(fcs_hba_attr->bios_ver); + memcpy(attr->value, &fcs_hba_attr->bios_ver, templen); + templen = fc_roundup(attr->len, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + count++; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + } /* * Update size of payload @@ -2252,6 +2337,113 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi, sizeof(templen)); } + if (fdmi->retry_cnt == 0) { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_NODE_NAME); + templen = sizeof(fcs_port_attr.node_name); + memcpy(attr->value, &fcs_port_attr.node_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_NAME); + templen = sizeof(fcs_port_attr.port_name); + memcpy(attr->value, &fcs_port_attr.port_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += 
sizeof(attr->type) + sizeof(attr->len) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + if (fcs_port_attr.port_sym_name.symname[0] != '\0') { + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = + cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SYM_NAME); + templen = sizeof(fcs_port_attr.port_sym_name); + memcpy(attr->value, + &fcs_port_attr.port_sym_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + + sizeof(attr->type) + sizeof(templen)); + } + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_TYPE); + templen = sizeof(fcs_port_attr.port_type); + memcpy(attr->value, &fcs_port_attr.port_type, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_COS); + templen = sizeof(fcs_port_attr.scos); + memcpy(attr->value, &fcs_port_attr.scos, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_FAB_NAME); + templen = sizeof(fcs_port_attr.port_fabric_name); + memcpy(attr->value, &fcs_port_attr.port_fabric_name, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_FC4_TYPE); + templen = sizeof(fcs_port_attr.port_act_fc4_type); + memcpy(attr->value, fcs_port_attr.port_act_fc4_type, + templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_STATE); + templen = sizeof(fcs_port_attr.port_state); + memcpy(attr->value, &fcs_port_attr.port_state, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + + attr = (struct fdmi_attr_s *) curr_ptr; + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_NUM_RPRT); + templen = sizeof(fcs_port_attr.num_ports); + memcpy(attr->value, &fcs_port_attr.num_ports, templen); + templen = fc_roundup(templen, sizeof(u32)); + curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; + len += templen; + ++count; + attr->len = cpu_to_be16(templen + sizeof(attr->type) + + sizeof(templen)); + } + /* * Update size of payload */ @@ -2458,6 +2650,15 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, /* Retrieve the max frame size from the port attr */ bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr); hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size; + + strncpy(hba_attr->node_sym_name.symname, + port->port_cfg.node_sym_name.symname, 
BFA_SYMNAME_MAXLEN); + strcpy(hba_attr->vendor_info, "BROCADE"); + hba_attr->num_ports = + cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc)); + hba_attr->fabric_name = port->fabric->lps->pr_nwwn; + strncpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN); + } static void @@ -2467,6 +2668,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, struct bfa_fcs_lport_s *port = fdmi->ms->port; struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; struct bfa_port_attr_s pport_attr; + struct bfa_lport_attr_s lport_attr; memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s)); @@ -2531,6 +2733,18 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, strncpy(port_attr->host_name, (char *)driver_info->host_machine_name, sizeof(port_attr->host_name)); + port_attr->node_name = bfa_fcs_lport_get_nwwn(port); + port_attr->port_name = bfa_fcs_lport_get_pwwn(port); + + strncpy(port_attr->port_sym_name.symname, + (char *)&bfa_fcs_lport_get_psym_name(port), BFA_SYMNAME_MAXLEN); + bfa_fcs_lport_get_attr(port, &lport_attr); + port_attr->port_type = cpu_to_be32(lport_attr.port_type); + port_attr->scos = pport_attr.cos_supported; + port_attr->port_fabric_name = port->fabric->lps->pr_nwwn; + fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->port_act_fc4_type); + port_attr->port_state = cpu_to_be32(pport_attr.port_state); + port_attr->num_ports = cpu_to_be32(port->num_rports); } /* @@ -4950,9 +5164,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced) u8 *psymbl = &symbl[0]; int len; - if (!bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online)) - return; - /* Avoid sending RSPN in the following states. */ if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) || bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) || @@ -5798,6 +6009,7 @@ enum bfa_fcs_vport_event { BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wnn error*/ BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */ BFA_FCS_VPORT_SM_STOPCOMP = 14, /* vport delete completion */ + BFA_FCS_VPORT_SM_FABRIC_MAX = 15, /* max vports on fabric */ }; static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport, @@ -5983,6 +6195,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, break; case BFA_FCS_VPORT_SM_RSP_FAILED: + case BFA_FCS_VPORT_SM_FABRIC_MAX: bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); break; @@ -6053,6 +6266,7 @@ bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport, case BFA_FCS_VPORT_SM_OFFLINE: case BFA_FCS_VPORT_SM_RSP_ERROR: case BFA_FCS_VPORT_SM_RSP_FAILED: + case BFA_FCS_VPORT_SM_FABRIC_MAX: case BFA_FCS_VPORT_SM_RSP_DUP_WWN: bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); @@ -6338,7 +6552,7 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport) else { bfa_fcs_vport_aen_post(&vport->lport, BFA_LPORT_AEN_NPIV_FABRIC_MAX); - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED); + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_FABRIC_MAX); } break; @@ -6724,7 +6938,19 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status) break; } - bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + else + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED); + + break; + + case BFA_STATUS_ETIMER: + vport->vport_stats.fdisc_timeouts++; + if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR); + else + 
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED); break; case BFA_STATUS_FABRIC_RJT: diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index 58ac643ba9f..2035b0d6435 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -189,8 +189,8 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event) break; case RPSM_EVENT_PLOGI_RCVD: - bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_fcs_online); - bfa_fcs_rport_fcs_online_action(rport); + bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); + bfa_fcs_rport_send_plogiacc(rport, NULL); break; case RPSM_EVENT_PLOGI_COMP: @@ -2577,7 +2577,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi) port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred); bfa_fcport_set_tx_bbcredit(port->fcs->bfa, - port->fabric->bb_credit, 0); + port->fabric->bb_credit); } } @@ -3430,9 +3430,10 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, num_ents = be16_to_cpu(rpsc2_acc->num_pids); bfa_trc(rport->fcs, num_ents); if (num_ents > 0) { - WARN_ON(rpsc2_acc->port_info[0].pid == rport->pid); + WARN_ON(be32_to_cpu(rpsc2_acc->port_info[0].pid) != + bfa_ntoh3b(rport->pid)); bfa_trc(rport->fcs, - be16_to_cpu(rpsc2_acc->port_info[0].pid)); + be32_to_cpu(rpsc2_acc->port_info[0].pid)); bfa_trc(rport->fcs, be16_to_cpu(rpsc2_acc->port_info[0].speed)); bfa_trc(rport->fcs, diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 0116c1032e2..315d6d6dcfc 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -21,6 +21,7 @@ #include "bfi_reg.h" #include "bfa_defs.h" #include "bfa_defs_svc.h" +#include "bfi.h" BFA_TRC_FILE(CNA, IOC); @@ -45,6 +46,14 @@ BFA_TRC_FILE(CNA, IOC); #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) +#define bfa_ioc_state_disabled(__sm) \ + (((__sm) == BFI_IOC_UNINIT) || \ + ((__sm) == BFI_IOC_INITING) || \ + ((__sm) == BFI_IOC_HWINIT) || \ + ((__sm) == BFI_IOC_DISABLED) || \ + ((__sm) == BFI_IOC_FAIL) || \ + ((__sm) == BFI_IOC_CFG_DISABLED)) + /* * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 
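 * The ioc_{set,get}_[alt_]fwstate hooks introduced in this patch follow the same asic-specific indirection: crossbow (CB) keeps sync-join bits in the fwstate register and masks them inside its accessors, while CT/CT2 read and write the register directly.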
*/ @@ -67,6 +76,14 @@ BFA_TRC_FILE(CNA, IOC); ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) #define bfa_ioc_sync_complete(__ioc) \ ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) +#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \ + ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate)) +#define bfa_ioc_get_cur_ioc_fwstate(__ioc) \ + ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc)) +#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \ + ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate)) +#define bfa_ioc_get_alt_ioc_fwstate(__ioc) \ + ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc)) #define bfa_ioc_mbox_cmd_pending(__ioc) \ (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ @@ -94,6 +111,12 @@ static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc); static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); +static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp( + struct bfi_ioc_image_hdr_s *base_fwhdr, + struct bfi_ioc_image_hdr_s *fwhdr_to_cmp); +static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp( + struct bfa_ioc_s *ioc, + struct bfi_ioc_image_hdr_s *base_fwhdr); /* * IOC state machine definitions/declarations @@ -698,7 +721,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) } /* h/w sem init */ - fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate); + fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc); if (fwstate == BFI_IOC_UNINIT) { writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg); goto sem_get; @@ -725,8 +748,8 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) bfa_trc(iocpf->ioc, fwstate); bfa_trc(iocpf->ioc, swab32(fwhdr.exec)); - writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate); - writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate); + bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT); + bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT); /* * Unlock the hw semaphore. Should be here only once per boot. @@ -1037,7 +1060,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) */ case IOCPF_E_TIMEOUT: - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); break; @@ -1138,7 +1161,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) case IOCPF_E_SEMLOCKED: bfa_ioc_notify_fail(ioc); bfa_ioc_sync_leave(ioc); - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); break; @@ -1227,7 +1250,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) bfa_ioc_notify_fail(ioc); if (!iocpf->auto_recover) { bfa_ioc_sync_leave(ioc); - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); writel(1, ioc->ioc_regs.ioc_sem_reg); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); } else { @@ -1446,28 +1469,42 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) } /* - * Returns TRUE if same. + * Returns TRUE if driver is willing to work with current smem f/w version. 
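+ * Decision summary: reject smem f/w that is incompatible with or older + * than the driver image; reject smem f/w when the flash image is better; + * accept when flash and smem match; otherwise accept smem only when it + * exactly matches the driver image.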
*/ bfa_boolean_t -bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) +bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, + struct bfi_ioc_image_hdr_s *smem_fwhdr) { struct bfi_ioc_image_hdr_s *drv_fwhdr; - int i; + enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp; drv_fwhdr = (struct bfi_ioc_image_hdr_s *) bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); - for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { - if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) { - bfa_trc(ioc, i); - bfa_trc(ioc, fwhdr->md5sum[i]); - bfa_trc(ioc, drv_fwhdr->md5sum[i]); - return BFA_FALSE; - } + /* + * If smem is incompatible or old, driver should not work with it. + */ + drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr); + if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP || + drv_smem_cmp == BFI_IOC_IMG_VER_OLD) { + return BFA_FALSE; } - bfa_trc(ioc, fwhdr->md5sum[0]); - return BFA_TRUE; + /* + * If flash has a better f/w than smem, do not work with smem. + * If smem f/w == flash f/w, work with it (smem f/w is neither old + * nor incompatible at this point). + * If flash is old or incompatible, work with smem iff smem f/w == drv f/w. + */ + smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr); + + if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) { + return BFA_FALSE; + } else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) { + return BFA_TRUE; + } else { + return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ? + BFA_TRUE : BFA_FALSE; + } } /* @@ -1477,17 +1514,9 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) static bfa_boolean_t bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env) { - struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; + struct bfi_ioc_image_hdr_s fwhdr; bfa_ioc_fwver_get(ioc, &fwhdr); - drv_fwhdr = (struct bfi_ioc_image_hdr_s *) - bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); - - if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) { - bfa_trc(ioc, fwhdr.signature); - bfa_trc(ioc, drv_fwhdr->signature); - return BFA_FALSE; - } if (swab32(fwhdr.bootenv) != boot_env) { bfa_trc(ioc, fwhdr.bootenv); @@ -1498,6 +1527,168 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env) return bfa_ioc_fwver_cmp(ioc, &fwhdr); } +static bfa_boolean_t +bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1, + struct bfi_ioc_image_hdr_s *fwhdr_2) +{ + int i; + + for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) + if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i]) + return BFA_FALSE; + + return BFA_TRUE; +} + +/* + * Returns TRUE if major, minor and maintenance versions are the same. + * If patch, phase and build versions are also the same, the MD5 + * checksums must match as well.
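+ * For example, driver 3.2.3.0 against smem 3.2.3.1 (illustrative + * numbers): signature, major, minor and maint match, the patch levels + * differ, so the md5 comparison is skipped and the images are treated + * as compatible.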
+ */ +static bfa_boolean_t +bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr, + struct bfi_ioc_image_hdr_s *fwhdr_to_cmp) +{ + if (drv_fwhdr->signature != fwhdr_to_cmp->signature) + return BFA_FALSE; + + if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major) + return BFA_FALSE; + + if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor) + return BFA_FALSE; + + if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint) + return BFA_FALSE; + + if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch && + drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase && + drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) { + return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp); + } + + return BFA_TRUE; +} + +static bfa_boolean_t +bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr) +{ + if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF) + return BFA_FALSE; + + return BFA_TRUE; +} + +static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr) +{ + if (fwhdr->fwver.phase == 0 && + fwhdr->fwver.build == 0) + return BFA_TRUE; + + return BFA_FALSE; +} + +/* + * Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better. + */ +static enum bfi_ioc_img_ver_cmp_e +bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr, + struct bfi_ioc_image_hdr_s *fwhdr_to_cmp) +{ + if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE) + return BFI_IOC_IMG_VER_INCOMP; + + if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch) + return BFI_IOC_IMG_VER_BETTER; + + else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch) + return BFI_IOC_IMG_VER_OLD; + + /* + * GA takes priority over internal builds of the same patch stream. + * At this point major minor maint and patch numbers are same. + */ + + if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) { + if (fwhdr_is_ga(fwhdr_to_cmp)) + return BFI_IOC_IMG_VER_SAME; + else + return BFI_IOC_IMG_VER_OLD; + } else { + if (fwhdr_is_ga(fwhdr_to_cmp)) + return BFI_IOC_IMG_VER_BETTER; + } + + if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase) + return BFI_IOC_IMG_VER_BETTER; + else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase) + return BFI_IOC_IMG_VER_OLD; + + if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build) + return BFI_IOC_IMG_VER_BETTER; + else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build) + return BFI_IOC_IMG_VER_OLD; + + /* + * All Version Numbers are equal. + * Md5 check to be done as a part of compatibility check. 
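+ * If the numbers all match but the md5 sums differ, the compatibility + * check above has already caused BFI_IOC_IMG_VER_INCOMP to be returned.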
+ */ + return BFI_IOC_IMG_VER_SAME; +} + +#define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */ + +bfa_status_t +bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off, + u32 *fwimg) +{ + return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva, + BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)), + (char *)fwimg, BFI_FLASH_CHUNK_SZ); +} + +static enum bfi_ioc_img_ver_cmp_e +bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc, + struct bfi_ioc_image_hdr_s *base_fwhdr) +{ + struct bfi_ioc_image_hdr_s *flash_fwhdr; + bfa_status_t status; + u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS]; + + status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg); + if (status != BFA_STATUS_OK) + return BFI_IOC_IMG_VER_INCOMP; + + flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg; + if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE) + return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr); + else + return BFI_IOC_IMG_VER_INCOMP; +} + + +/* + * Invalidate fwver signature + */ +bfa_status_t +bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc) +{ + + u32 pgnum, pgoff; + u32 loff = 0; + enum bfi_ioc_state ioc_fwstate; + + ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); + if (!bfa_ioc_state_disabled(ioc_fwstate)) + return BFA_STATUS_ADAPTER_ENABLED; + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); + pgoff = PSS_SMEM_PGOFF(loff); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN); + + return BFA_STATUS_OK; +} + /* * Conditionally flush any pending message from firmware at start. */ @@ -1519,7 +1710,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) u32 boot_type; u32 boot_env; - ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); + ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); if (force) ioc_fwstate = BFI_IOC_UNINIT; @@ -1536,8 +1727,8 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env); if (!fwvalid) { - bfa_ioc_boot(ioc, boot_type, boot_env); - bfa_ioc_poll_fwinit(ioc); + if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK) + bfa_ioc_poll_fwinit(ioc); return; } @@ -1572,8 +1763,8 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) /* * Initialize the h/w for any other states. */ - bfa_ioc_boot(ioc, boot_type, boot_env); - bfa_ioc_poll_fwinit(ioc); + if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK) + bfa_ioc_poll_fwinit(ioc); } static void @@ -1676,7 +1867,7 @@ bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) /* * Initiate a full firmware download. 
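 * For an OS boot from flash (BFI_FWBOOT_TYPE_FLASH) the image is read back chunk by chunk via bfa_ioc_flash_img_get_chnk(); otherwise the chunks come from the driver-bundled image via bfa_cb_image_get_chunk().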
*/ -static void +static bfa_status_t bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) { @@ -1686,28 +1877,60 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, u32 chunkno = 0; u32 i; u32 asicmode; + u32 fwimg_size; + u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS]; + bfa_status_t status; + + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32); + + status = bfa_ioc_flash_img_get_chnk(ioc, + BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf); + if (status != BFA_STATUS_OK) + return status; + + fwimg = fwimg_buf; + } else { + fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); + fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), + BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); + } + + bfa_trc(ioc, fwimg_size); - bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc))); - fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno); pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); pgoff = PSS_SMEM_PGOFF(loff); writel(pgnum, ioc->ioc_regs.host_page_num_fn); - for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) { + for (i = 0; i < fwimg_size; i++) { if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { chunkno = BFA_IOC_FLASH_CHUNK_NO(i); - fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), + + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + status = bfa_ioc_flash_img_get_chnk(ioc, + BFA_IOC_FLASH_CHUNK_ADDR(chunkno), + fwimg_buf); + if (status != BFA_STATUS_OK) + return status; + + fwimg = fwimg_buf; + } else { + fwimg = bfa_cb_image_get_chunk( + bfa_ioc_asic_gen(ioc), BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); + } } /* * write smem */ bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, - cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])); + fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]); loff += sizeof(u32); @@ -1725,8 +1948,12 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, ioc->ioc_regs.host_page_num_fn); /* - * Set boot type and device mode at the end. + * Set boot type, env and device mode at the end. */ + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + boot_type = BFI_FWBOOT_TYPE_NORMAL; + } asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, ioc->port0_mode, ioc->port1_mode); bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF, @@ -1735,6 +1962,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, swab32(boot_type)); bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF, swab32(boot_env)); + return BFA_STATUS_OK; } @@ -1850,7 +2078,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz) bfa_trc(ioc, len); for (i = 0; i < len; i++) { r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); - buf[i] = be32_to_cpu(r32); + buf[i] = swab32(r32); loff += sizeof(u32); /* @@ -1994,28 +2222,51 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc) * Interface used by diag module to do firmware boot with memory test * as the entry vector. */ -void +bfa_status_t bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) { + struct bfi_ioc_image_hdr_s *drv_fwhdr; + bfa_status_t status; bfa_ioc_stats(ioc, ioc_boots); if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) - return; + return BFA_STATUS_FAILED; + + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_NORMAL) { + + drv_fwhdr = (struct bfi_ioc_image_hdr_s *) + bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); + + /* + * Work with Flash iff flash f/w is better than driver f/w. 
+ * Otherwise push drivers firmware. + */ + if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) == + BFI_IOC_IMG_VER_BETTER) + boot_type = BFI_FWBOOT_TYPE_FLASH; + } /* * Initialize IOC state of all functions on a chip reset. */ if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { - writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate); - writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST); } else { - writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate); - writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING); } bfa_ioc_msgflush(ioc); - bfa_ioc_download_fw(ioc, boot_type, boot_env); - bfa_ioc_lpu_start(ioc); + status = bfa_ioc_download_fw(ioc, boot_type, boot_env); + if (status == BFA_STATUS_OK) + bfa_ioc_lpu_start(ioc); + else { + WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST); + bfa_iocpf_timeout(ioc); + } + return status; } /* @@ -2038,7 +2289,7 @@ bfa_ioc_is_operational(struct bfa_ioc_s *ioc) bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) { - u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); + u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc); return ((r32 != BFI_IOC_UNINIT) && (r32 != BFI_IOC_INITING) && @@ -2188,6 +2439,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, break; case BFA_PCI_DEVICE_ID_CT2: + case BFA_PCI_DEVICE_ID_CT2_QUAD: ioc->asic_gen = BFI_ASIC_GEN_CT2; if (clscode == BFI_PCIFN_CLASS_FC && pcidev->ssid == BFA_PCI_CT2_SSID_FC) { @@ -2410,14 +2662,6 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch); } -#define bfa_ioc_state_disabled(__sm) \ - (((__sm) == BFI_IOC_UNINIT) || \ - ((__sm) == BFI_IOC_INITING) || \ - ((__sm) == BFI_IOC_HWINIT) || \ - ((__sm) == BFI_IOC_DISABLED) || \ - ((__sm) == BFI_IOC_FAIL) || \ - ((__sm) == BFI_IOC_CFG_DISABLED)) - /* * Check if adapter is disabled -- both IOCs should be in a disabled * state. 
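The policy bfa_ioc_boot() applies above is easy to miss in the diff: on a normal OS boot the flash image is used only when it compares strictly better than the driver-bundled image. A minimal sketch of that decision, using the comparison helpers added by this patch (pick_boot_type() is a hypothetical name, not driver code):

static u32
pick_boot_type(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;

	/* only a normal OS boot may be redirected to flash */
	if (boot_env != BFI_FWBOOT_ENV_OS ||
	    boot_type != BFI_FWBOOT_TYPE_NORMAL)
		return boot_type;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/* flash wins only on a strictly BETTER version compare */
	if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) == BFI_IOC_IMG_VER_BETTER)
		return BFI_FWBOOT_TYPE_FLASH;

	return boot_type;
}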
@@ -2430,12 +2674,12 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) return BFA_FALSE; - ioc_state = readl(ioc->ioc_regs.ioc_fwstate); + ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc); if (!bfa_ioc_state_disabled(ioc_state)) return BFA_FALSE; if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { - ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate); + ioc_state = bfa_ioc_get_alt_ioc_fwstate(ioc); if (!bfa_ioc_state_disabled(ioc_state)) return BFA_FALSE; } @@ -2449,8 +2693,8 @@ void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc) { - writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); - writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT); } #define BFA_MFG_NAME "Brocade" @@ -2500,6 +2744,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, ad_attr->mfg_day = ioc_attr->mfg_day; ad_attr->mfg_month = ioc_attr->mfg_month; ad_attr->mfg_year = ioc_attr->mfg_year; + memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN); } enum bfa_ioc_type_e @@ -2564,13 +2809,19 @@ void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) { struct bfi_ioc_attr_s *ioc_attr; + u8 nports = bfa_ioc_get_nports(ioc); WARN_ON(!model); memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); ioc_attr = ioc->attr; - snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", + if (bfa_asic_id_ct2(ioc->pcidev.device_id) && + (!bfa_mfg_is_mezz(ioc_attr->card_type))) + snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s", + BFA_MFG_NAME, ioc_attr->card_type, nports, "p"); + else + snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", BFA_MFG_NAME, ioc_attr->card_type); } @@ -2620,7 +2871,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s)); ioc_attr->state = bfa_ioc_get_state(ioc); - ioc_attr->port_id = ioc->port_id; + ioc_attr->port_id = bfa_ioc_portid(ioc); ioc_attr->port_mode = ioc->port_mode; ioc_attr->port_mode_cfg = ioc->port_mode_cfg; ioc_attr->cap_bm = ioc->ad_cap_bm; @@ -2629,8 +2880,9 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); - ioc_attr->pci_attr.device_id = ioc->pcidev.device_id; - ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func; + ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc); + ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc); + ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc)); bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); } @@ -2917,7 +3169,7 @@ bfa_iocpf_sem_timeout(void *ioc_arg) static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc) { - u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate); + u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); bfa_trc(ioc, fwstate); @@ -3626,7 +3878,7 @@ bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg) bfa_trc(sfp, sfp->data_valid); if (sfp->data_valid) { u32 size = sizeof(struct sfp_mem_s); - u8 *des = (u8 *) &(sfp->sfpmem->srlid_base); + u8 *des = (u8 *) (sfp->sfpmem); memcpy(des, sfp->dbuf_kva, size); } /* @@ -6010,6 +6262,7 @@ bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type) */ msg->last = (len == fru->residue) ? 1 : 0; + msg->trfr_cmpl = (len == fru->residue) ? 
fru->trfr_cmpl : 0; bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc)); bfa_alen_set(&msg->alen, len, fru->dbuf_pa); @@ -6124,13 +6377,14 @@ bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa, */ bfa_status_t bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, - bfa_cb_fru_t cbfn, void *cbarg) + bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl) { bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ); bfa_trc(fru, len); bfa_trc(fru, offset); - if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) + if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 && + fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2) return BFA_STATUS_FRU_NOT_PRESENT; if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK) @@ -6152,6 +6406,7 @@ bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, fru->offset = 0; fru->addr_off = offset; fru->ubuf = buf; + fru->trfr_cmpl = trfr_cmpl; bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ); @@ -6181,7 +6436,8 @@ bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2) return BFA_STATUS_FRU_NOT_PRESENT; - if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK) + if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK && + fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2) return BFA_STATUS_CMD_NOTSUPP; if (!bfa_ioc_is_operational(fru->ioc)) @@ -6222,7 +6478,8 @@ bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size) if (!bfa_ioc_is_operational(fru->ioc)) return BFA_STATUS_IOC_NON_OP; - if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK) + if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK || + fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2) *max_size = BFA_FRU_CHINOOK_MAX_SIZE; else return BFA_STATUS_CMD_NOTSUPP; @@ -6401,3 +6658,408 @@ bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg) WARN_ON(1); } } + +/* + * register definitions + */ +#define FLI_CMD_REG 0x0001d000 +#define FLI_RDDATA_REG 0x0001d010 +#define FLI_ADDR_REG 0x0001d004 +#define FLI_DEV_STATUS_REG 0x0001d014 + +#define BFA_FLASH_FIFO_SIZE 128 /* fifo size */ +#define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */ +#define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */ +#define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */ + +enum bfa_flash_cmd { + BFA_FLASH_FAST_READ = 0x0b, /* fast read */ + BFA_FLASH_READ_STATUS = 0x05, /* read status */ +}; + +/** + * @brief hardware error definition + */ +enum bfa_flash_err { + BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */ + BFA_FLASH_UNINIT = -2, /*!< flash not initialized */ + BFA_FLASH_BAD = -3, /*!< flash bad */ + BFA_FLASH_BUSY = -4, /*!< flash busy */ + BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */ + BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */ + BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */ + BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */ + BFA_FLASH_ERR_LEN = -9, /*!< invalid length */ +}; + +/** + * @brief flash command register data structure + */ +union bfa_flash_cmd_reg_u { + struct { +#ifdef __BIG_ENDIAN + u32 act:1; + u32 rsv:1; + u32 write_cnt:9; + u32 read_cnt:9; + u32 addr_cnt:4; + u32 cmd:8; +#else + u32 cmd:8; + u32 addr_cnt:4; + u32 read_cnt:9; + u32 write_cnt:9; + u32 rsv:1; + u32 act:1; +#endif + } r; + u32 i; +}; + +/** + * @brief flash device status register data structure + */ +union bfa_flash_dev_status_reg_u { + struct { +#ifdef __BIG_ENDIAN + u32 rsv:21; + u32 fifo_cnt:6; + u32 busy:1; + u32 init_status:1; + u32 present:1; + u32 bad:1; + u32 
good:1; +#else + u32 good:1; + u32 bad:1; + u32 present:1; + u32 init_status:1; + u32 busy:1; + u32 fifo_cnt:6; + u32 rsv:21; +#endif + } r; + u32 i; +}; + +/** + * @brief flash address register data structure + */ +union bfa_flash_addr_reg_u { + struct { +#ifdef __BIG_ENDIAN + u32 addr:24; + u32 dummy:8; +#else + u32 dummy:8; + u32 addr:24; +#endif + } r; + u32 i; +}; + +/** + * dg flash_raw_private Flash raw private functions + */ +static void +bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt, + u8 rd_cnt, u8 ad_cnt, u8 op) +{ + union bfa_flash_cmd_reg_u cmd; + + cmd.i = 0; + cmd.r.act = 1; + cmd.r.write_cnt = wr_cnt; + cmd.r.read_cnt = rd_cnt; + cmd.r.addr_cnt = ad_cnt; + cmd.r.cmd = op; + writel(cmd.i, (pci_bar + FLI_CMD_REG)); +} + +static void +bfa_flash_set_addr(void __iomem *pci_bar, u32 address) +{ + union bfa_flash_addr_reg_u addr; + + addr.r.addr = address & 0x00ffffff; + addr.r.dummy = 0; + writel(addr.i, (pci_bar + FLI_ADDR_REG)); +} + +static int +bfa_flash_cmd_act_check(void __iomem *pci_bar) +{ + union bfa_flash_cmd_reg_u cmd; + + cmd.i = readl(pci_bar + FLI_CMD_REG); + + if (cmd.r.act) + return BFA_FLASH_ERR_CMD_ACT; + + return 0; +} + +/** + * @brief + * Flush FLI data fifo. + * + * @param[in] pci_bar - pci bar address + * @param[in] dev_status - device status + * + * Return 0 on success, negative error number on error. + */ +static u32 +bfa_flash_fifo_flush(void __iomem *pci_bar) +{ + u32 i; + u32 t; + union bfa_flash_dev_status_reg_u dev_status; + + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + + if (!dev_status.r.fifo_cnt) + return 0; + + /* fifo counter in terms of words */ + for (i = 0; i < dev_status.r.fifo_cnt; i++) + t = readl(pci_bar + FLI_RDDATA_REG); + + /* + * Check the device status. It may take some time. + */ + for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + if (!dev_status.r.fifo_cnt) + break; + } + + if (dev_status.r.fifo_cnt) + return BFA_FLASH_ERR_FIFO_CNT; + + return 0; +} + +/** + * @brief + * Read flash status. + * + * @param[in] pci_bar - pci bar address + * + * Return 0 on success, negative error number on error. +*/ +static u32 +bfa_flash_status_read(void __iomem *pci_bar) +{ + union bfa_flash_dev_status_reg_u dev_status; + int status; + u32 ret_status; + int i; + + status = bfa_flash_fifo_flush(pci_bar); + if (status < 0) + return status; + + bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS); + + for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { + status = bfa_flash_cmd_act_check(pci_bar); + if (!status) + break; + } + + if (status) + return status; + + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + if (!dev_status.r.fifo_cnt) + return BFA_FLASH_BUSY; + + ret_status = readl(pci_bar + FLI_RDDATA_REG); + ret_status >>= 24; + + status = bfa_flash_fifo_flush(pci_bar); + if (status < 0) + return status; + + return ret_status; +} + +/** + * @brief + * Start flash read operation. + * + * @param[in] pci_bar - pci bar address + * @param[in] offset - flash address offset + * @param[in] len - read data length + * @param[in] buf - read data buffer + * + * Return 0 on success, negative error number on error. 
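+ * bfa_flash_raw_read() below additionally splits requests on + * BFA_FLASH_FIFO_SIZE-aligned boundaries: with the 128-byte fifo, a read + * at offset 0x1f0 first issues a 16-byte segment up to offset 0x200 and + * then proceeds in full 128-byte chunks.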
+ */ +static u32 +bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, + char *buf) +{ + int status; + + /* + * len must be a multiple of 4 and not exceeding fifo size + */ + if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) + return BFA_FLASH_ERR_LEN; + + /* + * check status + */ + status = bfa_flash_status_read(pci_bar); + if (status == BFA_FLASH_BUSY) + status = bfa_flash_status_read(pci_bar); + + if (status < 0) + return status; + + /* + * check if write-in-progress bit is cleared + */ + if (status & BFA_FLASH_WIP_MASK) + return BFA_FLASH_ERR_WIP; + + bfa_flash_set_addr(pci_bar, offset); + + bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ); + + return 0; +} + +/** + * @brief + * Check flash read operation. + * + * @param[in] pci_bar - pci bar address + * + * Return flash device status, 1 if busy, 0 if not. + */ +static u32 +bfa_flash_read_check(void __iomem *pci_bar) +{ + if (bfa_flash_cmd_act_check(pci_bar)) + return 1; + + return 0; +} +/** + * @brief + * End flash read operation. + * + * @param[in] pci_bar - pci bar address + * @param[in] len - read data length + * @param[in] buf - read data buffer + * + */ +static void +bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf) +{ + u32 i; + + /* + * read data fifo up to 32 words + */ + for (i = 0; i < len; i += 4) { + u32 w = readl(pci_bar + FLI_RDDATA_REG); + *((u32 *) (buf + i)) = swab32(w); + } + + bfa_flash_fifo_flush(pci_bar); +} + +/** + * @brief + * Perform flash raw read. + * + * @param[in] pci_bar - pci bar address + * @param[in] offset - flash partition address offset + * @param[in] buf - read data buffer + * @param[in] len - read data length + * + * Return status. + */ + + +#define FLASH_BLOCKING_OP_MAX 500 +#define FLASH_SEM_LOCK_REG 0x18820 + +static int +bfa_raw_sem_get(void __iomem *bar) +{ + int locked; + + locked = readl((bar + FLASH_SEM_LOCK_REG)); + return !locked; +} + +bfa_status_t +bfa_flash_sem_get(void __iomem *bar) +{ + u32 n = FLASH_BLOCKING_OP_MAX; + + while (!bfa_raw_sem_get(bar)) { + if (--n <= 0) + return BFA_STATUS_BADFLASH; + mdelay(10); + } + return BFA_STATUS_OK; +} + +void +bfa_flash_sem_put(void __iomem *bar) +{ + writel(0, (bar + FLASH_SEM_LOCK_REG)); +} + +bfa_status_t +bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, + u32 len) +{ + u32 n; + int status; + u32 off, l, s, residue, fifo_sz; + + residue = len; + off = 0; + fifo_sz = BFA_FLASH_FIFO_SIZE; + status = bfa_flash_sem_get(pci_bar); + if (status != BFA_STATUS_OK) + return status; + + while (residue) { + s = offset + off; + n = s / fifo_sz; + l = (n + 1) * fifo_sz - s; + if (l > residue) + l = residue; + + status = bfa_flash_read_start(pci_bar, offset + off, l, + &buf[off]); + if (status < 0) { + bfa_flash_sem_put(pci_bar); + return BFA_STATUS_FAILED; + } + + n = BFA_FLASH_BLOCKING_OP_MAX; + while (bfa_flash_read_check(pci_bar)) { + if (--n <= 0) { + bfa_flash_sem_put(pci_bar); + return BFA_STATUS_FAILED; + } + } + + bfa_flash_read_end(pci_bar, l, &buf[off]); + + residue -= l; + off += l; + } + bfa_flash_sem_put(pci_bar); + + return BFA_STATUS_OK; +} diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 23a90e7b710..2e28392c2fb 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -346,6 +346,12 @@ struct bfa_ioc_hwif_s { void (*ioc_sync_ack) (struct bfa_ioc_s *ioc); bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc); bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc); + void (*ioc_set_fwstate) (struct bfa_ioc_s *ioc, + 
enum bfi_ioc_state fwstate); + enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc_s *ioc); + void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc, + enum bfi_ioc_state fwstate); + enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc); }; /* @@ -509,6 +515,8 @@ void bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg); void bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg); +bfa_status_t bfa_flash_raw_read(void __iomem *pci_bar_kva, + u32 offset, char *buf, u32 len); /* * DIAG module specific @@ -725,6 +733,7 @@ struct bfa_fru_s { struct bfa_mbox_cmd_s mb; /* mailbox */ struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */ struct bfa_mem_dma_s fru_dma; + u8 trfr_cmpl; }; #define BFA_FRU(__bfa) (&(__bfa)->modules.fru) @@ -732,7 +741,7 @@ struct bfa_fru_s { bfa_status_t bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, - bfa_cb_fru_t cbfn, void *cbarg); + bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl); bfa_status_t bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset, bfa_cb_fru_t cbfn, void *cbarg); @@ -881,7 +890,7 @@ void bfa_ioc_enable(struct bfa_ioc_s *ioc); void bfa_ioc_disable(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc); -void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, +bfa_status_t bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env); void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); @@ -912,6 +921,7 @@ bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen); bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, u32 *offset, int *buflen); +bfa_status_t bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg); void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr); @@ -949,6 +959,8 @@ bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off, + u32 *fwimg); /* * bfa mfg wwn API functions */ diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c index 30df8a28471..453c2f5b556 100644 --- a/drivers/scsi/bfa/bfa_ioc_cb.c +++ b/drivers/scsi/bfa/bfa_ioc_cb.c @@ -22,6 +22,8 @@ BFA_TRC_FILE(CNA, IOC_CB); +#define bfa_ioc_cb_join_pos(__ioc) ((u32) (1 << BFA_IOC_CB_JOIN_SH)) + /* * forward declarations */ @@ -37,6 +39,12 @@ static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc); static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc); static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_set_cur_ioc_fwstate( + struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_set_alt_ioc_fwstate( + struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc); static struct bfa_ioc_hwif_s hwif_cb; @@ -59,6 +67,10 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc) hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave; hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack; hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete; + hwif_cb.ioc_set_fwstate = bfa_ioc_cb_set_cur_ioc_fwstate; 
+ hwif_cb.ioc_get_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate; + hwif_cb.ioc_set_alt_fwstate = bfa_ioc_cb_set_alt_ioc_fwstate; + hwif_cb.ioc_get_alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate; ioc->ioc_hwif = &hwif_cb; } @@ -69,6 +81,29 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc) static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc) { + enum bfi_ioc_state alt_fwstate, cur_fwstate; + struct bfi_ioc_image_hdr_s fwhdr; + + cur_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc); + bfa_trc(ioc, cur_fwstate); + alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc); + bfa_trc(ioc, alt_fwstate); + + /* + * Uninit implies this is the only driver as of now. + */ + if (cur_fwstate == BFI_IOC_UNINIT) + return BFA_TRUE; + /* + * Check if another driver with a different firmware is active + */ + bfa_ioc_fwver_get(ioc, &fwhdr); + if (!bfa_ioc_fwver_cmp(ioc, &fwhdr) && + alt_fwstate != BFI_IOC_DISABLED) { + bfa_trc(ioc, alt_fwstate); + return BFA_FALSE; + } + return BFA_TRUE; } @@ -187,6 +222,20 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) static bfa_boolean_t bfa_ioc_cb_sync_start(struct bfa_ioc_s *ioc) { + u32 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); + + /** + * Driver load time. If the join bit is set, + * it is due to an unclean exit by the driver for this + * PCI fn in the previous incarnation. Whoever comes here first + * should clean it up, no matter which PCI fn. + */ + if (ioc_fwstate & BFA_IOC_CB_JOIN_MASK) { + writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); + return BFA_TRUE; + } + return bfa_ioc_cb_sync_complete(ioc); } @@ -212,24 +261,66 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc) static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc) { + u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); + u32 join_pos = bfa_ioc_cb_join_pos(ioc); + + writel((r32 | join_pos), ioc->ioc_regs.ioc_fwstate); } static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc) { + u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); + u32 join_pos = bfa_ioc_cb_join_pos(ioc); + + writel((r32 & ~join_pos), ioc->ioc_regs.ioc_fwstate); +} + +static void +bfa_ioc_cb_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc, + enum bfi_ioc_state fwstate) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); + + writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)), + ioc->ioc_regs.ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_cb_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc) +{ + return (enum bfi_ioc_state)(readl(ioc->ioc_regs.ioc_fwstate) & + BFA_IOC_CB_FWSTATE_MASK); +} + +static void +bfa_ioc_cb_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc, + enum bfi_ioc_state fwstate) +{ + u32 r32 = readl(ioc->ioc_regs.alt_ioc_fwstate); + + writel((fwstate | (r32 & BFA_IOC_CB_JOIN_MASK)), + ioc->ioc_regs.alt_ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_cb_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc) +{ + return (enum bfi_ioc_state)(readl(ioc->ioc_regs.alt_ioc_fwstate) & + BFA_IOC_CB_FWSTATE_MASK); } static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc) { - writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + bfa_ioc_cb_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); } static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc) { - uint32_t fwstate, alt_fwstate; - fwstate = readl(ioc->ioc_regs.ioc_fwstate); + u32 fwstate, alt_fwstate; + fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc); /* * At this point, this IOC is hoding the hw sem in the @@ -257,7 +348,7 @@ bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc) fwstate == BFI_IOC_OP) return BFA_TRUE; else { - alt_fwstate = 
readl(ioc->ioc_regs.alt_ioc_fwstate); + alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc); if (alt_fwstate == BFI_IOC_FAIL || alt_fwstate == BFI_IOC_DISABLED || alt_fwstate == BFI_IOC_UNINIT || @@ -272,7 +363,7 @@ bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc) bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode) { - u32 pll_sclk, pll_fclk; + u32 pll_sclk, pll_fclk, join_bits; pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_P0_1(3U) | @@ -282,8 +373,12 @@ bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode) __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) | __APP_PLL_LCLK_JITLMT0_1(3U) | __APP_PLL_LCLK_CNTLMT0_1(3U); - writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); - writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); + join_bits = readl(rb + BFA_IOC0_STATE_REG) & + BFA_IOC_CB_JOIN_MASK; + writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC0_STATE_REG)); + join_bits = readl(rb + BFA_IOC1_STATE_REG) & + BFA_IOC_CB_JOIN_MASK; + writel((BFI_IOC_UNINIT | join_bits), (rb + BFA_IOC1_STATE_REG)); writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index de4e726a126..bd53150e4ee 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c @@ -43,6 +43,12 @@ static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc); static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_set_cur_ioc_fwstate( + struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_set_alt_ioc_fwstate( + struct bfa_ioc_s *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc); static struct bfa_ioc_hwif_s hwif_ct; static struct bfa_ioc_hwif_s hwif_ct2; @@ -512,6 +518,10 @@ bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif) hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave; hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack; hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete; + hwif->ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate; + hwif->ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate; + hwif->ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate; + hwif->ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate; } /** @@ -918,6 +928,16 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) } } + /* + * The very first PCIe DMA Read done by LPU fails with a fatal error, + * when Address Translation Cache (ATC) has been enabled by system BIOS. + * + * Workaround: + * Disable Invalidated Tag Match Enable capability by setting the bit 26 + * of CHIP_MISC_PRG to 0, by default it is set to 1. 
+ */ + r32 = readl(rb + CT2_CHIP_MISC_PRG); + writel((r32 & 0xfbffffff), (rb + CT2_CHIP_MISC_PRG)); /* * Mask the interrupts and clear any @@ -949,3 +969,29 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) return BFA_STATUS_OK; } + +static void +bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc_s *ioc, + enum bfi_ioc_state fwstate) +{ + writel(fwstate, ioc->ioc_regs.ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc_s *ioc) +{ + return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate); +} + +static void +bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc_s *ioc, + enum bfi_ioc_state fwstate) +{ + writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc_s *ioc) +{ + return (enum bfi_ioc_state) readl(ioc->ioc_regs.alt_ioc_fwstate); +} diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 299c1c889b3..625225f3108 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -70,6 +70,8 @@ enum bfa_fcport_sm_event { BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */ BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */ BFA_FCPORT_SM_FAA_MISCONFIG = 12, /* FAA misconfiguratin */ + BFA_FCPORT_SM_DDPORTENABLE = 13, /* enable ddport */ + BFA_FCPORT_SM_DDPORTDISABLE = 14, /* disable ddport */ }; /* @@ -202,6 +204,8 @@ static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); static void bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event); @@ -234,6 +238,7 @@ static struct bfa_sm_table_s hal_port_sm_table[] = { {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN}, {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN}, {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT}, + {BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT}, {BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG}, }; @@ -1276,7 +1281,6 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) switch (event) { case BFA_LPS_SM_FWRSP: - case BFA_LPS_SM_OFFLINE: if (lps->status == BFA_STATUS_OK) { bfa_sm_set_state(lps, bfa_lps_sm_online); if (lps->fdisc) @@ -1305,6 +1309,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) bfa_lps_login_comp(lps); break; + case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); break; @@ -1614,7 +1619,6 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp) lps->lp_mac = rsp->lp_mac; lps->brcd_switch = rsp->brcd_switch; lps->fcf_mac = rsp->fcf_mac; - lps->pr_bbscn = rsp->bb_scn; break; @@ -1744,7 +1748,6 @@ bfa_lps_send_login(struct bfa_lps_s *lps) m->nwwn = lps->nwwn; m->fdisc = lps->fdisc; m->auth_en = lps->auth_en; - m->bb_scn = lps->bb_scn; bfa_reqq_produce(lps->bfa, lps->reqq, m->mh); list_del(&lps->qe); @@ -1940,7 +1943,7 @@ bfa_lps_delete(struct bfa_lps_s *lps) */ void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, - wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn) + wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en) { lps->uarg = uarg; lps->alpa = alpa; @@ -1949,7 +1952,6 @@ bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, lps->nwwn = nwwn; lps->fdisc = BFA_FALSE; lps->auth_en = auth_en; - lps->bb_scn = bb_scn; bfa_sm_send_event(lps, 
BFA_LPS_SM_LOGIN); } @@ -2649,6 +2651,10 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, bfa_sm_set_state(fcport, bfa_fcport_sm_dport); break; + case BFA_FCPORT_SM_DDPORTENABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_ddport); + break; + default: bfa_sm_fault(fcport->bfa, event); } @@ -2762,6 +2768,40 @@ bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) } static void +bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) +{ + bfa_trc(fcport->bfa, event); + + switch (event) { + case BFA_FCPORT_SM_DISABLE: + case BFA_FCPORT_SM_DDPORTDISABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); + break; + + case BFA_FCPORT_SM_DPORTENABLE: + case BFA_FCPORT_SM_DPORTDISABLE: + case BFA_FCPORT_SM_ENABLE: + case BFA_FCPORT_SM_START: + /** + * Ignore event for a port that is ddport + */ + break; + + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + break; + + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); + break; + + default: + bfa_sm_fault(fcport->bfa, event); + } +} + +static void bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { @@ -3082,6 +3122,8 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, port_cfg->qos_bw.med = BFA_QOS_BW_MED; port_cfg->qos_bw.low = BFA_QOS_BW_LOW; + fcport->fec_state = BFA_FEC_OFFLINE; + INIT_LIST_HEAD(&fcport->stats_pending_q); INIT_LIST_HEAD(&fcport->statsclr_pending_q); @@ -3158,6 +3200,11 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) fcport->qos_attr = pevent->link_state.qos_attr; fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr; + if (fcport->cfg.bb_cr_enabled) + fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr; + + fcport->fec_state = pevent->link_state.fec_state; + /* * update trunk state if applicable */ @@ -3177,7 +3224,7 @@ bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport) { fcport->speed = BFA_PORT_SPEED_UNKNOWN; fcport->topology = BFA_PORT_TOPOLOGY_NONE; - fcport->bbsc_op_state = BFA_FALSE; + fcport->fec_state = BFA_FEC_OFFLINE; } /* @@ -3629,6 +3676,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) fcport->qos_attr.qos_bw_op = i2hmsg.penable_rsp->port_cfg.qos_bw; + if (fcport->cfg.bb_cr_enabled) + fcport->bbcr_attr.state = BFA_BBCR_OFFLINE; + else + fcport->bbcr_attr.state = BFA_BBCR_DISABLED; + bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); } break; @@ -3639,6 +3691,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) break; case BFI_FCPORT_I2H_EVENT: + if (fcport->cfg.bb_cr_enabled) + fcport->bbcr_attr.state = BFA_BBCR_OFFLINE; + else + fcport->bbcr_attr.state = BFA_BBCR_DISABLED; + if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP) bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); else { @@ -3846,6 +3903,8 @@ bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology) return BFA_STATUS_LOOP_UNSUPP_MEZZ; if (bfa_fcport_is_dport(bfa) != BFA_FALSE) return BFA_STATUS_DPORT_ERR; + if (bfa_fcport_is_ddport(bfa) != BFA_FALSE) + return BFA_STATUS_DPORT_ERR; break; case BFA_PORT_TOPOLOGY_AUTO: @@ -3964,14 +4023,11 @@ bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) } void -bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn) +bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); fcport->cfg.tx_bbcredit = (u8)tx_bbcredit; - fcport->cfg.bb_scn = bb_scn; - if (bb_scn) - fcport->bbsc_op_state = BFA_TRUE; } 
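The dport/ddport handlers added in this patch (bfa_fcport_sm_ddport() above and the bfa_dport_sm_* states below) use the driver's function-pointer state machine convention: the current state is the handler itself, bfa_sm_set_state() swaps the handler and bfa_sm_send_event() invokes it. A stripped-down sketch of the pattern (a hypothetical two-state machine, not driver code):

struct toy_sm;
typedef void (*toy_sm_state_t)(struct toy_sm *sm, int event);

struct toy_sm {
	toy_sm_state_t	sm;	/* current state == current handler */
};

#define toy_sm_set_state(_sm, _state)	((_sm)->sm = (_state))
#define toy_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))

enum { TOY_EV_START, TOY_EV_STOP };

static void toy_sm_stopped(struct toy_sm *sm, int event);

static void
toy_sm_running(struct toy_sm *sm, int event)
{
	if (event == TOY_EV_STOP)
		toy_sm_set_state(sm, toy_sm_stopped);
	/* other events are silently ignored here, much as
	 * bfa_fcport_sm_ddport() ignores enable/start events */
}

static void
toy_sm_stopped(struct toy_sm *sm, int event)
{
	if (event == TOY_EV_START)
		toy_sm_set_state(sm, toy_sm_running);
}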
/* @@ -4021,7 +4077,8 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm); - attr->bbsc_op_status = fcport->bbsc_op_state; + + attr->fec_state = fcport->fec_state; /* PBC Disabled State */ if (bfa_fcport_is_pbcdisabled(bfa)) @@ -4115,6 +4172,15 @@ bfa_fcport_is_dport(struct bfa_s *bfa) BFA_PORT_ST_DPORT); } +bfa_boolean_t +bfa_fcport_is_ddport(struct bfa_s *bfa) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) == + BFA_PORT_ST_DDPORT); +} + bfa_status_t bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw) { @@ -4217,6 +4283,77 @@ bfa_fcport_is_trunk_enabled(struct bfa_s *bfa) return fcport->cfg.trunked; } +bfa_status_t +bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + bfa_trc(bfa, on_off); + + if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC) + return BFA_STATUS_BBCR_FC_ONLY; + + if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) && + (bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK)) + return BFA_STATUS_CMD_NOTSUPP_MEZZ; + + if (on_off) { + if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) + return BFA_STATUS_TOPOLOGY_LOOP; + + if (fcport->cfg.qos_enabled) + return BFA_STATUS_ERROR_QOS_ENABLED; + + if (fcport->cfg.trunked) + return BFA_STATUS_TRUNK_ENABLED; + + if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) && + (fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc))) + return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT; + + if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS) + return BFA_STATUS_FEATURE_NOT_SUPPORTED; + + if (fcport->cfg.bb_cr_enabled) { + if (bb_scn != fcport->cfg.bb_scn) + return BFA_STATUS_BBCR_CFG_NO_CHANGE; + else + return BFA_STATUS_NO_CHANGE; + } + + if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX)) + bb_scn = BFA_BB_SCN_DEF; + + fcport->cfg.bb_cr_enabled = on_off; + fcport->cfg.bb_scn = bb_scn; + } else { + if (!fcport->cfg.bb_cr_enabled) + return BFA_STATUS_NO_CHANGE; + + fcport->cfg.bb_cr_enabled = on_off; + fcport->cfg.bb_scn = 0; + } + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcport_get_bbcr_attr(struct bfa_s *bfa, + struct bfa_bbcr_attr_s *bbcr_attr) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC) + return BFA_STATUS_BBCR_FC_ONLY; + + if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) + return BFA_STATUS_TOPOLOGY_LOOP; + + *bbcr_attr = fcport->bbcr_attr; + + return BFA_STATUS_OK; +} + void bfa_fcport_dportenable(struct bfa_s *bfa) { @@ -4237,6 +4374,24 @@ bfa_fcport_dportdisable(struct bfa_s *bfa) bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE); } +void +bfa_fcport_ddportenable(struct bfa_s *bfa) +{ + /* + * Assume caller check for port is in disable state + */ + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE); +} + +void +bfa_fcport_ddportdisable(struct bfa_s *bfa) +{ + /* + * Assume caller check for port is in disable state + */ + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE); +} + /* * Rport State machine functions */ @@ -5622,6 +5777,14 @@ bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw) * Dport forward declaration */ +enum bfa_dport_test_state_e { + BFA_DPORT_ST_DISABLED = 0, /*!< dport is disabled */ + BFA_DPORT_ST_INP = 1, /*!< test in progress */ + BFA_DPORT_ST_COMP = 
2, /*!< test complete successfully */ + BFA_DPORT_ST_NO_SFP = 3, /*!< sfp is not present */ + BFA_DPORT_ST_NOTSTART = 4, /*!< test not start dport is enabled */ +}; + /* * BFA DPORT state machine events */ @@ -5631,6 +5794,9 @@ enum bfa_dport_sm_event { BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */ BFA_DPORT_SM_QRESUME = 4, /* CQ space available */ BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */ + BFA_DPORT_SM_START = 6, /* re-start dport test */ + BFA_DPORT_SM_REQFAIL = 7, /* request failure */ + BFA_DPORT_SM_SCN = 8, /* state change notify frm fw */ }; static void bfa_dport_sm_disabled(struct bfa_dport_s *dport, @@ -5645,9 +5811,19 @@ static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport, enum bfa_dport_sm_event event); static void bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event); +static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_starting(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); +static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event); static void bfa_dport_qresume(void *cbarg); static void bfa_dport_req_comp(struct bfa_dport_s *dport, - bfi_diag_dport_rsp_t *msg); + struct bfi_diag_dport_rsp_s *msg); +static void bfa_dport_scn(struct bfa_dport_s *dport, + struct bfi_diag_dport_scn_s *msg); /* * BFA fcdiag module @@ -5689,6 +5865,8 @@ bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport); dport->cbfn = NULL; dport->cbarg = NULL; + dport->test_state = BFA_DPORT_ST_DISABLED; + memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s)); } static void @@ -5891,7 +6069,12 @@ bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg) bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg); break; case BFI_DIAG_I2H_DPORT: - bfa_dport_req_comp(&fcdiag->dport, (bfi_diag_dport_rsp_t *)msg); + bfa_dport_req_comp(&fcdiag->dport, + (struct bfi_diag_dport_rsp_s *)msg); + break; + case BFI_DIAG_I2H_DPORT_SCN: + bfa_dport_scn(&fcdiag->dport, + (struct bfi_diag_dport_scn_s *)msg); break; default: bfa_trc(fcdiag, msg->mhdr.msg_id); @@ -5986,7 +6169,11 @@ bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode, return BFA_STATUS_UNSUPP_SPEED; } } - + /* check to see if fcport is dport */ + if (bfa_fcport_is_dport(bfa)) { + bfa_trc(fcdiag, fcdiag->lb.lock); + return BFA_STATUS_DPORT_ENABLED; + } /* check to see if there is another destructive diag cmd running */ if (fcdiag->lb.lock) { bfa_trc(fcdiag, fcdiag->lb.lock); @@ -6090,6 +6277,15 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa) /* * D-port */ +#define bfa_dport_result_start(__dport, __mode) do { \ + (__dport)->result.start_time = bfa_get_log_time(); \ + (__dport)->result.status = DPORT_TEST_ST_INPRG; \ + (__dport)->result.mode = (__mode); \ + (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \ + (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \ + (__dport)->result.lpcnt = (__dport)->lpcnt; \ +} while (0) + static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req); static void @@ -6124,6 +6320,18 @@ bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) /* ignore */ break; + case BFA_DPORT_SM_SCN: + if (dport->i2hmsg.scn.state == BFI_DPORT_SCN_DDPORT_ENABLE) { + 
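+		/*
+		 * DDPORT_ENABLE arriving while the host-side SM is disabled
+		 * means the attached switch started D-port (diagnostic) mode
+		 * on its own.  dport->dynamic records the switch-initiated
+		 * entry; while it is set, the host-initiated
+		 * bfa_dport_enable()/bfa_dport_disable() paths further down
+		 * refuse with BFA_STATUS_DDPORT_ERR.
+		 */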
bfa_fcport_ddportenable(dport->bfa); + dport->dynamic = BFA_TRUE; + dport->test_state = BFA_DPORT_ST_NOTSTART; + bfa_sm_set_state(dport, bfa_dport_sm_enabled); + } else { + bfa_trc(dport->bfa, dport->i2hmsg.scn.state); + WARN_ON(1); + } + break; + default: bfa_sm_fault(dport->bfa, event); } @@ -6159,9 +6367,23 @@ bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) switch (event) { case BFA_DPORT_SM_FWRSP: + memset(&dport->result, 0, + sizeof(struct bfa_diag_dport_result_s)); + if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) { + dport->test_state = BFA_DPORT_ST_NO_SFP; + } else { + dport->test_state = BFA_DPORT_ST_INP; + bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO); + } bfa_sm_set_state(dport, bfa_dport_sm_enabled); break; + case BFA_DPORT_SM_REQFAIL: + dport->test_state = BFA_DPORT_ST_DISABLED; + bfa_fcport_dportdisable(dport->bfa); + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + break; + case BFA_DPORT_SM_HWFAIL: bfa_sm_set_state(dport, bfa_dport_sm_disabled); bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); @@ -6178,8 +6400,11 @@ bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) bfa_trc(dport->bfa, event); switch (event) { - case BFA_DPORT_SM_ENABLE: - /* Already enabled */ + case BFA_DPORT_SM_START: + if (bfa_dport_send_req(dport, BFI_DPORT_START)) + bfa_sm_set_state(dport, bfa_dport_sm_starting); + else + bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait); break; case BFA_DPORT_SM_DISABLE: @@ -6194,6 +6419,48 @@ bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) bfa_sm_set_state(dport, bfa_dport_sm_disabled); break; + case BFA_DPORT_SM_SCN: + switch (dport->i2hmsg.scn.state) { + case BFI_DPORT_SCN_TESTCOMP: + dport->test_state = BFA_DPORT_ST_COMP; + break; + + case BFI_DPORT_SCN_TESTSTART: + dport->test_state = BFA_DPORT_ST_INP; + break; + + case BFI_DPORT_SCN_TESTSKIP: + case BFI_DPORT_SCN_SUBTESTSTART: + /* no state change */ + break; + + case BFI_DPORT_SCN_SFP_REMOVED: + dport->test_state = BFA_DPORT_ST_NO_SFP; + break; + + case BFI_DPORT_SCN_DDPORT_DISABLE: + bfa_fcport_ddportdisable(dport->bfa); + + if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE)) + bfa_sm_set_state(dport, + bfa_dport_sm_dynamic_disabling); + else + bfa_sm_set_state(dport, + bfa_dport_sm_dynamic_disabling_qwait); + break; + + case BFI_DPORT_SCN_FCPORT_DISABLE: + bfa_fcport_ddportdisable(dport->bfa); + + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + dport->dynamic = BFA_FALSE; + break; + + default: + bfa_trc(dport->bfa, dport->i2hmsg.scn.state); + bfa_sm_fault(dport->bfa, event); + } + break; default: bfa_sm_fault(dport->bfa, event); } @@ -6217,6 +6484,10 @@ bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport, bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); break; + case BFA_DPORT_SM_SCN: + /* ignore */ + break; + default: bfa_sm_fault(dport->bfa, event); } @@ -6229,7 +6500,98 @@ bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) switch (event) { case BFA_DPORT_SM_FWRSP: + dport->test_state = BFA_DPORT_ST_DISABLED; + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); + break; + + case BFA_DPORT_SM_SCN: + /* no state change */ + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + 
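+	/*
+	 * The *_qwait states are the queue-full companions of their base
+	 * states: bfa_dport_send_req() found no room in the DIAG request
+	 * queue, so the message is parked on dport->reqq_wait and re-sent
+	 * when BFA_DPORT_SM_QRESUME reports that CQ space is available.
+	 */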
case BFA_DPORT_SM_QRESUME: + bfa_sm_set_state(dport, bfa_dport_sm_starting); + bfa_dport_send_req(dport, BFI_DPORT_START); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_reqq_wcancel(&dport->reqq_wait); + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_FWRSP: + memset(&dport->result, 0, + sizeof(struct bfa_diag_dport_result_s)); + if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) { + dport->test_state = BFA_DPORT_ST_NO_SFP; + } else { + dport->test_state = BFA_DPORT_ST_INP; + bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU); + } + /* fall thru */ + + case BFA_DPORT_SM_REQFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_enabled); + break; + + case BFA_DPORT_SM_HWFAIL: bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED); + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} + +static void +bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_SCN: + switch (dport->i2hmsg.scn.state) { + case BFI_DPORT_SCN_DDPORT_DISABLED: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + dport->dynamic = BFA_FALSE; + bfa_fcport_enable(dport->bfa); + break; + + default: + bfa_trc(dport->bfa, dport->i2hmsg.scn.state); + bfa_sm_fault(dport->bfa, event); + + } break; case BFA_DPORT_SM_HWFAIL: @@ -6242,6 +6604,32 @@ bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event) } } +static void +bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport, + enum bfa_dport_sm_event event) +{ + bfa_trc(dport->bfa, event); + + switch (event) { + case BFA_DPORT_SM_QRESUME: + bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling); + bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE); + break; + + case BFA_DPORT_SM_HWFAIL: + bfa_sm_set_state(dport, bfa_dport_sm_disabled); + bfa_reqq_wcancel(&dport->reqq_wait); + bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK); + break; + + case BFA_DPORT_SM_SCN: + /* ignore */ + break; + + default: + bfa_sm_fault(dport->bfa, event); + } +} static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req) @@ -6249,12 +6637,6 @@ bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req) struct bfi_diag_dport_req_s *m; /* - * Increment message tag before queue check, so that responses to old - * requests are discarded. 
- */ - dport->msgtag++; - - /* * check for room in queue to send request now */ m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG); @@ -6266,7 +6648,10 @@ bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req) bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT, bfa_fn_lpu(dport->bfa)); m->req = req; - m->msgtag = dport->msgtag; + if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) { + m->lpcnt = cpu_to_be32(dport->lpcnt); + m->payload = cpu_to_be32(dport->payload); + } /* * queue I/O message to firmware @@ -6285,19 +6670,131 @@ bfa_dport_qresume(void *cbarg) } static void -bfa_dport_req_comp(struct bfa_dport_s *dport, bfi_diag_dport_rsp_t *msg) +bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg) { - bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP); + msg->status = cpu_to_be32(msg->status); + dport->i2hmsg.rsp.status = msg->status; + dport->rp_pwwn = msg->pwwn; + dport->rp_nwwn = msg->nwwn; + + if ((msg->status == BFA_STATUS_OK) || + (msg->status == BFA_STATUS_DPORT_NO_SFP)) { + bfa_trc(dport->bfa, msg->status); + bfa_trc(dport->bfa, dport->rp_pwwn); + bfa_trc(dport->bfa, dport->rp_nwwn); + bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP); + + } else { + bfa_trc(dport->bfa, msg->status); + bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL); + } bfa_cb_fcdiag_dport(dport, msg->status); } +static bfa_boolean_t +bfa_dport_is_sending_req(struct bfa_dport_s *dport) +{ + if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || + bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) || + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || + bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) || + bfa_sm_cmp_state(dport, bfa_dport_sm_starting) || + bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) { + return BFA_TRUE; + } else { + return BFA_FALSE; + } +} + +static void +bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg) +{ + int i; + uint8_t subtesttype; + + bfa_trc(dport->bfa, msg->state); + dport->i2hmsg.scn.state = msg->state; + + switch (dport->i2hmsg.scn.state) { + case BFI_DPORT_SCN_TESTCOMP: + dport->result.end_time = bfa_get_log_time(); + bfa_trc(dport->bfa, dport->result.end_time); + + dport->result.status = msg->info.testcomp.status; + bfa_trc(dport->bfa, dport->result.status); + + dport->result.roundtrip_latency = + cpu_to_be32(msg->info.testcomp.latency); + dport->result.est_cable_distance = + cpu_to_be32(msg->info.testcomp.distance); + dport->result.buffer_required = + be16_to_cpu(msg->info.testcomp.numbuffer); + + dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz); + dport->result.speed = msg->info.testcomp.speed; + + bfa_trc(dport->bfa, dport->result.roundtrip_latency); + bfa_trc(dport->bfa, dport->result.est_cable_distance); + bfa_trc(dport->bfa, dport->result.buffer_required); + bfa_trc(dport->bfa, dport->result.frmsz); + bfa_trc(dport->bfa, dport->result.speed); + + for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) { + dport->result.subtest[i].status = + msg->info.testcomp.subtest_status[i]; + bfa_trc(dport->bfa, dport->result.subtest[i].status); + } + break; + + case BFI_DPORT_SCN_TESTSKIP: + case BFI_DPORT_SCN_DDPORT_ENABLE: + memset(&dport->result, 0, + sizeof(struct bfa_diag_dport_result_s)); + break; + + case BFI_DPORT_SCN_TESTSTART: + memset(&dport->result, 0, + sizeof(struct bfa_diag_dport_result_s)); + dport->rp_pwwn = msg->info.teststart.pwwn; + dport->rp_nwwn = msg->info.teststart.nwwn; + dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm); + bfa_dport_result_start(dport, msg->info.teststart.mode); 
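+		/*
+		 * Multi-byte fields in these firmware events are big-endian
+		 * on the wire, hence the be16_to_cpu()/cpu_to_be32() calls
+		 * above.  The 32-bit byte swap is its own inverse, so
+		 * cpu_to_be32() and be32_to_cpu() yield the same bytes here;
+		 * the latter would state the direction more clearly.
+		 */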
+ break; + + case BFI_DPORT_SCN_SUBTESTSTART: + subtesttype = msg->info.teststart.type; + dport->result.subtest[subtesttype].start_time = + bfa_get_log_time(); + dport->result.subtest[subtesttype].status = + DPORT_TEST_ST_INPRG; + + bfa_trc(dport->bfa, subtesttype); + bfa_trc(dport->bfa, + dport->result.subtest[subtesttype].start_time); + break; + + case BFI_DPORT_SCN_SFP_REMOVED: + case BFI_DPORT_SCN_DDPORT_DISABLED: + case BFI_DPORT_SCN_DDPORT_DISABLE: + case BFI_DPORT_SCN_FCPORT_DISABLE: + dport->result.status = DPORT_TEST_ST_IDLE; + break; + + default: + bfa_sm_fault(dport->bfa, msg->state); + } + + bfa_sm_send_event(dport, BFA_DPORT_SM_SCN); +} + /* * Dport enable * * @param[in] *bfa - bfa data struct */ bfa_status_t -bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) +bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat, + bfa_cb_diag_t cbfn, void *cbarg) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); struct bfa_dport_s *dport = &fcdiag->dport; @@ -6311,6 +6808,14 @@ bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) } /* + * Dport is supported in CT2 or above + */ + if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) { + bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id); + return BFA_STATUS_FEATURE_NOT_SUPPORTED; + } + + /* * Check to see if IOC is down */ if (!bfa_iocfc_is_operational(bfa)) @@ -6348,6 +6853,14 @@ bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) } /* + * Check if diag loopback is running + */ + if (bfa_fcdiag_lb_is_running(bfa)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_DIAG_BUSY; + } + + /* * Check to see if port is disable or in dport state */ if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) && @@ -6357,14 +6870,16 @@ bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) } /* + * Check if dport is in dynamic mode + */ + if (dport->dynamic) + return BFA_STATUS_DDPORT_ERR; + + /* * Check if dport is busy */ - if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || - bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) || - bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || - bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) { + if (bfa_dport_is_sending_req(dport)) return BFA_STATUS_DEVBUSY; - } /* * Check if dport is already enabled @@ -6374,6 +6889,10 @@ bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) return BFA_STATUS_DPORT_ENABLED; } + bfa_trc(dport->bfa, lpcnt); + bfa_trc(dport->bfa, pat); + dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT; + dport->payload = (pat) ? 
pat : LB_PATTERN_DEFAULT; dport->cbfn = cbfn; dport->cbarg = cbarg; @@ -6402,6 +6921,13 @@ bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) } /* + * Check if dport is in dynamic mode + */ + if (dport->dynamic) { + return BFA_STATUS_DDPORT_ERR; + } + + /* * Check to see if port is disable or in dport state */ if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) && @@ -6413,10 +6939,7 @@ bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) /* * Check if dport is busy */ - if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || - bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) || - bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || - bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) + if (bfa_dport_is_sending_req(dport)) return BFA_STATUS_DEVBUSY; /* @@ -6435,30 +6958,105 @@ bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg) } /* - * Get D-port state + * Dport start -- restart dport test * - * @param[in] *bfa - bfa data struct + * @param[in] *bfa - bfa data struct */ +bfa_status_t +bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat, + bfa_cb_diag_t cbfn, void *cbarg) +{ + struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); + struct bfa_dport_s *dport = &fcdiag->dport; + + /* + * Check to see if IOC is down + */ + if (!bfa_iocfc_is_operational(bfa)) + return BFA_STATUS_IOC_NON_OP; + + /* + * Check if dport is in dynamic mode + */ + if (dport->dynamic) + return BFA_STATUS_DDPORT_ERR; + + /* + * Check if dport is busy + */ + if (bfa_dport_is_sending_req(dport)) + return BFA_STATUS_DEVBUSY; + /* + * Check if dport is in enabled state. + * Test can only be restart when previous test has completed + */ + if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_DPORT_DISABLED; + + } else { + if (dport->test_state == BFA_DPORT_ST_NO_SFP) + return BFA_STATUS_DPORT_INV_SFP; + + if (dport->test_state == BFA_DPORT_ST_INP) + return BFA_STATUS_DEVBUSY; + + WARN_ON(dport->test_state != BFA_DPORT_ST_COMP); + } + + bfa_trc(dport->bfa, lpcnt); + bfa_trc(dport->bfa, pat); + + dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT; + dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT; + + dport->cbfn = cbfn; + dport->cbarg = cbarg; + + bfa_sm_send_event(dport, BFA_DPORT_SM_START); + return BFA_STATUS_OK; +} + +/* + * Dport show -- return dport test result + * + * @param[in] *bfa - bfa data struct + */ bfa_status_t -bfa_dport_get_state(struct bfa_s *bfa, enum bfa_dport_state *state) +bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); struct bfa_dport_s *dport = &fcdiag->dport; - if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) - *state = BFA_DPORT_ST_ENABLED; - else if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) || - bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait)) - *state = BFA_DPORT_ST_ENABLING; - else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) - *state = BFA_DPORT_ST_DISABLED; - else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) || - bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) - *state = BFA_DPORT_ST_DISABLING; - else { - bfa_trc(dport->bfa, BFA_STATUS_EINVAL); - return BFA_STATUS_EINVAL; + /* + * Check to see if IOC is down + */ + if (!bfa_iocfc_is_operational(bfa)) + return BFA_STATUS_IOC_NON_OP; + + /* + * Check if dport is busy + */ + if (bfa_dport_is_sending_req(dport)) + return BFA_STATUS_DEVBUSY; + + /* + * Check if dport is in enabled state. 
+ */ + if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) { + bfa_trc(dport->bfa, 0); + return BFA_STATUS_DPORT_DISABLED; + } + + /* + * Check if there is SFP + */ + if (dport->test_state == BFA_DPORT_ST_NO_SFP) + return BFA_STATUS_DPORT_INV_SFP; + + memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s)); + return BFA_STATUS_OK; } diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index 8d7fbecfcb2..ef07365991e 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h @@ -405,8 +405,6 @@ struct bfa_lps_s { bfa_status_t status; /* login status */ u16 pdusz; /* max receive PDU size */ u16 pr_bbcred; /* BB_CREDIT from peer */ - u8 pr_bbscn; /* BB_SCN from peer */ - u8 bb_scn; /* local BB_SCN */ u8 lsrjt_rsn; /* LSRJT reason */ u8 lsrjt_expl; /* LSRJT explanation */ u8 lun_mask; /* LUN mask flag */ @@ -510,11 +508,12 @@ struct bfa_fcport_s { bfa_boolean_t diag_busy; /* diag busy status */ bfa_boolean_t beacon; /* port beacon status */ bfa_boolean_t link_e2e_beacon; /* link beacon status */ - bfa_boolean_t bbsc_op_state; /* Cred recov Oper State */ struct bfa_fcport_trunk_s trunk; u16 fcoe_vlan; struct bfa_mem_dma_s fcport_dma; bfa_boolean_t stats_dma_ready; + struct bfa_bbcr_attr_s bbcr_attr; + enum bfa_fec_state_s fec_state; }; #define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport) @@ -552,11 +551,12 @@ void bfa_fcport_event_register(struct bfa_s *bfa, enum bfa_port_linkstate event), void *event_cbarg); bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); bfa_boolean_t bfa_fcport_is_dport(struct bfa_s *bfa); +bfa_boolean_t bfa_fcport_is_ddport(struct bfa_s *bfa); bfa_status_t bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw); enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); -void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn); +void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa); void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon); @@ -571,6 +571,10 @@ void bfa_fcport_dportenable(struct bfa_s *bfa); void bfa_fcport_dportdisable(struct bfa_s *bfa); bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa); void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state); +bfa_status_t bfa_fcport_cfg_bbcr(struct bfa_s *bfa, + bfa_boolean_t on_off, u8 bb_scn); +bfa_status_t bfa_fcport_get_bbcr_attr(struct bfa_s *bfa, + struct bfa_bbcr_attr_s *bbcr_attr); /* * bfa rport API functions @@ -667,7 +671,7 @@ struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa); void bfa_lps_delete(struct bfa_lps_s *lps); void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, wwn_t pwwn, wwn_t nwwn, - bfa_boolean_t auth_en, u8 bb_scn); + bfa_boolean_t auth_en); void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn, wwn_t nwwn); void bfa_lps_fdisclogo(struct bfa_lps_s *lps); @@ -712,10 +716,18 @@ struct bfa_fcdiag_lb_s { struct bfa_dport_s { struct bfa_s *bfa; /* Back pointer to BFA */ bfa_sm_t sm; /* finite state machine */ - u32 msgtag; /* firmware msg tag for reply */ struct bfa_reqq_wait_s reqq_wait; bfa_cb_diag_t cbfn; void *cbarg; + union bfi_diag_dport_msg_u i2hmsg; + u8 test_state; /* enum dport_test_state */ + u8 dynamic; /* boolean_t */ + u8 rsvd[2]; + u32 lpcnt; + u32 payload; /* user defined payload pattern */ + wwn_t rp_pwwn; + wwn_t rp_nwwn; + struct bfa_diag_dport_result_s result; }; struct bfa_fcdiag_s { @@ -739,11 +751,13 @@ bfa_status_t 
bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore, u32 queue, struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn, void *cbarg); bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa); -bfa_status_t bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, - void *cbarg); +bfa_status_t bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat, + bfa_cb_diag_t cbfn, void *cbarg); bfa_status_t bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg); -bfa_status_t bfa_dport_get_state(struct bfa_s *bfa, - enum bfa_dport_state *state); +bfa_status_t bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat, + bfa_cb_diag_t cbfn, void *cbarg); +bfa_status_t bfa_dport_show(struct bfa_s *bfa, + struct bfa_diag_dport_result_s *result); #endif /* __BFA_SVC_H__ */ diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index a5f7690e819..7593b7c1d33 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -63,9 +63,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS; u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; -#define BFAD_FW_FILE_CB "cbfw-3.1.0.0.bin" -#define BFAD_FW_FILE_CT "ctfw-3.1.0.0.bin" -#define BFAD_FW_FILE_CT2 "ct2fw-3.1.0.0.bin" +#define BFAD_FW_FILE_CB "cbfw-3.2.3.0.bin" +#define BFAD_FW_FILE_CT "ctfw-3.2.3.0.bin" +#define BFAD_FW_FILE_CT2 "ct2fw-3.2.3.0.bin" static u32 *bfad_load_fwimg(struct pci_dev *pdev); static void bfad_free_fwimg(void); @@ -204,6 +204,7 @@ static void bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event) { unsigned long flags; + bfa_status_t ret; bfa_trc(bfad, event); @@ -217,7 +218,7 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event) if (bfad_setup_intr(bfad)) { printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n", bfad->inst_no); - bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED); + bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); break; } @@ -242,8 +243,26 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event) printk(KERN_WARNING "bfa %s: bfa init failed\n", bfad->pci_name); + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcs_init(&bfad->bfa_fcs); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + ret = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); + if (ret != BFA_STATUS_OK) { + init_completion(&bfad->comp); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->pport.flags |= BFAD_PORT_DELETE; + bfa_fcs_exit(&bfad->bfa_fcs); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + wait_for_completion(&bfad->comp); + + bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); + break; + } bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; - bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); + bfa_sm_send_event(bfad, BFAD_E_HAL_INIT_FAILED); } break; @@ -273,12 +292,14 @@ bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event) spin_unlock_irqrestore(&bfad->bfad_lock, flags); retval = bfad_start_ops(bfad); - if (retval != BFA_STATUS_OK) + if (retval != BFA_STATUS_OK) { + bfa_sm_set_state(bfad, bfad_sm_failed); break; + } bfa_sm_set_state(bfad, bfad_sm_operational); break; - case BFAD_E_INTR_INIT_FAILED: + case BFAD_E_INIT_FAILED: bfa_sm_set_state(bfad, bfad_sm_uninit); kthread_stop(bfad->bfad_tsk); spin_lock_irqsave(&bfad->bfad_lock, flags); @@ -286,7 +307,7 @@ bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event) spin_unlock_irqrestore(&bfad->bfad_lock, flags); break; - case BFAD_E_INIT_FAILED: + case BFAD_E_HAL_INIT_FAILED: bfa_sm_set_state(bfad, bfad_sm_failed); break; default: @@ -310,13 +331,8 @@ bfad_sm_failed(struct 
bfad_s *bfad, enum bfad_sm_event event) break; case BFAD_E_STOP: - if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) - bfad_uncfg_pport(bfad); - if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) { - bfad_im_probe_undo(bfad); - bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; - } - bfad_stop(bfad); + bfa_sm_set_state(bfad, bfad_sm_fcs_exit); + bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP); break; case BFAD_E_EXIT_COMP: @@ -491,7 +507,7 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport) struct bfad_vport_s *vport; int rc; - vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); + vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC); if (!vport) { bfa_trc(bfad, 0); return; @@ -766,49 +782,20 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) bfad->pcidev = pdev; /* Adjust PCIe Maximum Read Request Size */ - if (pcie_max_read_reqsz > 0) { - int pcie_cap_reg; - u16 pcie_dev_ctl; - u16 mask = 0xffff; - - switch (pcie_max_read_reqsz) { - case 128: - mask = 0x0; - break; - case 256: - mask = 0x1000; - break; - case 512: - mask = 0x2000; - break; - case 1024: - mask = 0x3000; - break; - case 2048: - mask = 0x4000; - break; - case 4096: - mask = 0x5000; - break; - default: - break; - } - - pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); - if (mask != 0xffff && pcie_cap_reg) { - pcie_cap_reg += 0x08; - pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl); - if ((pcie_dev_ctl & 0x7000) != mask) { - printk(KERN_WARNING "BFA[%s]: " + if (pci_is_pcie(pdev) && pcie_max_read_reqsz) { + if (pcie_max_read_reqsz >= 128 && + pcie_max_read_reqsz <= 4096 && + is_power_of_2(pcie_max_read_reqsz)) { + int max_rq = pcie_get_readrq(pdev); + printk(KERN_WARNING "BFA[%s]: " "pcie_max_read_request_size is %d, " - "reset to %d\n", bfad->pci_name, - (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7, + "reset to %d\n", bfad->pci_name, max_rq, pcie_max_read_reqsz); - - pcie_dev_ctl &= ~0x7000; - pci_write_config_word(pdev, pcie_cap_reg, - pcie_dev_ctl | mask); - } + pcie_set_readrq(pdev, pcie_max_read_reqsz); + } else { + printk(KERN_WARNING "BFA[%s]: invalid " + "pcie_max_read_request_size %d ignored\n", + bfad->pci_name, pcie_max_read_reqsz); } } @@ -833,7 +820,6 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad) /* Disable PCIE Advanced Error Recovery (AER) */ pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } bfa_status_t @@ -854,7 +840,7 @@ bfad_drv_init(struct bfad_s *bfad) printk(KERN_WARNING "Not enough memory to attach all Brocade HBA ports, %s", "System may need more memory.\n"); - goto out_hal_mem_alloc_failure; + return BFA_STATUS_FAILED; } bfad->bfa.trcmod = bfad->trcmod; @@ -871,31 +857,11 @@ bfad_drv_init(struct bfad_s *bfad) bfad->bfa_fcs.trcmod = bfad->trcmod; bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); bfad->bfa_fcs.fdmi_enabled = fdmi_enable; - bfa_fcs_init(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfad->bfad_flags |= BFAD_DRV_INIT_DONE; - /* configure base port */ - rc = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); - if (rc != BFA_STATUS_OK) - goto out_cfg_pport_fail; - return BFA_STATUS_OK; - -out_cfg_pport_fail: - /* fcs exit - on cfg pport failure */ - spin_lock_irqsave(&bfad->bfad_lock, flags); - init_completion(&bfad->comp); - bfad->pport.flags |= BFAD_PORT_DELETE; - bfa_fcs_exit(&bfad->bfa_fcs); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - wait_for_completion(&bfad->comp); - /* bfa detach - free hal memory */ - bfa_detach(&bfad->bfa); - 
bfad_hal_mem_release(bfad); -out_hal_mem_alloc_failure: - return BFA_STATUS_FAILED; } void @@ -1039,13 +1005,19 @@ bfad_start_ops(struct bfad_s *bfad) { /* FCS driver info init */ spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); + + if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) + bfa_fcs_update_cfg(&bfad->bfa_fcs); + else + bfa_fcs_init(&bfad->bfa_fcs); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); - /* - * FCS update cfg - reset the pwwn/nwwn of fabric base logical port - * with values learned during bfa_init firmware GETATTR REQ. - */ - bfa_fcs_update_cfg(&bfad->bfa_fcs); + if (!(bfad->bfad_flags & BFAD_CFG_PPORT_DONE)) { + retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); + if (retval != BFA_STATUS_OK) + return BFA_STATUS_FAILED; + } /* Setup fc host fixed attribute if the lk supports */ bfad_fc_host_init(bfad->pport.im_port); @@ -1056,10 +1028,6 @@ bfad_start_ops(struct bfad_s *bfad) { printk(KERN_WARNING "bfad_im_probe failed\n"); if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) bfa_sm_set_state(bfad, bfad_sm_failed); - bfad_im_probe_undo(bfad); - bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; - bfad_uncfg_pport(bfad); - bfad_stop(bfad); return BFA_STATUS_FAILED; } else bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; @@ -1429,7 +1397,6 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) return 0; out_bfad_sm_failure: - bfa_detach(&bfad->bfa); bfad_hal_mem_release(bfad); out_drv_init_failure: /* Remove the debugfs node for this bfad */ @@ -1564,7 +1531,7 @@ restart_bfa(struct bfad_s *bfad) if (bfad_setup_intr(bfad)) { dev_printk(KERN_WARNING, &pdev->dev, "%s: bfad_setup_intr failed\n", bfad->pci_name); - bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED); + bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); return -1; } @@ -1720,6 +1687,14 @@ struct pci_device_id bfad_id_table[] = { .class_mask = ~0, }, + { + .vendor = BFA_PCI_VENDOR_ID_BROCADE, + .device = BFA_PCI_DEVICE_ID_CT2_QUAD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_SERIAL_FIBER << 8), + .class_mask = ~0, + }, {0, 0}, }; @@ -1824,7 +1799,7 @@ out: static u32 * bfad_load_fwimg(struct pci_dev *pdev) { - if (pdev->device == BFA_PCI_DEVICE_ID_CT2) { + if (bfa_asic_id_ct2(pdev->device)) { if (bfi_image_ct2_size == 0) bfad_read_firmware(pdev, &bfi_image_ct2, &bfi_image_ct2_size, BFAD_FW_FILE_CT2); @@ -1834,12 +1809,14 @@ bfad_load_fwimg(struct pci_dev *pdev) bfad_read_firmware(pdev, &bfi_image_ct, &bfi_image_ct_size, BFAD_FW_FILE_CT); return bfi_image_ct; - } else { + } else if (bfa_asic_id_cb(pdev->device)) { if (bfi_image_cb_size == 0) bfad_read_firmware(pdev, &bfi_image_cb, &bfi_image_cb_size, BFAD_FW_FILE_CB); return bfi_image_cb; } + + return NULL; } static void diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index 72f5dc32cc1..40be670a1cb 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -335,23 +335,10 @@ bfad_im_reset_stats(struct Scsi_Host *shost) } /* - * FC transport template entry, get rport loss timeout. - */ -static void -bfad_im_get_rport_loss_tmo(struct fc_rport *rport) -{ - struct bfad_itnim_data_s *itnim_data = rport->dd_data; - struct bfad_itnim_s *itnim = itnim_data->itnim; - struct bfad_s *bfad = itnim->im->bfad; - unsigned long flags; - - spin_lock_irqsave(&bfad->bfad_lock, flags); - rport->dev_loss_tmo = bfa_fcpim_path_tov_get(&bfad->bfa); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); -} - -/* * FC transport template entry, set rport loss timeout. 
+ * Update dev_loss_tmo based on the value pushed down by the stack + * In case it is lesser than path_tov of driver, set it to path_tov + 1 + * to ensure that the driver times out before the application */ static void bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) @@ -359,15 +346,11 @@ bfad_im_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) struct bfad_itnim_data_s *itnim_data = rport->dd_data; struct bfad_itnim_s *itnim = itnim_data->itnim; struct bfad_s *bfad = itnim->im->bfad; - unsigned long flags; - - if (timeout > 0) { - spin_lock_irqsave(&bfad->bfad_lock, flags); - bfa_fcpim_path_tov_set(&bfad->bfa, timeout); - rport->dev_loss_tmo = bfa_fcpim_path_tov_get(&bfad->bfa); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - } + uint16_t path_tov = bfa_fcpim_path_tov_get(&bfad->bfa); + rport->dev_loss_tmo = timeout; + if (timeout < path_tov) + rport->dev_loss_tmo = path_tov + 1; } static int @@ -610,11 +593,8 @@ bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport) return; spin_lock_irqsave(&bfad->bfad_lock, flags); - if (strlen(sym_name) > 0) { - strcpy(fcs_vport->lport.port_cfg.sym_name.symname, sym_name); - bfa_fcs_lport_ns_util_send_rspn_id( - BFA_FCS_GET_NS_FROM_PORT((&fcs_vport->lport)), NULL); - } + if (strlen(sym_name) > 0) + bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } @@ -665,7 +645,6 @@ struct fc_function_template bfad_im_fc_function_template = { .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_rport_dev_loss_tmo = 1, - .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo, .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, .issue_fc_host_lip = bfad_im_issue_fc_host_lip, .vport_create = bfad_im_vport_create, @@ -723,7 +702,6 @@ struct fc_function_template bfad_im_vport_fc_function_template = { .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_rport_dev_loss_tmo = 1, - .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo, .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, }; diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index 555e7db94a1..8994fb857ee 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -229,6 +229,18 @@ bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd) } int +bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +int bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd; @@ -402,25 +414,43 @@ bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd) } int -bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) +bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd) { - struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); - unsigned long flags; + struct bfa_bsg_bbcr_enable_s *iocmd = + (struct bfa_bsg_bbcr_enable_s *)pcmd; + unsigned long flags; + int rc; spin_lock_irqsave(&bfad->bfad_lock, flags); - if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) { - if (v_cmd == IOCMD_PORT_BBSC_ENABLE) - fcport->cfg.bb_scn_state = BFA_TRUE; - else if (v_cmd == IOCMD_PORT_BBSC_DISABLE) - fcport->cfg.bb_scn_state = 
BFA_FALSE; + if (cmd == IOCMD_PORT_BBCR_ENABLE) + rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn); + else if (cmd == IOCMD_PORT_BBCR_DISABLE) + rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0); + else { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return -EINVAL; } spin_unlock_irqrestore(&bfad->bfad_lock, flags); - iocmd->status = BFA_STATUS_OK; + iocmd->status = rc; return 0; } +int +bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd) +{ + struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd; + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = + bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + return 0; +} + + static int bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd) { @@ -1767,51 +1797,87 @@ bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd) } int -bfad_iocmd_diag_cfg_dport(struct bfad_s *bfad, unsigned int cmd, void *pcmd) +bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd) { - struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; + struct bfa_bsg_dport_enable_s *iocmd = + (struct bfa_bsg_dport_enable_s *)pcmd; unsigned long flags; struct bfad_hal_comp fcomp; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); - if (cmd == IOCMD_DIAG_DPORT_ENABLE) - iocmd->status = bfa_dport_enable(&bfad->bfa, - bfad_hcb_comp, &fcomp); - else if (cmd == IOCMD_DIAG_DPORT_DISABLE) - iocmd->status = bfa_dport_disable(&bfad->bfa, - bfad_hcb_comp, &fcomp); + iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt, + iocmd->pat, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if (iocmd->status != BFA_STATUS_OK) + bfa_trc(bfad, iocmd->status); else { - bfa_trc(bfad, 0); - spin_unlock_irqrestore(&bfad->bfad_lock, flags); - return -EINVAL; + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; } - spin_unlock_irqrestore(&bfad->bfad_lock, flags); + return 0; +} + +int +bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd) +{ + struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; + unsigned long flags; + struct bfad_hal_comp fcomp; + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) bfa_trc(bfad, iocmd->status); else { wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; } + return 0; +} + +int +bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd) +{ + struct bfa_bsg_dport_enable_s *iocmd = + (struct bfa_bsg_dport_enable_s *)pcmd; + unsigned long flags; + struct bfad_hal_comp fcomp; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bfad->bfad_lock, flags); + iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt, + iocmd->pat, bfad_hcb_comp, + &fcomp); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + if (iocmd->status != BFA_STATUS_OK) { + bfa_trc(bfad, iocmd->status); + } else { + wait_for_completion(&fcomp.comp); + iocmd->status = fcomp.status; + } return 0; } int -bfad_iocmd_diag_dport_get_state(struct bfad_s *bfad, void *pcmd) +bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd) { - struct bfa_bsg_diag_dport_get_state_s *iocmd = - (struct bfa_bsg_diag_dport_get_state_s *)pcmd; - unsigned long flags; + struct bfa_bsg_diag_dport_show_s *iocmd = + (struct bfa_bsg_diag_dport_show_s *)pcmd; + unsigned long flags; 
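+	/*
+	 * _show only copies the cached test result, so unlike the dport
+	 * enable/disable/start handlers above there is no bfad_hal_comp
+	 * completion to wait on: those issue the request while holding
+	 * bfad_lock, drop the lock, then wait_for_completion() for the
+	 * firmware callback before reporting fcomp.status back to bsg.
+	 */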
spin_lock_irqsave(&bfad->bfad_lock, flags); - iocmd->status = bfa_dport_get_state(&bfad->bfa, &iocmd->state); + iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } + int bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd) { @@ -2238,8 +2304,10 @@ bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) spin_lock_irqsave(&bfad->bfad_lock, flags); - if (bfa_fcport_is_dport(&bfad->bfa)) + if (bfa_fcport_is_dport(&bfad->bfa)) { + spin_unlock_irqrestore(&bfad->bfad_lock, flags); return BFA_STATUS_DPORT_ERR; + } if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) || (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) @@ -2662,7 +2730,7 @@ bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd) spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa), &iocmd->data, iocmd->len, iocmd->offset, - bfad_hcb_comp, &fcomp); + bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status == BFA_STATUS_OK) { wait_for_completion(&fcomp.comp); @@ -2750,9 +2818,12 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, case IOCMD_PORT_CFG_MAXFRSZ: rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd); break; - case IOCMD_PORT_BBSC_ENABLE: - case IOCMD_PORT_BBSC_DISABLE: - rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd); + case IOCMD_PORT_BBCR_ENABLE: + case IOCMD_PORT_BBCR_DISABLE: + rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd); + break; + case IOCMD_PORT_BBCR_GET_ATTR: + rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd); break; case IOCMD_LPORT_GET_ATTR: rc = bfad_iocmd_lport_get_attr(bfad, iocmd); @@ -2836,6 +2907,9 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, case IOCMD_IOC_PCIFN_CFG: rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd); break; + case IOCMD_IOC_FW_SIG_INV: + rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd); + break; case IOCMD_PCIFN_CREATE: rc = bfad_iocmd_pcifn_create(bfad, iocmd); break; @@ -2913,11 +2987,16 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, rc = bfad_iocmd_diag_lb_stat(bfad, iocmd); break; case IOCMD_DIAG_DPORT_ENABLE: + rc = bfad_iocmd_diag_dport_enable(bfad, iocmd); + break; case IOCMD_DIAG_DPORT_DISABLE: - rc = bfad_iocmd_diag_cfg_dport(bfad, cmd, iocmd); + rc = bfad_iocmd_diag_dport_disable(bfad, iocmd); + break; + case IOCMD_DIAG_DPORT_SHOW: + rc = bfad_iocmd_diag_dport_show(bfad, iocmd); break; - case IOCMD_DIAG_DPORT_GET_STATE: - rc = bfad_iocmd_diag_dport_get_state(bfad, iocmd); + case IOCMD_DIAG_DPORT_START: + rc = bfad_iocmd_diag_dport_start(bfad, iocmd); break; case IOCMD_PHY_GET_ATTR: rc = bfad_iocmd_phy_get_attr(bfad, iocmd); @@ -3309,7 +3388,8 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job) goto out; } - if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload, + if (copy_from_user((uint8_t *)bsg_fcpt, + (void *)(unsigned long)bsg_data->payload, bsg_data->payload_len)) { kfree(bsg_fcpt); rc = -EIO; @@ -3463,8 +3543,8 @@ out_free_mem: kfree(rsp_kbuf); /* Need a copy to user op */ - if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt, - bsg_data->payload_len)) + if (copy_to_user((void *)(unsigned long)bsg_data->payload, + (void *)bsg_fcpt, bsg_data->payload_len)) rc = -EIO; kfree(bsg_fcpt); diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h index 15e1fc8e796..90abef69158 100644 --- a/drivers/scsi/bfa/bfad_bsg.h +++ b/drivers/scsi/bfa/bfad_bsg.h @@ -34,6 +34,7 @@ enum { IOCMD_IOC_RESET_FWSTATS, 
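	/*
	 * These IOCMD_* codes are the vendor-specific bsg command ABI
	 * shared with the userspace management tools.  The numbering is
	 * positional, so adding or removing an entry renumbers everything
	 * after it and the tools must be updated in lockstep (note the
	 * matching BFAD_DRIVER_VERSION bump to 3.2.23.0 further down).
	 */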
IOCMD_IOC_SET_ADAPTER_NAME, IOCMD_IOC_SET_PORT_NAME, + IOCMD_IOC_FW_SIG_INV, IOCMD_IOCFC_GET_ATTR, IOCMD_IOCFC_SET_INTR, IOCMD_PORT_ENABLE, @@ -46,8 +47,9 @@ enum { IOCMD_PORT_CFG_ALPA, IOCMD_PORT_CFG_MAXFRSZ, IOCMD_PORT_CLR_ALPA, - IOCMD_PORT_BBSC_ENABLE, - IOCMD_PORT_BBSC_DISABLE, + IOCMD_PORT_BBCR_ENABLE, + IOCMD_PORT_BBCR_DISABLE, + IOCMD_PORT_BBCR_GET_ATTR, IOCMD_LPORT_GET_ATTR, IOCMD_LPORT_GET_RPORTS, IOCMD_LPORT_GET_STATS, @@ -143,7 +145,6 @@ enum { IOCMD_FCPIM_LUNMASK_DELETE, IOCMD_DIAG_DPORT_ENABLE, IOCMD_DIAG_DPORT_DISABLE, - IOCMD_DIAG_DPORT_GET_STATE, IOCMD_QOS_SET_BW, IOCMD_FCPIM_THROTTLE_QUERY, IOCMD_FCPIM_THROTTLE_SET, @@ -152,6 +153,8 @@ enum { IOCMD_FRUVPD_READ, IOCMD_FRUVPD_UPDATE, IOCMD_FRUVPD_GET_MAX_SIZE, + IOCMD_DIAG_DPORT_SHOW, + IOCMD_DIAG_DPORT_START, }; struct bfa_bsg_gen_s { @@ -495,6 +498,20 @@ struct bfa_bsg_port_cfg_mode_s { struct bfa_port_cfg_mode_s cfg; }; +struct bfa_bsg_bbcr_enable_s { + bfa_status_t status; + u16 bfad_num; + u8 bb_scn; + u8 rsvd; +}; + +struct bfa_bsg_bbcr_attr_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_bbcr_attr_s attr; +}; + struct bfa_bsg_faa_attr_s { bfa_status_t status; u16 bfad_num; @@ -578,6 +595,21 @@ struct bfa_bsg_diag_loopback_s { struct bfa_diag_loopback_result_s result; }; +struct bfa_bsg_diag_dport_show_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + struct bfa_diag_dport_result_s result; +}; + +struct bfa_bsg_dport_enable_s { + bfa_status_t status; + u16 bfad_num; + u16 rsvd; + u16 lpcnt; + u16 pat; +}; + struct bfa_bsg_diag_fwping_s { bfa_status_t status; u16 bfad_num; @@ -625,13 +657,6 @@ struct bfa_bsg_diag_lb_stat_s { u16 rsvd; }; -struct bfa_bsg_diag_dport_get_state_s { - bfa_status_t status; - u16 bfad_num; - u16 rsvd; - enum bfa_dport_state state; -}; - struct bfa_bsg_phy_attr_s { bfa_status_t status; u16 bfad_num; @@ -770,10 +795,12 @@ struct bfa_bsg_tfru_s { struct bfa_bsg_fruvpd_s { bfa_status_t status; u16 bfad_num; - u16 rsvd; + u16 rsvd1; u32 offset; u32 len; u8 data[BFA_MAX_FRUVPD_TRANSFER_SIZE]; + u8 trfr_cmpl; + u8 rsvd2[3]; }; struct bfa_bsg_fruvpd_max_size_s { @@ -795,10 +822,12 @@ struct bfa_bsg_fcpt_s { }; #define bfa_bsg_fcpt_t struct bfa_bsg_fcpt_s +#pragma pack(1) struct bfa_bsg_data { int payload_len; - void *payload; + u64 payload; }; +#pragma pack() #define bfad_chk_iocmd_sz(__payload_len, __hdrsz, __bufsz) \ (((__payload_len) != ((__hdrsz) + (__bufsz))) ? 
\ diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 439c012be76..8e83d0474fe 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -173,31 +173,9 @@ bfad_debugfs_open_reg(struct inode *inode, struct file *file) static loff_t bfad_debugfs_lseek(struct file *file, loff_t offset, int orig) { - struct bfad_debug_info *debug; - loff_t pos = file->f_pos; - - debug = file->private_data; - - switch (orig) { - case 0: - file->f_pos = offset; - break; - case 1: - file->f_pos += offset; - break; - case 2: - file->f_pos = debug->buffer_len - offset; - break; - default: - return -EINVAL; - } - - if (file->f_pos < 0 || file->f_pos > debug->buffer_len) { - file->f_pos = pos; - return -EINVAL; - } - - return file->f_pos; + struct bfad_debug_info *debug = file->private_data; + return fixed_size_llseek(file, offset, orig, + debug->buffer_len); } static ssize_t diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index 0c64a04f01f..8b97877d42c 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -57,7 +57,7 @@ #ifdef BFA_DRIVER_VERSION #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION #else -#define BFAD_DRIVER_VERSION "3.1.2.1" +#define BFAD_DRIVER_VERSION "3.2.23.0" #endif #define BFAD_PROTO_NAME FCPI_NAME @@ -240,8 +240,8 @@ enum bfad_sm_event { BFAD_E_KTHREAD_CREATE_FAILED = 2, BFAD_E_INIT = 3, BFAD_E_INIT_SUCCESS = 4, - BFAD_E_INIT_FAILED = 5, - BFAD_E_INTR_INIT_FAILED = 6, + BFAD_E_HAL_INIT_FAILED = 5, + BFAD_E_INIT_FAILED = 6, BFAD_E_FCS_EXIT_COMP = 7, BFAD_E_EXIT_COMP = 8, BFAD_E_STOP = 9 diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 5864f987f20..f067332bf76 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -73,9 +73,14 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, break; - case BFI_IOIM_STS_ABORTED: case BFI_IOIM_STS_TIMEDOUT: + host_status = DID_TIME_OUT; + cmnd->result = ScsiResult(host_status, 0); + break; case BFI_IOIM_STS_PATHTOV: + host_status = DID_TRANSPORT_DISRUPTED; + cmnd->result = ScsiResult(host_status, 0); + break; default: host_status = DID_ERROR; cmnd->result = ScsiResult(host_status, 0); @@ -206,7 +211,7 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd) spin_lock_irqsave(&bfad->bfad_lock, flags); hal_io = (struct bfa_ioim_s *) cmnd->host_scribble; if (!hal_io) { - /* IO has been completed, retrun success */ + /* IO has been completed, return success */ rc = SUCCESS; goto out; } @@ -944,13 +949,15 @@ static int bfad_im_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); - struct bfad_itnim_data_s *itnim_data = - (struct bfad_itnim_data_s *) rport->dd_data; - struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa; + struct bfad_itnim_data_s *itnim_data; + struct bfa_s *bfa; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; + itnim_data = (struct bfad_itnim_data_s *) rport->dd_data; + bfa = itnim_data->itnim->bfa_itnim->bfa; + if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) { /* * We should not mask LUN 0 - since this will translate @@ -1035,7 +1042,7 @@ bfad_fc_host_init(struct bfad_im_port_s *im_port) /* For fibre channel services type 0x20 */ fc_host_supported_fc4s(host)[7] = 1; - strncpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname, + strlcpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname, BFA_SYMNAME_MAXLEN); sprintf(fc_host_symbolic_name(host), "%s", symname); diff --git a/drivers/scsi/bfa/bfi.h 
b/drivers/scsi/bfa/bfi.h index 57b146bca18..9ef91f907de 100644 --- a/drivers/scsi/bfa/bfi.h +++ b/drivers/scsi/bfa/bfi.h @@ -46,6 +46,7 @@ */ #define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */ #define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) +#define BFI_FLASH_IMAGE_SZ 0x100000 /* * Msg header common to all msgs @@ -264,6 +265,7 @@ struct bfi_ioc_getattr_req_s { union bfi_addr_u attr_addr; }; +#define BFI_IOC_ATTR_UUID_SZ 16 struct bfi_ioc_attr_s { wwn_t mfg_pwwn; /* Mfg port wwn */ wwn_t mfg_nwwn; /* Mfg node wwn */ @@ -292,6 +294,7 @@ struct bfi_ioc_attr_s { u8 mfg_day; /* manufacturing day */ u8 mfg_month; /* manufacturing month */ u16 mfg_year; /* manufacturing year */ + u8 uuid[BFI_IOC_ATTR_UUID_SZ]; /*!< chinook uuid */ }; /* @@ -322,7 +325,29 @@ struct bfi_ioc_getattr_reply_s { #define BFI_IOC_TRC_ENTS 256 #define BFI_IOC_FW_SIGNATURE (0xbfadbfad) +#define BFA_IOC_FW_INV_SIGN (0xdeaddead) #define BFI_IOC_MD5SUM_SZ 4 + +struct bfi_ioc_fwver_s { +#ifdef __BIG_ENDIAN + uint8_t patch; + uint8_t maint; + uint8_t minor; + uint8_t major; + uint8_t rsvd[2]; + uint8_t build; + uint8_t phase; +#else + uint8_t major; + uint8_t minor; + uint8_t maint; + uint8_t patch; + uint8_t phase; + uint8_t build; + uint8_t rsvd[2]; +#endif +}; + struct bfi_ioc_image_hdr_s { u32 signature; /* constant signature */ u8 asic_gen; /* asic generation */ @@ -331,10 +356,18 @@ struct bfi_ioc_image_hdr_s { u8 port1_mode; /* device mode for port 1 */ u32 exec; /* exec vector */ u32 bootenv; /* fimware boot env */ - u32 rsvd_b[4]; + u32 rsvd_b[2]; + struct bfi_ioc_fwver_s fwver; u32 md5sum[BFI_IOC_MD5SUM_SZ]; }; +enum bfi_ioc_img_ver_cmp_e { + BFI_IOC_IMG_VER_INCOMP, + BFI_IOC_IMG_VER_OLD, + BFI_IOC_IMG_VER_SAME, + BFI_IOC_IMG_VER_BETTER +}; + #define BFI_FWBOOT_DEVMODE_OFF 4 #define BFI_FWBOOT_TYPE_OFF 8 #define BFI_FWBOOT_ENV_OFF 12 @@ -344,6 +377,12 @@ struct bfi_ioc_image_hdr_s { ((u32)(__p0_mode)) << 8 | \ ((u32)(__p1_mode))) +enum bfi_fwboot_type { + BFI_FWBOOT_TYPE_NORMAL = 0, + BFI_FWBOOT_TYPE_FLASH = 1, + BFI_FWBOOT_TYPE_MEMTEST = 2, +}; + #define BFI_FWBOOT_TYPE_NORMAL 0 #define BFI_FWBOOT_TYPE_MEMTEST 2 #define BFI_FWBOOT_ENV_OS 0 @@ -374,6 +413,10 @@ enum bfi_ioc_state { BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */ }; +#define BFA_IOC_CB_JOIN_SH 16 +#define BFA_IOC_CB_FWSTATE_MASK 0x0000ffff +#define BFA_IOC_CB_JOIN_MASK 0xffff0000 + #define BFI_IOC_ENDIAN_SIG 0x12345678 enum { @@ -973,6 +1016,7 @@ enum bfi_diag_i2h { BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST), BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST), BFI_DIAG_I2H_DPORT = BFA_I2HM(BFI_DIAG_H2I_DPORT), + BFI_DIAG_I2H_DPORT_SCN = BFA_I2HM(8), }; #define BFI_DIAG_MAX_SGES 2 @@ -1064,16 +1108,74 @@ struct bfi_diag_qtest_req_s { enum bfi_dport_req { BFI_DPORT_DISABLE = 0, /* disable dport request */ BFI_DPORT_ENABLE = 1, /* enable dport request */ + BFI_DPORT_START = 2, /* start dport request */ + BFI_DPORT_SHOW = 3, /* show dport request */ + BFI_DPORT_DYN_DISABLE = 4, /* disable dynamic dport request */ +}; + +enum bfi_dport_scn { + BFI_DPORT_SCN_TESTSTART = 1, + BFI_DPORT_SCN_TESTCOMP = 2, + BFI_DPORT_SCN_SFP_REMOVED = 3, + BFI_DPORT_SCN_DDPORT_ENABLE = 4, + BFI_DPORT_SCN_DDPORT_DISABLE = 5, + BFI_DPORT_SCN_FCPORT_DISABLE = 6, + BFI_DPORT_SCN_SUBTESTSTART = 7, + BFI_DPORT_SCN_TESTSKIP = 8, + BFI_DPORT_SCN_DDPORT_DISABLED = 9, }; struct bfi_diag_dport_req_s { struct bfi_mhdr_s mh; /* 4 bytes */ - u8 req; /* request 1: enable 0: disable */ - u8 status; /* reply status */ - u8 rsvd[2]; - u32 msgtag; /* msgtag for 
reply */ + u8 req; /* request 1: enable 0: disable */ + u8 rsvd[3]; + u32 lpcnt; + u32 payload; +}; + +struct bfi_diag_dport_rsp_s { + struct bfi_mhdr_s mh; /* header 4 bytes */ + bfa_status_t status; /* reply status */ + wwn_t pwwn; /* switch port wwn. 8 bytes */ + wwn_t nwwn; /* switch node wwn. 8 bytes */ +}; + +struct bfi_diag_dport_scn_teststart_s { + wwn_t pwwn; /* switch port wwn. 8 bytes */ + wwn_t nwwn; /* switch node wwn. 8 bytes */ + u8 type; /* bfa_diag_dport_test_type_e */ + u8 mode; /* bfa_diag_dport_test_opmode */ + u8 rsvd[2]; + u32 numfrm; /* from switch uint in 1M */ +}; + +struct bfi_diag_dport_scn_testcomp_s { + u8 status; /* bfa_diag_dport_test_status_e */ + u8 speed; /* bfa_port_speed_t */ + u16 numbuffer; /* from switch */ + u8 subtest_status[DPORT_TEST_MAX]; /* 4 bytes */ + u32 latency; /* from switch */ + u32 distance; /* from swtich unit in meters */ + /* Buffers required to saturate the link */ + u16 frm_sz; /* from switch for buf_reqd */ + u8 rsvd[2]; +}; + +struct bfi_diag_dport_scn_s { /* max size == RDS_RMESZ */ + struct bfi_mhdr_s mh; /* header 4 bytes */ + u8 state; /* new state */ + u8 rsvd[3]; + union { + struct bfi_diag_dport_scn_teststart_s teststart; + struct bfi_diag_dport_scn_testcomp_s testcomp; + } info; +}; + +union bfi_diag_dport_msg_u { + struct bfi_diag_dport_req_s req; + struct bfi_diag_dport_rsp_s rsp; + struct bfi_diag_dport_scn_s scn; }; -#define bfi_diag_dport_rsp_t struct bfi_diag_dport_req_s /* * PHY module specific @@ -1191,7 +1293,9 @@ enum bfi_fru_i2h_msgs { struct bfi_fru_write_req_s { struct bfi_mhdr_s mh; /* Common msg header */ u8 last; - u8 rsv[3]; + u8 rsv_1[3]; + u8 trfr_cmpl; + u8 rsv_2[3]; u32 offset; u32 length; struct bfi_alen_s alen; diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h index 5ae2c167b2c..1a3fe5ad58f 100644 --- a/drivers/scsi/bfa/bfi_ms.h +++ b/drivers/scsi/bfa/bfi_ms.h @@ -276,8 +276,7 @@ struct bfi_fcport_enable_req_s { struct bfi_fcport_set_svc_params_req_s { struct bfi_mhdr_s mh; /* msg header */ __be16 tx_bbcredit; /* Tx credits */ - u8 bb_scn; /* BB_SC FC credit recovery */ - u8 rsvd; + u8 rsvd[2]; }; /* @@ -446,8 +445,8 @@ struct bfi_lps_login_rsp_s { mac_t fcf_mac; u8 ext_status; u8 brcd_switch; /* attached peer is brcd switch */ - u8 bb_scn; /* atatched port's bb_scn */ u8 bfa_tag; + u8 rsvd; }; struct bfi_lps_logout_req_s { diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 50fcd018d14..6a976657b47 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -2,7 +2,7 @@ #define _BNX2FC_H_ /* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. 
* - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -64,10 +64,12 @@ #include "bnx2fc_constants.h" #define BNX2FC_NAME "bnx2fc" -#define BNX2FC_VERSION "1.0.13" +#define BNX2FC_VERSION "2.4.2" #define PFX "bnx2fc: " +#define BCM_CHIP_LEN 16 + #define BNX2X_DOORBELL_PCI_BAR 2 #define BNX2FC_MAX_BD_LEN 0xffff @@ -88,9 +90,6 @@ #define BNX2FC_MAX_NPIV 256 -#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048 -#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS -#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE #define BNX2FC_MIN_PAYLOAD 256 #define BNX2FC_MAX_PAYLOAD 2048 #define BNX2FC_MFS \ @@ -106,13 +105,10 @@ #define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ) #define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe)) #define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) -#define BNX2FC_5771X_DB_PAGE_SIZE 128 +#define BNX2X_DB_SHIFT 3 -#define BNX2FC_MAX_TASKS \ - (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS) #define BNX2FC_TASK_SIZE 128 #define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) -#define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE) #define BNX2FC_MAX_ROWS_IN_HASH_TBL 8 #define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024) @@ -125,12 +121,9 @@ #define BNX2FC_WRITE (1 << 0) #define BNX2FC_MIN_XID 0 -#define BNX2FC_MAX_XID \ - (BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1) #define FCOE_MAX_NUM_XIDS 0x2000 -#define FCOE_MIN_XID (BNX2FC_MAX_XID + 1) -#define FCOE_MAX_XID (FCOE_MIN_XID + FCOE_MAX_NUM_XIDS - 1) -#define FCOE_XIDS_PER_CPU (FCOE_MIN_XID + (512 * nr_cpu_ids) - 1) +#define FCOE_MAX_XID_OFFSET (FCOE_MAX_NUM_XIDS - 1) +#define FCOE_XIDS_PER_CPU_OFFSET ((512 * nr_cpu_ids) - 1) #define BNX2FC_MAX_LUN 0xFFFF #define BNX2FC_MAX_FCP_TGT 256 #define BNX2FC_MAX_CMD_LEN 16 @@ -206,6 +199,13 @@ struct bnx2fc_hba { #define BNX2FC_FLAG_FW_INIT_DONE 0 #define BNX2FC_FLAG_DESTROY_CMPL 1 u32 next_conn_id; + + /* xid resources */ + u16 max_xid; + u32 max_tasks; + u32 max_outstanding_cmds; + u32 elstm_xids; + struct fcoe_task_ctx_entry **task_ctx; dma_addr_t *task_ctx_dma; struct regpair *task_ctx_bd_tbl; @@ -243,6 +243,8 @@ struct bnx2fc_hba { int wait_for_link_down; int num_ofld_sess; struct list_head vports; + + char chip_num[BCM_CHIP_LEN]; }; struct bnx2fc_interface { @@ -365,6 +367,7 @@ struct bnx2fc_rport { atomic_t num_active_ios; u32 flush_in_prog; unsigned long timestamp; + unsigned long retry_delay_timestamp; struct list_head free_task_list; struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1]; struct list_head active_cmd_queue; @@ -504,8 +507,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba); void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba); int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba); void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba); -struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, - u16 min_xid, u16 max_xid); +struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba); void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr); void bnx2fc_get_link_state(struct bnx2fc_hba *hba); char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items); diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c index bdbbb13b853..b1c9a4f8cae 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_els.c +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -3,7 +3,7 @@ * This file contains helper routines that handle ELS requests * and responses. 
* - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 2daf4b0da43..785d0d71781 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -3,7 +3,7 @@ * cnic modules to create FCoE instances, send/receive non-offloaded * FIP/FCoE packets, listen to link events etc. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); #define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_VERSION BNX2FC_VERSION -#define DRV_MODULE_RELDATE "Dec 21, 2012" +#define DRV_MODULE_RELDATE "Dec 11, 2013" static char version[] = @@ -71,7 +71,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb); static void bnx2fc_start_disc(struct bnx2fc_interface *interface); static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); static int bnx2fc_lport_config(struct fc_lport *lport); -static int bnx2fc_em_config(struct fc_lport *lport); +static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba); static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba); static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba); static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); @@ -464,7 +464,7 @@ static int bnx2fc_l2_rcv_thread(void *arg) struct fcoe_percpu_s *bg = arg; struct sk_buff *skb; - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); @@ -516,23 +516,17 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len = skb->len - sizeof(struct fcoe_crc_eof); - stats = per_cpu_ptr(lport->stats, get_cpu()); - stats->RxFrames++; - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; - fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_dev(fp) = lport; fr_sof(fp) = hp->fcoe_sof; if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { - put_cpu(); kfree_skb(skb); return; } fr_eof(fp) = crc_eof.fcoe_eof; fr_crc(fp) = crc_eof.fcoe_crc32; if (pskb_trim(skb, fr_len)) { - put_cpu(); kfree_skb(skb); return; } @@ -542,10 +536,8 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); if (vn_port) { port = lport_priv(vn_port); - if (compare_ether_addr(port->data_src_addr, dest_mac) - != 0) { + if (!ether_addr_equal(port->data_src_addr, dest_mac)) { BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); - put_cpu(); kfree_skb(skb); return; } @@ -553,7 +545,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) { /* Drop FCP data. 
We dont this in L2 path */ - put_cpu(); kfree_skb(skb); return; } @@ -563,7 +554,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) case ELS_LOGO: if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { /* drop non-FIP LOGO */ - put_cpu(); kfree_skb(skb); return; } @@ -573,22 +563,23 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { /* Drop incoming ABTS */ - put_cpu(); kfree_skb(skb); return; } + stats = per_cpu_ptr(lport->stats, smp_processor_id()); + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + if (le32_to_cpu(fr_crc(fp)) != ~crc32(~0, skb->data, fr_len)) { if (stats->InvalidCRCCount < 5) printk(KERN_WARNING PFX "dropping frame with " "CRC error\n"); stats->InvalidCRCCount++; - put_cpu(); kfree_skb(skb); return; } - put_cpu(); fc_exch_recv(lport, fp); } @@ -603,7 +594,7 @@ int bnx2fc_percpu_io_thread(void *arg) struct bnx2fc_work *work, *tmp; LIST_HEAD(work_list); - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); @@ -679,6 +670,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) { struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct Scsi_Host *shost = lport->host; int rc = 0; @@ -699,8 +691,9 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) } if (!lport->vport) fc_host_max_npiv_vports(lport->host) = USHRT_MAX; - sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s", - BNX2FC_NAME, BNX2FC_VERSION, + snprintf(fc_host_symbolic_name(lport->host), 256, + "%s (Broadcom %s) v%s over %s", + BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION, interface->netdev->name); return 0; @@ -849,6 +842,9 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event, __bnx2fc_destroy(interface); } mutex_unlock(&bnx2fc_dev_lock); + + /* Ensure ALL destroy work has been completed before return */ + flush_workqueue(bnx2fc_wq); return; default: @@ -940,19 +936,21 @@ static int bnx2fc_libfc_config(struct fc_lport *lport) fc_exch_init(lport); fc_rport_init(lport); fc_disc_init(lport); + fc_disc_config(lport, lport); return 0; } -static int bnx2fc_em_config(struct fc_lport *lport) +static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba) { - int max_xid; + int fcoe_min_xid, fcoe_max_xid; + fcoe_min_xid = hba->max_xid + 1; if (nr_cpu_ids <= 2) - max_xid = FCOE_XIDS_PER_CPU; + fcoe_max_xid = hba->max_xid + FCOE_XIDS_PER_CPU_OFFSET; else - max_xid = FCOE_MAX_XID; - if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID, - max_xid, NULL)) { + fcoe_max_xid = hba->max_xid + FCOE_MAX_XID_OFFSET; + if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, fcoe_min_xid, + fcoe_max_xid, NULL)) { printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); return -ENOMEM; } @@ -1299,6 +1297,12 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) mutex_init(&hba->hba_mutex); hba->cnic = cnic; + + hba->max_tasks = cnic->max_fcoe_exchanges; + hba->elstm_xids = (hba->max_tasks / 2); + hba->max_outstanding_cmds = hba->elstm_xids; + hba->max_xid = (hba->max_tasks - 1); + rc = bnx2fc_bind_pcidev(hba); if (rc) { printk(KERN_ERR PFX "create_adapter: bind error\n"); @@ -1317,8 +1321,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) hba->num_ofld_sess = 0; - hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID, - BNX2FC_MAX_XID); + hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba); if 
(!hba->cmd_mgr) { printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); goto cmgr_err; @@ -1329,13 +1332,13 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) FCOE_IOS_PER_CONNECTION_SHIFT; fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS << FCOE_LOGINS_PER_PORT_SHIFT; - fcoe_cap->capability2 = BNX2FC_MAX_OUTSTANDING_CMNDS << + fcoe_cap->capability2 = hba->max_outstanding_cmds << FCOE_NUMBER_OF_EXCHANGES_SHIFT; fcoe_cap->capability2 |= BNX2FC_MAX_NPIV << FCOE_NPIV_WWN_PER_PORT_SHIFT; fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS << FCOE_TARGETS_SUPPORTED_SHIFT; - fcoe_cap->capability3 |= BNX2FC_MAX_OUTSTANDING_CMNDS << + fcoe_cap->capability3 |= hba->max_outstanding_cmds << FCOE_OUTSTANDING_COMMANDS_SHIFT; fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL; @@ -1372,6 +1375,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba, return NULL; } ctlr = fcoe_ctlr_device_priv(ctlr_dev); + ctlr->cdev = ctlr_dev; interface = fcoe_ctlr_priv(ctlr); dev_hold(netdev); kref_init(&interface->kref); @@ -1415,7 +1419,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, struct Scsi_Host *shost; struct fc_vport *vport = dev_to_vport(parent); struct bnx2fc_lport *blport; - struct bnx2fc_hba *hba; + struct bnx2fc_hba *hba = interface->hba; int rc = 0; blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); @@ -1425,6 +1429,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, } /* Allocate Scsi_Host structure */ + bnx2fc_shost_template.can_queue = hba->max_outstanding_cmds; if (!npiv) lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port)); else @@ -1476,7 +1481,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, /* Allocate exchange manager */ if (!npiv) - rc = bnx2fc_em_config(lport); + rc = bnx2fc_em_config(lport, hba); else { shost = vport_to_shost(vport); n_port = shost_priv(shost); @@ -1490,7 +1495,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, bnx2fc_interface_get(interface); - hba = interface->hba; spin_lock_bh(&hba->hba_lock); blport->lport = lport; list_add_tail(&blport->list, &hba->vports); @@ -1649,23 +1653,60 @@ mem_err: static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba) { struct cnic_dev *cnic; + struct pci_dev *pdev; if (!hba->cnic) { printk(KERN_ERR PFX "cnic is NULL\n"); return -ENODEV; } cnic = hba->cnic; - hba->pcidev = cnic->pcidev; - if (hba->pcidev) - pci_dev_get(hba->pcidev); + pdev = hba->pcidev = cnic->pcidev; + if (!hba->pcidev) + return -ENODEV; + switch (pdev->device) { + case PCI_DEVICE_ID_NX2_57710: + strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57711: + strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57712: + case PCI_DEVICE_ID_NX2_57712_MF: + case PCI_DEVICE_ID_NX2_57712_VF: + strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57800: + case PCI_DEVICE_ID_NX2_57800_MF: + case PCI_DEVICE_ID_NX2_57800_VF: + strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57810: + case PCI_DEVICE_ID_NX2_57810_MF: + case PCI_DEVICE_ID_NX2_57810_VF: + strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57840: + case PCI_DEVICE_ID_NX2_57840_MF: + case PCI_DEVICE_ID_NX2_57840_VF: + case PCI_DEVICE_ID_NX2_57840_2_20: + case PCI_DEVICE_ID_NX2_57840_4_10: + strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN); + break; + default: + pr_err(PFX "Unknown device 
id 0x%x\n", pdev->device); + break; + } + pci_dev_get(hba->pcidev); return 0; } static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) { - if (hba->pcidev) + if (hba->pcidev) { + hba->chip_num[0] = '\0'; pci_dev_put(hba->pcidev); + } hba->pcidev = NULL; } @@ -1958,6 +1999,24 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev) set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); } +/* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */ +static int __bnx2fc_disable(struct fcoe_ctlr *ctlr) +{ + struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); + + if (interface->enabled == true) { + if (!ctlr->lp) { + pr_err(PFX "__bnx2fc_disable: lport not found\n"); + return -ENODEV; + } else { + interface->enabled = false; + fcoe_ctlr_link_down(ctlr); + fcoe_clean_pending_queue(ctlr->lp); + } + } + return 0; +} + /** * Deperecated: Use bnx2fc_enabled() */ @@ -1972,20 +2031,34 @@ static int bnx2fc_disable(struct net_device *netdev) interface = bnx2fc_interface_lookup(netdev); ctlr = bnx2fc_to_ctlr(interface); - if (!interface || !ctlr->lp) { + + if (!interface) { rc = -ENODEV; - printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n"); + pr_err(PFX "bnx2fc_disable: interface not found\n"); } else { - interface->enabled = false; - fcoe_ctlr_link_down(ctlr); - fcoe_clean_pending_queue(ctlr->lp); + rc = __bnx2fc_disable(ctlr); } - mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return rc; } +static int __bnx2fc_enable(struct fcoe_ctlr *ctlr) +{ + struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); + + if (interface->enabled == false) { + if (!ctlr->lp) { + pr_err(PFX "__bnx2fc_enable: lport not found\n"); + return -ENODEV; + } else if (!bnx2fc_link_ok(ctlr->lp)) { + fcoe_ctlr_link_up(ctlr); + interface->enabled = true; + } + } + return 0; +} + /** * Deprecated: Use bnx2fc_enabled() */ @@ -2000,12 +2073,11 @@ static int bnx2fc_enable(struct net_device *netdev) interface = bnx2fc_interface_lookup(netdev); ctlr = bnx2fc_to_ctlr(interface); - if (!interface || !ctlr->lp) { + if (!interface) { rc = -ENODEV; - printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n"); - } else if (!bnx2fc_link_ok(ctlr->lp)) { - fcoe_ctlr_link_up(ctlr); - interface->enabled = true; + pr_err(PFX "bnx2fc_enable: interface not found\n"); + } else { + rc = __bnx2fc_enable(ctlr); } mutex_unlock(&bnx2fc_dev_lock); @@ -2026,14 +2098,12 @@ static int bnx2fc_enable(struct net_device *netdev) static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev) { struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); - struct fc_lport *lport = ctlr->lp; - struct net_device *netdev = bnx2fc_netdev(lport); switch (cdev->enabled) { case FCOE_CTLR_ENABLED: - return bnx2fc_enable(netdev); + return __bnx2fc_enable(ctlr); case FCOE_CTLR_DISABLED: - return bnx2fc_disable(netdev); + return __bnx2fc_disable(ctlr); case FCOE_CTLR_UNUSED: default: return -ENOTSUPP; @@ -2133,6 +2203,7 @@ static int _bnx2fc_create(struct net_device *netdev, } ctlr = bnx2fc_to_ctlr(interface); + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); interface->vlan_id = vlan_id; interface->timer_work_queue = @@ -2143,7 +2214,7 @@ static int _bnx2fc_create(struct net_device *netdev, goto ifput_err; } - lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0); + lport = bnx2fc_if_create(interface, &cdev->dev, 0); if (!lport) { printk(KERN_ERR PFX "Failed to create interface (%s)\n", netdev->name); @@ -2159,8 +2230,6 @@ static int _bnx2fc_create(struct net_device *netdev, /* Make this master N_port */ ctlr->lp = lport; - cdev = 
fcoe_ctlr_to_ctlr_dev(ctlr); - if (link_state == BNX2FC_CREATE_LINK_UP) cdev->enabled = FCOE_CTLR_ENABLED; else @@ -2315,6 +2384,9 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev) __bnx2fc_destroy(interface); mutex_unlock(&bnx2fc_dev_lock); + /* Ensure ALL destroy work has been completed before return */ + flush_workqueue(bnx2fc_wq); + bnx2fc_ulp_stop(hba); /* unregister cnic device */ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) @@ -2512,12 +2584,16 @@ static int __init bnx2fc_mod_init(void) spin_lock_init(&p->fp_work_lock); } + cpu_notifier_register_begin(); + for_each_online_cpu(cpu) { bnx2fc_percpu_thread_create(cpu); } /* Initialize per CPU interrupt thread */ - register_hotcpu_notifier(&bnx2fc_cpu_notifier); + __register_hotcpu_notifier(&bnx2fc_cpu_notifier); + + cpu_notifier_register_done(); cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); @@ -2582,13 +2658,17 @@ static void __exit bnx2fc_mod_exit(void) if (l2_thread) kthread_stop(l2_thread); - unregister_hotcpu_notifier(&bnx2fc_cpu_notifier); + cpu_notifier_register_begin(); /* Destroy per cpu threads */ for_each_online_cpu(cpu) { bnx2fc_percpu_thread_destroy(cpu); } + __unregister_hotcpu_notifier(&bnx2fc_cpu_notifier); + + cpu_notifier_register_done(); + destroy_workqueue(bnx2fc_wq); /* * detach from scsi transport @@ -2706,7 +2786,6 @@ static struct scsi_host_template bnx2fc_shost_template = { .change_queue_type = fc_change_queue_type, .this_id = -1, .cmd_per_lun = 3, - .can_queue = BNX2FC_CAN_QUEUE, .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, .max_sectors = 1024, diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 85ea98a80f4..512aed3ae4f 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -2,7 +2,7 @@ * This file contains the code that low level functions that interact * with 57712 FCoE firmware. 
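[Example note] The bnx2fc_mod_init()/bnx2fc_mod_exit() hunks above move from plain register_hotcpu_notifier() to the cpu_notifier_register_begin()/__register_hotcpu_notifier()/cpu_notifier_register_done() bracket, which holds CPU hotplug quiescent so no CPU can come or go between the for_each_online_cpu() walk and the notifier registration. A stripped-down sketch of the pattern, assuming a module context; the mydrv_* names are hypothetical stand-ins:

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/module.h>

/* hypothetical callbacks; a real driver supplies its own */
static int mydrv_cpu_callback(struct notifier_block *nb,
			      unsigned long action, void *hcpu);
static void mydrv_percpu_thread_create(unsigned int cpu);

static struct notifier_block mydrv_cpu_notifier = {
	.notifier_call = mydrv_cpu_callback,
};

static int __init mydrv_init(void)
{
	unsigned int cpu;

	cpu_notifier_register_begin();          /* blocks CPU hotplug */
	for_each_online_cpu(cpu)
		mydrv_percpu_thread_create(cpu);
	/* double-underscore variant: caller already holds the lock */
	__register_hotcpu_notifier(&mydrv_cpu_notifier);
	cpu_notifier_register_done();           /* re-enables hotplug */
	return 0;
}
module_init(mydrv_init);

The exit path in the patch uses the same bracket in reverse: destroy the per-CPU threads, then __unregister_hotcpu_notifier(), all inside begin/done.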
* - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -77,7 +77,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); - fcoe_init1.num_tasks = BNX2FC_MAX_TASKS; + fcoe_init1.num_tasks = hba->max_tasks; fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX; fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX; fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ; @@ -126,7 +126,11 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) fcoe_init3.error_bit_map_lo = 0xffffffff; fcoe_init3.error_bit_map_hi = 0xffffffff; - fcoe_init3.perf_config = 1; + /* + * enable both cached connection and cached tasks + * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both + */ + fcoe_init3.perf_config = 3; kwqe_arr[0] = (struct kwqe *) &fcoe_init1; kwqe_arr[1] = (struct kwqe *) &fcoe_init2; @@ -697,7 +701,7 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); - if (xid > BNX2FC_MAX_XID) { + if (xid > hba->max_xid) { BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid); goto ret_err_rqe; @@ -815,7 +819,7 @@ ret_err_rqe: BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); - if (xid > BNX2FC_MAX_XID) { + if (xid > hba->max_xid) { BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid); goto ret_warn_rqe; } @@ -880,7 +884,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) spin_lock_bh(&tgt->tgt_lock); xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; - if (xid >= BNX2FC_MAX_TASKS) { + if (xid >= hba->max_tasks) { printk(KERN_ERR PFX "ERROR:xid out of range\n"); spin_unlock_bh(&tgt->tgt_lock); return; @@ -1417,8 +1421,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt) reg_base = pci_resource_start(hba->pcidev, BNX2X_DOORBELL_PCI_BAR); - reg_off = BNX2FC_5771X_DB_PAGE_SIZE * - (context_id & 0x1FFFF) + DPM_TRIGER_TYPE; + reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF); tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4); if (!tgt->ctx_base) return -ENOMEM; @@ -1842,6 +1845,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) int rc = 0; struct regpair *task_ctx_bdt; dma_addr_t addr; + int task_ctx_arr_sz; int i; /* @@ -1865,7 +1869,8 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) * Allocate task_ctx which is an array of pointers pointing to * a page containing 32 task contexts */ - hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)), + task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE); + hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)), GFP_KERNEL); if (!hba->task_ctx) { printk(KERN_ERR PFX "unable to allocate task context array\n"); @@ -1876,7 +1881,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) /* * Allocate task_ctx_dma which is an array of dma addresses */ - hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ * + hba->task_ctx_dma = kmalloc((task_ctx_arr_sz * sizeof(dma_addr_t)), GFP_KERNEL); if (!hba->task_ctx_dma) { printk(KERN_ERR PFX "unable to alloc context mapping array\n"); @@ -1885,7 +1890,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) } task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; - for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { + for (i = 0; i < task_ctx_arr_sz; i++) { hba->task_ctx[i] = 
dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, @@ -1905,7 +1910,7 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) return 0; out3: - for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { + for (i = 0; i < task_ctx_arr_sz; i++) { if (hba->task_ctx[i]) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, @@ -1929,6 +1934,7 @@ out: void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba) { + int task_ctx_arr_sz; int i; if (hba->task_ctx_bd_tbl) { @@ -1938,8 +1944,9 @@ void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba) hba->task_ctx_bd_tbl = NULL; } + task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE); if (hba->task_ctx) { - for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { + for (i = 0; i < task_ctx_arr_sz; i++) { if (hba->task_ctx[i]) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->task_ctx[i], @@ -1959,26 +1966,29 @@ static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba) { int i; int segment_count; - int hash_table_size; u32 *pbl; - segment_count = hba->hash_tbl_segment_count; - hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL * - sizeof(struct fcoe_hash_table_entry); + if (hba->hash_tbl_segments) { - pbl = hba->hash_tbl_pbl; - for (i = 0; i < segment_count; ++i) { - dma_addr_t dma_address; + pbl = hba->hash_tbl_pbl; + if (pbl) { + segment_count = hba->hash_tbl_segment_count; + for (i = 0; i < segment_count; ++i) { + dma_addr_t dma_address; - dma_address = le32_to_cpu(*pbl); - ++pbl; - dma_address += ((u64)le32_to_cpu(*pbl)) << 32; - ++pbl; - dma_free_coherent(&hba->pcidev->dev, - BNX2FC_HASH_TBL_CHUNK_SIZE, - hba->hash_tbl_segments[i], - dma_address); + dma_address = le32_to_cpu(*pbl); + ++pbl; + dma_address += ((u64)le32_to_cpu(*pbl)) << 32; + ++pbl; + dma_free_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + hba->hash_tbl_segments[i], + dma_address); + } + } + kfree(hba->hash_tbl_segments); + hba->hash_tbl_segments = NULL; } if (hba->hash_tbl_pbl) { @@ -2016,7 +2026,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL); if (!dma_segment_array) { printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); - return -ENOMEM; + goto cleanup_ht; } for (i = 0; i < segment_count; ++i) { @@ -2027,15 +2037,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) GFP_KERNEL); if (!hba->hash_tbl_segments[i]) { printk(KERN_ERR PFX "hash segment alloc failed\n"); - while (--i >= 0) { - dma_free_coherent(&hba->pcidev->dev, - BNX2FC_HASH_TBL_CHUNK_SIZE, - hba->hash_tbl_segments[i], - dma_segment_array[i]); - hba->hash_tbl_segments[i] = NULL; - } - kfree(dma_segment_array); - return -ENOMEM; + goto cleanup_dma; } memset(hba->hash_tbl_segments[i], 0, BNX2FC_HASH_TBL_CHUNK_SIZE); @@ -2047,8 +2049,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) GFP_KERNEL); if (!hba->hash_tbl_pbl) { printk(KERN_ERR PFX "hash table pbl alloc failed\n"); - kfree(dma_segment_array); - return -ENOMEM; + goto cleanup_dma; } memset(hba->hash_tbl_pbl, 0, PAGE_SIZE); @@ -2073,6 +2074,22 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) } kfree(dma_segment_array); return 0; + +cleanup_dma: + for (i = 0; i < segment_count; ++i) { + if (hba->hash_tbl_segments[i]) + dma_free_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + hba->hash_tbl_segments[i], + dma_segment_array[i]); + } + + kfree(dma_segment_array); + +cleanup_ht: + kfree(hba->hash_tbl_segments); + hba->hash_tbl_segments = NULL; + return -ENOMEM; } /** diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c 
b/drivers/scsi/bnx2fc/bnx2fc_io.c index 60798e829de..7bc47fc7c68 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1,7 +1,7 @@ /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver. * IO manager and SCSI IO processing. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -239,8 +239,7 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code) sc_cmd->scsi_done(sc_cmd); } -struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, - u16 min_xid, u16 max_xid) +struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) { struct bnx2fc_cmd_mgr *cmgr; struct io_bdt *bdt_info; @@ -252,6 +251,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, int num_ios, num_pri_ios; size_t bd_tbl_sz; int arr_sz = num_possible_cpus() + 1; + u16 min_xid = BNX2FC_MIN_XID; + u16 max_xid = hba->max_xid; if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ @@ -281,6 +282,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, arr_sz, GFP_KERNEL); if (!cmgr->free_list_lock) { printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); + kfree(cmgr->free_list); + cmgr->free_list = NULL; goto mem_err; } @@ -298,7 +301,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, * of slow path requests. */ xid = BNX2FC_MIN_XID; - num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS; + num_pri_ios = num_ios - hba->elstm_xids; for (i = 0; i < num_ios; i++) { io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); @@ -367,7 +370,7 @@ void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr) struct bnx2fc_hba *hba = cmgr->hba; size_t bd_tbl_sz; u16 min_xid = BNX2FC_MIN_XID; - u16 max_xid = BNX2FC_MAX_XID; + u16 max_xid = hba->max_xid; int num_ios; int i; @@ -593,13 +596,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) mp_req->mp_resp_bd = NULL; } if (mp_req->req_buf) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, mp_req->req_buf, mp_req->req_buf_dma); mp_req->req_buf = NULL; } if (mp_req->resp_buf) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, mp_req->resp_buf, mp_req->resp_buf_dma); mp_req->resp_buf = NULL; @@ -621,7 +624,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) mp_req->req_len = sizeof(struct fcp_cmnd); io_req->data_xfer_len = mp_req->req_len; - mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &mp_req->req_buf_dma, GFP_ATOMIC); if (!mp_req->req_buf) { @@ -630,7 +633,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) return FAILED; } - mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_ATOMIC); if (!mp_req->resp_buf) { @@ -638,8 +641,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) bnx2fc_free_mp_resc(io_req); return FAILED; } - memset(mp_req->req_buf, 0, PAGE_SIZE); - memset(mp_req->resp_buf, 0, PAGE_SIZE); + memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE); + memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE); /* Allocate and map mp_req_bd and mp_resp_bd */ sz = sizeof(struct fcoe_bd_ctx); @@ -664,7 +667,7 @@ int 
bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) mp_req_bd = mp_req->mp_req_bd; mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff; mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32); - mp_req_bd->buf_len = PAGE_SIZE; + mp_req_bd->buf_len = CNIC_PAGE_SIZE; mp_req_bd->flags = 0; /* @@ -676,7 +679,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) addr = mp_req->resp_buf_dma; mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); - mp_resp_bd->buf_len = PAGE_SIZE; + mp_resp_bd->buf_len = CNIC_PAGE_SIZE; mp_resp_bd->flags = 0; return SUCCESS; @@ -1245,6 +1248,12 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) kref_put(&io_req->refcount, bnx2fc_cmd_release); /* drop timer hold */ rc = bnx2fc_expl_logo(lport, io_req); + /* This only occurs when an task abort was requested while ABTS + is in progress. Setting the IO_CLEANUP flag will skip the + RRQ process in the case when the fw generated SCSI_CMD cmpl + was a result from the ABTS request rather than the CLEANUP + request */ + set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); goto out; } @@ -1269,8 +1278,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) spin_lock_bh(&tgt->tgt_lock); io_req->wait_for_comp = 0; - if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, - &io_req->req_flags))) { + if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "IO completed in a different context\n"); + rc = SUCCESS; + } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags))) { /* Let the scsi-ml try to recover this command */ printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", io_req->xid); @@ -1861,7 +1873,15 @@ int bnx2fc_queuecommand(struct Scsi_Host *host, rc = SCSI_MLQUEUE_TARGET_BUSY; goto exit_qcmd; } - + if (tgt->retry_delay_timestamp) { + if (time_after(jiffies, tgt->retry_delay_timestamp)) { + tgt->retry_delay_timestamp = 0; + } else { + /* If retry_delay timer is active, flow off the ML */ + rc = SCSI_MLQUEUE_TARGET_BUSY; + goto exit_qcmd; + } + } io_req = bnx2fc_cmd_alloc(tgt); if (!io_req) { rc = SCSI_MLQUEUE_HOST_BUSY; @@ -1951,6 +1971,15 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, " fcp_resid = 0x%x\n", io_req->cdb_status, io_req->fcp_resid); sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + + if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL || + io_req->cdb_status == SAM_STAT_BUSY) { + /* Set the jiffies + retry_delay_timer * 100ms + for the rport/tgt */ + tgt->retry_delay_timestamp = jiffies + + fcp_rsp->retry_delay_timer * HZ / 10; + } + } if (io_req->fcp_resid) scsi_set_resid(sc_cmd, io_req->fcp_resid); diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index c57a3bb8a9f..6870cf6781d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -2,7 +2,7 @@ * Handles operations such as session offload/upload etc, and manages * session resources such as connection id and qp resources. 
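[Example note] The bnx2fc_io.c hunks above wire up the FCP retry-delay hint: when a target returns SAM_STAT_TASK_SET_FULL or SAM_STAT_BUSY, fcp_rsp->retry_delay_timer (in 100 ms units) is converted to an absolute jiffies deadline on the rport, and bnx2fc_queuecommand() returns SCSI_MLQUEUE_TARGET_BUSY until that deadline passes. A condensed sketch of the two halves; the helper names are hypothetical, while the field and the arithmetic come straight from the hunks:

#include <linux/jiffies.h>
#include <linux/types.h>
#include "bnx2fc.h"	/* for struct bnx2fc_rport */

/* completion side: arm the window; retry_delay_timer is in 100 ms units,
 * hence the HZ / 10 scaling */
static void bnx2fc_arm_retry_delay(struct bnx2fc_rport *tgt,
				   u16 retry_delay_timer)
{
	tgt->retry_delay_timestamp = jiffies + retry_delay_timer * HZ / 10;
}

/* queuecommand side: true while commands should be flowed off */
static bool bnx2fc_retry_delay_active(struct bnx2fc_rport *tgt)
{
	if (!tgt->retry_delay_timestamp)
		return false;			/* no window armed */
	if (time_after(jiffies, tgt->retry_delay_timestamp)) {
		tgt->retry_delay_timestamp = 0;	/* window expired */
		return false;
	}
	return true;	/* caller returns SCSI_MLQUEUE_TARGET_BUSY */
}

Using time_after() keeps the comparison safe across jiffies wraparound, which is why the patch stores an absolute timestamp rather than a countdown.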
* - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -386,6 +386,7 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, tgt->rq_prod_idx = 0x8000; tgt->rq_cons_idx = 0; atomic_set(&tgt->num_active_ios, 0); + tgt->retry_delay_timestamp = 0; if (rdata->flags & FC_RP_FLAGS_RETRY && rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET && @@ -673,7 +674,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, /* Allocate and map SQ */ tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; - tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, &tgt->sq_dma, GFP_KERNEL); @@ -686,7 +688,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, /* Allocate and map CQ */ tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; - tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, &tgt->cq_dma, GFP_KERNEL); @@ -699,7 +702,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, /* Allocate and map RQ and RQ PBL */ tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; - tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, &tgt->rq_dma, GFP_KERNEL); @@ -710,8 +714,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, } memset(tgt->rq, 0, tgt->rq_mem_size); - tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *); - tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); + tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, &tgt->rq_pbl_dma, GFP_KERNEL); @@ -722,7 +727,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, } memset(tgt->rq_pbl, 0, tgt->rq_pbl_size); - num_pages = tgt->rq_mem_size / PAGE_SIZE; + num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE; page = tgt->rq_dma; pbl = (u32 *)tgt->rq_pbl; @@ -731,13 +736,13 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, pbl++; *pbl = (u32)((u64)page >> 32); pbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } /* Allocate and map XFERQ */ tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; - tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) & - PAGE_MASK; + tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, &tgt->xferq_dma, GFP_KERNEL); @@ -750,8 +755,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, /* Allocate and map CONFQ & CONFQ PBL */ tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; - tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) & - PAGE_MASK; + tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, &tgt->confq_dma, GFP_KERNEL); @@ -763,9 
+768,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, memset(tgt->confq, 0, tgt->confq_mem_size); tgt->confq_pbl_size = - (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *); + (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); tgt->confq_pbl_size = - (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_pbl_size, @@ -777,7 +782,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, } memset(tgt->confq_pbl, 0, tgt->confq_pbl_size); - num_pages = tgt->confq_mem_size / PAGE_SIZE; + num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE; page = tgt->confq_dma; pbl = (u32 *)tgt->confq_pbl; @@ -786,7 +791,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, pbl++; *pbl = (u32)((u64)page >> 32); pbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } /* Allocate and map ConnDB */ @@ -805,8 +810,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, /* Allocate and map LCQ */ tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; - tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) & - PAGE_MASK; + tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, &tgt->lcq_dma, GFP_KERNEL); diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h index 25093a04123..3d33767f2f2 100644 --- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h +++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h @@ -1,6 +1,6 @@ /* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI * - * Copyright (c) 2006 - 2012 Broadcom Corporation + * Copyright (c) 2006 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h index f2db5fe7bdc..7052a839b0e 100644 --- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h +++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h @@ -1,6 +1,6 @@ /* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI. 
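[Example note] From bnx2fc_tgt.c onward (and throughout the bnx2i hunks below), every ring and page-table size switches from PAGE_SIZE to CNIC_PAGE_SIZE so the queues match the cnic/firmware page size rather than the host CPU's, always via the same round-up-and-mask idiom. A tiny userspace sketch of that idiom, assuming a 4 KiB CNIC page purely for illustration:

#include <stdio.h>

#define CNIC_PAGE_SIZE	4096UL			/* assumed value */
#define CNIC_PAGE_MASK	(~(CNIC_PAGE_SIZE - 1))

/* same idiom as the hunks: round sz up to a whole number of CNIC pages */
static unsigned long cnic_align(unsigned long sz)
{
	return (sz + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
}

int main(void)
{
	printf("%lu -> %lu\n", 5000UL, cnic_align(5000UL));	/* 5000 -> 8192 */
	return 0;
}

The matching pbl loops then advance by CNIC_PAGE_SIZE per page-table entry, so the number of entries is exactly the aligned size divided by the CNIC page size.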
* - * Copyright (c) 2006 - 2012 Broadcom Corporation + * Copyright (c) 2006 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -581,8 +581,10 @@ struct iscsi_kwqe_init1 { #define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 -#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) -#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 +#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6) +#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6 +#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7) +#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7 u16 cq_num_wqes; #elif defined(__LITTLE_ENDIAN) u16 cq_num_wqes; @@ -593,8 +595,10 @@ struct iscsi_kwqe_init1 { #define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) #define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 -#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) -#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 +#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6) +#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6 +#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7) +#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7 u8 cq_log_wqes_per_page; #endif #if defined(__BIG_ENDIAN) diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index b44d04e41b0..c73bbcb63c0 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -1,6 +1,6 @@ /* bnx2i.h: Broadcom NetXtreme II iSCSI driver. * - * Copyright (c) 2006 - 2012 Broadcom Corporation + * Copyright (c) 2006 - 2013 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mike Christie * @@ -64,7 +64,7 @@ #define MAX_PAGES_PER_CTRL_STRUCT_POOL 8 #define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4 -#define BNX2I_5771X_DBELL_PAGE_SIZE 128 +#define BNX2X_DB_SHIFT 3 /* 5706/08 hardware has limit on maximum buffer size per BD it can handle */ #define MAX_BD_LENGTH 65535 @@ -580,8 +580,8 @@ struct bnx2i_5771x_dbell { * @sq_mem_size: SQ size * @sq_prod_qe: SQ producer entry pointer * @sq_cons_qe: SQ consumer entry pointer - * @sq_first_qe: virtaul address of first entry in SQ - * @sq_last_qe: virtaul address of last entry in SQ + * @sq_first_qe: virtual address of first entry in SQ + * @sq_last_qe: virtual address of last entry in SQ * @sq_prod_idx: SQ producer index * @sq_cons_idx: SQ consumer index * @sqe_left: number sq entry left @@ -593,8 +593,8 @@ struct bnx2i_5771x_dbell { * @cq_mem_size: CQ size * @cq_prod_qe: CQ producer entry pointer * @cq_cons_qe: CQ consumer entry pointer - * @cq_first_qe: virtaul address of first entry in CQ - * @cq_last_qe: virtaul address of last entry in CQ + * @cq_first_qe: virtual address of first entry in CQ + * @cq_last_qe: virtual address of last entry in CQ * @cq_prod_idx: CQ producer index * @cq_cons_idx: CQ consumer index * @cqe_left: number cq entry left @@ -608,8 +608,8 @@ struct bnx2i_5771x_dbell { * @rq_mem_size: RQ size * @rq_prod_qe: RQ producer entry pointer * @rq_cons_qe: RQ consumer entry pointer - * @rq_first_qe: virtaul address of first entry in RQ - * @rq_last_qe: virtaul address of last entry in RQ + * @rq_first_qe: virtual address of first entry in RQ + * @rq_last_qe: virtual address of last entry in RQ * @rq_prod_idx: RQ producer index * @rq_cons_idx: RQ consumer index * @rqe_left: number rq entry left diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c 
b/drivers/scsi/bnx2i/bnx2i_hwi.c index a28b03e5a5f..d6d491c2f00 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -1,6 +1,6 @@ /* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver. * - * Copyright (c) 2006 - 2012 Broadcom Corporation + * Copyright (c) 2006 - 2013 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mike Christie * @@ -61,7 +61,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) * yield integral num of page buffers */ /* adjust SQ */ - num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; + num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; if (hba->max_sqes < num_elements_per_pg) hba->max_sqes = num_elements_per_pg; else if (hba->max_sqes % num_elements_per_pg) @@ -69,7 +69,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) ~(num_elements_per_pg - 1); /* adjust CQ */ - num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE; + num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE; if (hba->max_cqes < num_elements_per_pg) hba->max_cqes = num_elements_per_pg; else if (hba->max_cqes % num_elements_per_pg) @@ -77,7 +77,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) ~(num_elements_per_pg - 1); /* adjust RQ */ - num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE; + num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE; if (hba->max_rqes < num_elements_per_pg) hba->max_rqes = num_elements_per_pg; else if (hba->max_rqes % num_elements_per_pg) @@ -126,7 +126,7 @@ static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code) /** * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * @action: action, ARM or DISARM. 
For now only ARM_CQE is used * * Arm'ing CQ will enable chip to generate global EQ events inorder to interrupt @@ -756,7 +756,7 @@ void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) /** * bnx2i_send_conn_destroy - initiates iscsi connection teardown process * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate * iscsi connection context clean-up process @@ -791,7 +791,7 @@ int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) /** * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE */ @@ -851,7 +851,7 @@ static int bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, /** * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE */ @@ -920,7 +920,7 @@ static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process * * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE */ @@ -939,7 +939,7 @@ int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) /** * setup_qp_page_tables - iscsi QP page table setup function - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices requires * 64-bit address in big endian format. 
Whereas 10G/sec (57710) requires @@ -959,7 +959,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) /* SQ page table */ memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); - num_pages = ep->qp.sq_mem_size / PAGE_SIZE; + num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE; page = ep->qp.sq_phys; if (cnic_dev_10g) @@ -973,7 +973,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) ptbl++; *ptbl = (u32) ((u64) page >> 32); ptbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } else { /* PTE is written in big endian format for * 5706/5708/5709 devices */ @@ -981,13 +981,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) ptbl++; *ptbl = (u32) page; ptbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } } /* RQ page table */ memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); - num_pages = ep->qp.rq_mem_size / PAGE_SIZE; + num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE; page = ep->qp.rq_phys; if (cnic_dev_10g) @@ -1001,7 +1001,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) ptbl++; *ptbl = (u32) ((u64) page >> 32); ptbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } else { /* PTE is written in big endian format for * 5706/5708/5709 devices */ @@ -1009,13 +1009,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) ptbl++; *ptbl = (u32) page; ptbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } } /* CQ page table */ memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); - num_pages = ep->qp.cq_mem_size / PAGE_SIZE; + num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE; page = ep->qp.cq_phys; if (cnic_dev_10g) @@ -1029,7 +1029,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) ptbl++; *ptbl = (u32) ((u64) page >> 32); ptbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } else { /* PTE is written in big endian format for * 5706/5708/5709 devices */ @@ -1037,7 +1037,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) ptbl++; *ptbl = (u32) page; ptbl++; - page += PAGE_SIZE; + page += CNIC_PAGE_SIZE; } } } @@ -1046,7 +1046,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) /** * bnx2i_alloc_qp_resc - allocates required resources for QP. * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * Allocate QP (transport layer for iSCSI connection) resources, DMA'able * memory for SQ/RQ/CQ and page tables. 
EP structure elements such @@ -1064,11 +1064,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) /* Allocate page table memory for SQ which is page aligned */ ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; ep->qp.sq_mem_size = - (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.sq_pgtbl_size = - (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *); + (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); ep->qp.sq_pgtbl_size = - (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.sq_pgtbl_virt = dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, @@ -1101,11 +1101,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) /* Allocate page table memory for CQ which is page aligned */ ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; ep->qp.cq_mem_size = - (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.cq_pgtbl_size = - (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *); + (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); ep->qp.cq_pgtbl_size = - (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.cq_pgtbl_virt = dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, @@ -1144,11 +1144,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) /* Allocate page table memory for RQ which is page aligned */ ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; ep->qp.rq_mem_size = - (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.rq_pgtbl_size = - (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *); + (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); ep->qp.rq_pgtbl_size = - (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; ep->qp.rq_pgtbl_virt = dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, @@ -1191,7 +1191,7 @@ mem_alloc_err: /** * bnx2i_free_qp_resc - free memory resources held by QP * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * Free QP resources - SQ/RQ/CQ memory and page tables. 
*/ @@ -1270,7 +1270,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) bnx2i_adjust_qp_size(hba); iscsi_init.flags = - ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; + (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; if (en_tcp_dack) iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; iscsi_init.reserved0 = 0; @@ -1288,15 +1288,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); iscsi_init.num_ccells_per_conn = hba->num_ccell; iscsi_init.num_tasks_per_conn = hba->max_sqes; - iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; + iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; iscsi_init.sq_num_wqes = hba->max_sqes; iscsi_init.cq_log_wqes_per_page = - (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE); + (u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE); iscsi_init.cq_num_wqes = hba->max_cqes; iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + - (PAGE_SIZE - 1)) / PAGE_SIZE; + (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + - (PAGE_SIZE - 1)) / PAGE_SIZE; + (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; iscsi_init.rq_num_wqes = hba->max_rqes; @@ -1361,7 +1361,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, u32 datalen = 0; resp_cqe = (struct bnx2i_cmd_response *)cqe; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->back_lock); task = iscsi_itt_to_task(conn, resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX); if (!task) @@ -1432,7 +1432,7 @@ done: __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data, datalen); fail: - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->back_lock); return 0; } @@ -1457,7 +1457,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session, int pad_len; login = (struct bnx2i_login_response *) cqe; - spin_lock(&session->lock); + spin_lock(&session->back_lock); task = iscsi_itt_to_task(conn, login->itt & ISCSI_LOGIN_RESPONSE_INDEX); if (!task) @@ -1500,7 +1500,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session, bnx2i_conn->gen_pdu.resp_buf, bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf); done: - spin_unlock(&session->lock); + spin_unlock(&session->back_lock); return 0; } @@ -1525,7 +1525,7 @@ static int bnx2i_process_text_resp(struct iscsi_session *session, int pad_len; text = (struct bnx2i_text_response *) cqe; - spin_lock(&session->lock); + spin_lock(&session->back_lock); task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX); if (!task) goto done; @@ -1561,7 +1561,7 @@ static int bnx2i_process_text_resp(struct iscsi_session *session, bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf); done: - spin_unlock(&session->lock); + spin_unlock(&session->back_lock); return 0; } @@ -1584,7 +1584,7 @@ static int bnx2i_process_tmf_resp(struct iscsi_session *session, struct iscsi_tm_rsp *resp_hdr; tmf_cqe = (struct bnx2i_tmf_response *)cqe; - spin_lock(&session->lock); + spin_lock(&session->back_lock); task = iscsi_itt_to_task(conn, tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX); if (!task) @@ -1600,7 +1600,7 @@ static int bnx2i_process_tmf_resp(struct iscsi_session *session, __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); done: - spin_unlock(&session->lock); + spin_unlock(&session->back_lock); return 0; } @@ -1623,7 +1623,7 @@ static int bnx2i_process_logout_resp(struct iscsi_session *session, struct iscsi_logout_rsp *resp_hdr; 
logout = (struct bnx2i_logout_response *) cqe; - spin_lock(&session->lock); + spin_lock(&session->back_lock); task = iscsi_itt_to_task(conn, logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX); if (!task) @@ -1647,7 +1647,7 @@ static int bnx2i_process_logout_resp(struct iscsi_session *session, bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD; done: - spin_unlock(&session->lock); + spin_unlock(&session->back_lock); return 0; } @@ -1668,12 +1668,12 @@ static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session, struct iscsi_task *task; nop_in = (struct bnx2i_nop_in_msg *)cqe; - spin_lock(&session->lock); + spin_lock(&session->back_lock); task = iscsi_itt_to_task(conn, nop_in->itt & ISCSI_NOP_IN_MSG_INDEX); if (task) __iscsi_put_task(task); - spin_unlock(&session->lock); + spin_unlock(&session->back_lock); } /** @@ -1712,7 +1712,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session, nop_in = (struct bnx2i_nop_in_msg *)cqe; - spin_lock(&session->lock); + spin_lock(&session->back_lock); hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr; memset(hdr, 0, sizeof(struct iscsi_hdr)); hdr->opcode = nop_in->op_code; @@ -1738,7 +1738,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session, } done: __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); - spin_unlock(&session->lock); + spin_unlock(&session->back_lock); return tgt_async_nop; } @@ -1771,7 +1771,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session, return; } - spin_lock(&session->lock); + spin_lock(&session->back_lock); resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr; memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); resp_hdr->opcode = async_cqe->op_code; @@ -1790,7 +1790,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session, __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data, (struct iscsi_hdr *)resp_hdr, NULL, 0); - spin_unlock(&session->lock); + spin_unlock(&session->back_lock); } @@ -1817,7 +1817,7 @@ static void bnx2i_process_reject_mesg(struct iscsi_session *session, } else bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); - spin_lock(&session->lock); + spin_lock(&session->back_lock); hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr; memset(hdr, 0, sizeof(struct iscsi_hdr)); hdr->opcode = reject->op_code; @@ -1828,7 +1828,7 @@ static void bnx2i_process_reject_mesg(struct iscsi_session *session, hdr->ffffffff = cpu_to_be32(RESERVED_ITT); __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data, reject->data_length); - spin_unlock(&session->lock); + spin_unlock(&session->back_lock); } /** @@ -1848,13 +1848,13 @@ static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session, struct iscsi_task *task; cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe; - spin_lock(&session->lock); + spin_lock(&session->back_lock); task = iscsi_itt_to_task(conn, cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); if (!task) printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n", cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); - spin_unlock(&session->lock); + spin_unlock(&session->back_lock); complete(&bnx2i_conn->cmd_cleanup_cmpl); } @@ -1870,7 +1870,7 @@ int bnx2i_percpu_io_thread(void *arg) struct bnx2i_work *work, *tmp; LIST_HEAD(work_list); - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); while (!kthread_should_stop()) { spin_lock_bh(&p->p_work_lock); @@ -1921,11 +1921,11 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session, int rc = 0; int cpu; - spin_lock(&session->lock); + 
spin_lock(&session->back_lock); task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data, cqe->itt & ISCSI_CMD_RESPONSE_INDEX); if (!task || !task->sc) { - spin_unlock(&session->lock); + spin_unlock(&session->back_lock); return -EINVAL; } sc = task->sc; @@ -1935,7 +1935,7 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session, else cpu = sc->request->cpu; - spin_unlock(&session->lock); + spin_unlock(&session->back_lock); p = &per_cpu(bnx2i_percpu, cpu); spin_lock(&p->p_work_lock); @@ -2738,8 +2738,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { reg_base = pci_resource_start(ep->hba->pcidev, BNX2X_DOORBELL_PCI_BAR); - reg_off = BNX2I_5771X_DBELL_PAGE_SIZE * (cid_num & 0x1FFFF) + - DPM_TRIGER_TYPE; + reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF); ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); goto arm_cq; } diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 50fef6963a8..80c03b452d6 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -1,6 +1,6 @@ /* bnx2i.c: Broadcom NetXtreme II iSCSI driver. * - * Copyright (c) 2006 - 2012 Broadcom Corporation + * Copyright (c) 2006 - 2013 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mike Christie * @@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); static u32 adapter_count; #define DRV_MODULE_NAME "bnx2i" -#define DRV_MODULE_VERSION "2.7.2.2" -#define DRV_MODULE_RELDATE "Apr 25, 2012" +#define DRV_MODULE_VERSION "2.7.6.2" +#define DRV_MODULE_RELDATE "Jun 06, 2013" static char version[] = "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ @@ -172,16 +172,14 @@ void bnx2i_start(void *handle) struct bnx2i_hba *hba = handle; int i = HZ; - /* - * We should never register devices that don't support iSCSI - * (see bnx2i_init_one), so something is wrong if we try to - * start a iSCSI adapter on hardware with 0 supported iSCSI - * connections + /* On some bnx2x devices, it is possible that iSCSI is no + * longer supported after firmware is downloaded. In that + * case, the iscsi_init_msg will return failure. 
*/ - BUG_ON(!hba->cnic->max_iscsi_conn); bnx2i_send_fw_iscsi_init_msg(hba); - while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) + while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && + !test_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state) && i--) msleep(BNX2I_INIT_POLL_TIME); } @@ -539,11 +537,15 @@ static int __init bnx2i_mod_init(void) p->iothread = NULL; } + cpu_notifier_register_begin(); + for_each_online_cpu(cpu) bnx2i_percpu_thread_create(cpu); /* Initialize per CPU interrupt thread */ - register_hotcpu_notifier(&bnx2i_cpu_notifier); + __register_hotcpu_notifier(&bnx2i_cpu_notifier); + + cpu_notifier_register_done(); return 0; @@ -583,11 +585,15 @@ static void __exit bnx2i_mod_exit(void) } mutex_unlock(&bnx2i_dev_lock); - unregister_hotcpu_notifier(&bnx2i_cpu_notifier); + cpu_notifier_register_begin(); for_each_online_cpu(cpu) bnx2i_percpu_thread_destroy(cpu); + __unregister_hotcpu_notifier(&bnx2i_cpu_notifier); + + cpu_notifier_register_done(); + iscsi_unregister_transport(&bnx2i_iscsi_transport); cnic_unregister_driver(CNIC_ULP_ISCSI); } diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 0056e47bd56..166543f7ef5 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1,7 +1,7 @@ /* * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver. * - * Copyright (c) 2006 - 2012 Broadcom Corporation + * Copyright (c) 2006 - 2013 Broadcom Corporation * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mike Christie * @@ -525,7 +525,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) struct iscsi_bd *mp_bdt; u64 addr; - hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &hba->mp_bd_dma, GFP_KERNEL); if (!hba->mp_bd_tbl) { printk(KERN_ERR "unable to allocate Middle Path BDT\n"); @@ -533,11 +533,12 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) goto out; } - hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, + CNIC_PAGE_SIZE, &hba->dummy_buf_dma, GFP_KERNEL); if (!hba->dummy_buffer) { printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba->mp_bd_tbl, hba->mp_bd_dma); hba->mp_bd_tbl = NULL; rc = -1; @@ -548,7 +549,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) addr = (unsigned long) hba->dummy_buf_dma; mp_bdt->buffer_addr_lo = addr & 0xffffffff; mp_bdt->buffer_addr_hi = addr >> 32; - mp_bdt->buffer_length = PAGE_SIZE; + mp_bdt->buffer_length = CNIC_PAGE_SIZE; mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | ISCSI_BD_FIRST_IN_BD_CHAIN; out: @@ -565,12 +566,12 @@ out: static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) { if (hba->mp_bd_tbl) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba->mp_bd_tbl, hba->mp_bd_dma); hba->mp_bd_tbl = NULL; } if (hba->dummy_buffer) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, hba->dummy_buffer, hba->dummy_buf_dma); hba->dummy_buffer = NULL; } @@ -596,7 +597,7 @@ void bnx2i_drop_session(struct iscsi_cls_session *cls_session) /** * bnx2i_ep_destroy_list_add - add an entry to EP destroy list * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport indentifier) structure + * @ep: pointer to endpoint 
(transport identifier) structure * * EP destroy queue manager */ @@ -613,7 +614,7 @@ static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba, * bnx2i_ep_destroy_list_del - add an entry to EP destroy list * * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport indentifier) structure + * @ep: pointer to endpoint (transport identifier) structure * * EP destroy queue manager */ @@ -630,7 +631,7 @@ static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba, /** * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport indentifier) structure + * @ep: pointer to endpoint (transport identifier) structure * * pending conn offload completion queue manager */ @@ -646,7 +647,7 @@ static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba, /** * bnx2i_ep_ofld_list_del - add an entry to ep offload pending list * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport indentifier) structure + * @ep: pointer to endpoint (transport identifier) structure * * pending conn offload completion queue manager */ @@ -721,7 +722,7 @@ bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid) /** * bnx2i_ep_active_list_add - add an entry to ep active list * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport indentifier) structure + * @ep: pointer to endpoint (transport identifier) structure * * current active conn queue manager */ @@ -737,7 +738,7 @@ static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba, /** * bnx2i_ep_active_list_del - deletes an entry to ep active list * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport indentifier) structure + * @ep: pointer to endpoint (transport identifier) structure * * current active conn queue manager */ @@ -934,14 +935,14 @@ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba, struct bnx2i_conn *bnx2i_conn) { if (bnx2i_conn->gen_pdu.resp_bd_tbl) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, bnx2i_conn->gen_pdu.resp_bd_tbl, bnx2i_conn->gen_pdu.resp_bd_dma); bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; } if (bnx2i_conn->gen_pdu.req_bd_tbl) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, bnx2i_conn->gen_pdu.req_bd_tbl, bnx2i_conn->gen_pdu.req_bd_dma); bnx2i_conn->gen_pdu.req_bd_tbl = NULL; @@ -998,13 +999,13 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; bnx2i_conn->gen_pdu.req_bd_tbl = - dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) goto login_req_bd_tbl_failure; bnx2i_conn->gen_pdu.resp_bd_tbl = - dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, &bnx2i_conn->gen_pdu.resp_bd_dma, GFP_KERNEL); if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) @@ -1013,7 +1014,7 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, return 0; login_resp_bd_tbl_failure: - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, bnx2i_conn->gen_pdu.req_bd_tbl, bnx2i_conn->gen_pdu.req_bd_dma); bnx2i_conn->gen_pdu.req_bd_tbl = NULL; @@ -1169,10 +1170,10 @@ static void bnx2i_cleanup_task(struct iscsi_task *task) if (task->state == 
ISCSI_TASK_ABRT_TMF) { bnx2i_send_cmd_cleanup_req(hba, task->dd_data); - spin_unlock_bh(&conn->session->lock); + spin_unlock_bh(&conn->session->back_lock); wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl, msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT)); - spin_lock_bh(&conn->session->lock); + spin_lock_bh(&conn->session->back_lock); } bnx2i_iscsi_unmap_sg_list(task->dd_data); } @@ -1695,7 +1696,7 @@ no_nx2_route: /** * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources * @hba: pointer to adapter instance - * @ep: endpoint (transport indentifier) structure + * @ep: endpoint (transport identifier) structure * * destroys cm_sock structure and on chip iscsi context */ @@ -2059,7 +2060,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) goto out; if (session) { - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) { if (session->state == ISCSI_STATE_LOGGING_OUT) { if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) { @@ -2075,7 +2076,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) } else close = 1; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); } bnx2i_ep->state = EP_STATE_DISCONN_START; diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c index c61cf7a4365..a0a3d9fe61f 100644 --- a/drivers/scsi/bnx2i/bnx2i_sysfs.c +++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c @@ -1,6 +1,6 @@ /* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver. * - * Copyright (c) 2004 - 2012 Broadcom Corporation + * Copyright (c) 2004 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index 76e4c039f0d..d35a5d6c8d7 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c @@ -1,10 +1,10 @@ -/* +/* * ASCII values for a number of symbolic constants, printing functions, * etc. * Additions for SCSI 2 and Linux 2.2.x by D. Gilbert (990422) * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002) * by D. Gilbert and aeb (20020609) - * Update to SPC-4 T10/1713-D Rev 20, 22 May 2009, D. Gilbert 20090624 + * Updated to SPC-4 T10/1713-D Rev 36g, D. 
Gilbert 20130701 */ #include <linux/blkdev.h> @@ -21,12 +21,13 @@ /* Commands with service actions that change the command name */ -#define MAINTENANCE_IN 0xa3 -#define MAINTENANCE_OUT 0xa4 #define SERVICE_ACTION_IN_12 0xab #define SERVICE_ACTION_OUT_12 0xa9 +#define SERVICE_ACTION_BIDIRECTIONAL 0x9d #define SERVICE_ACTION_IN_16 0x9e #define SERVICE_ACTION_OUT_16 0x9f +#define THIRD_PARTY_COPY_OUT 0x83 +#define THIRD_PARTY_COPY_IN 0x84 @@ -36,11 +37,11 @@ static const char * cdb_byte0_names[] = { /* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, "Reassign Blocks", /* 08-0d */ "Read(6)", NULL, "Write(6)", "Seek(6)", NULL, NULL, -/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry", +/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry", /* 13-16 */ "Verify(6)", "Recover Buffered Data", "Mode Select(6)", "Reserve(6)", /* 17-1a */ "Release(6)", "Copy", "Erase", "Mode Sense(6)", -/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic", +/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic", /* 1e-1f */ "Prevent/Allow Medium Removal", NULL, /* 20-22 */ NULL, NULL, NULL, /* 23-28 */ "Read Format Capacities", "Set Window", @@ -48,16 +49,16 @@ static const char * cdb_byte0_names[] = { /* 29-2d */ "Read Generation", "Write(10)", "Seek(10)", "Erase(10)", "Read updated block", /* 2e-31 */ "Write Verify(10)", "Verify(10)", "Search High", "Search Equal", -/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position", +/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position", /* 35-37 */ "Synchronize Cache(10)", "Lock/Unlock Cache(10)", - "Read Defect Data(10)", -/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer", - "Read Buffer", + "Read Defect Data(10)", +/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer", + "Read Buffer", /* 3d-3f */ "Update Block", "Read Long(10)", "Write Long(10)", /* 40-41 */ "Change Definition", "Write Same(10)", /* 42-48 */ "Unmap/Read sub-channel", "Read TOC/PMA/ATIP", "Read density support", "Play audio(10)", "Get configuration", - "Play audio msf", "Play audio track/index", + "Play audio msf", "Sanitize/Play audio track/index", /* 49-4f */ "Play track relative(10)", "Get event status notification", "Pause/resume", "Log Select", "Log Sense", "Stop play/scan", NULL, @@ -72,17 +73,17 @@ static const char * cdb_byte0_names[] = { /* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, /* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, "Extended CDB", "Variable length", -/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy", - "Receive copy results", +/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", + "Third party copy out", "Third party copy in", /* 85-89 */ "ATA command pass through(16)", "Access control in", - "Access control out", "Read(16)", "Memory Export Out(16)", + "Access control out", "Read(16)", "Compare and Write", /* 8a-8f */ "Write(16)", "ORWrite", "Read attributes", "Write attributes", "Write and verify(16)", "Verify(16)", /* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)", "Lock/unlock cache(16)", "Write same(16)", NULL, /* 95-99 */ NULL, NULL, NULL, NULL, NULL, -/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in(16)", - "Service action out(16)", +/* 9a-9f */ NULL, NULL, NULL, "Service action bidirectional", + "Service action in(16)", "Service action out(16)", /* a0-a5 */ "Report luns", "ATA command pass through(12)/Blank", "Security protocol in", "Maintenance in", "Maintenance out", "Move medium/play 
audio(12)", @@ -122,6 +123,7 @@ static const struct value_name_pair maint_out_arr[] = { {0x6, "Set identifying information"}, {0xa, "Set target port groups"}, {0xb, "Change aliases"}, + {0xc, "Remove I_T nexus"}, {0xe, "Set priority"}, {0xf, "Set timestamp"}, {0x10, "Management protocol out"}, @@ -138,10 +140,16 @@ static const struct value_name_pair serv_out12_arr[] = { }; #define SERV_OUT12_SZ ARRAY_SIZE(serv_out12_arr) +static const struct value_name_pair serv_bidi_arr[] = { + {-1, "dummy entry"}, +}; +#define SERV_BIDI_SZ ARRAY_SIZE(serv_bidi_arr) + static const struct value_name_pair serv_in16_arr[] = { {0x10, "Read capacity(16)"}, {0x11, "Read long(16)"}, {0x12, "Get LBA status"}, + {0x13, "Report referrals"}, }; #define SERV_IN16_SZ ARRAY_SIZE(serv_in16_arr) @@ -151,6 +159,51 @@ static const struct value_name_pair serv_out16_arr[] = { }; #define SERV_OUT16_SZ ARRAY_SIZE(serv_out16_arr) +static const struct value_name_pair pr_in_arr[] = { + {0x0, "Persistent reserve in, read keys"}, + {0x1, "Persistent reserve in, read reservation"}, + {0x2, "Persistent reserve in, report capabilities"}, + {0x3, "Persistent reserve in, read full status"}, +}; +#define PR_IN_SZ ARRAY_SIZE(pr_in_arr) + +static const struct value_name_pair pr_out_arr[] = { + {0x0, "Persistent reserve out, register"}, + {0x1, "Persistent reserve out, reserve"}, + {0x2, "Persistent reserve out, release"}, + {0x3, "Persistent reserve out, clear"}, + {0x4, "Persistent reserve out, preempt"}, + {0x5, "Persistent reserve out, preempt and abort"}, + {0x6, "Persistent reserve out, register and ignore existing key"}, + {0x7, "Persistent reserve out, register and move"}, +}; +#define PR_OUT_SZ ARRAY_SIZE(pr_out_arr) + +/* SPC-4 rev 34 renamed the Extended Copy opcode to Third Party Copy Out. 
+ LID1 (List Identifier length: 1 byte) is the Extended Copy found in SPC-2 + and SPC-3 */ +static const struct value_name_pair tpc_out_arr[] = { + {0x0, "Extended copy(LID1)"}, + {0x1, "Extended copy(LID4)"}, + {0x10, "Populate token"}, + {0x11, "Write using token"}, + {0x1c, "Copy operation abort"}, +}; +#define TPC_OUT_SZ ARRAY_SIZE(tpc_out_arr) + +static const struct value_name_pair tpc_in_arr[] = { + {0x0, "Receive copy status(LID1)"}, + {0x1, "Receive copy data(LID1)"}, + {0x3, "Receive copy operating parameters"}, + {0x4, "Receive copy failure details(LID1)"}, + {0x5, "Receive copy status(LID4)"}, + {0x6, "Receive copy data(LID4)"}, + {0x7, "Receive ROD token information"}, + {0x8, "Report all ROD tokens"}, +}; +#define TPC_IN_SZ ARRAY_SIZE(tpc_in_arr) + + static const struct value_name_pair variable_length_arr[] = { {0x1, "Rebuild(32)"}, {0x2, "Regenerate(32)"}, @@ -207,6 +260,7 @@ static const char * get_sa_name(const struct value_name_pair * arr, static void print_opcode_name(unsigned char * cdbp, int cdb_len) { int sa, len, cdb0; + int fin_name = 0; const char * name; cdb0 = cdbp[0]; @@ -219,7 +273,8 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len) break; } sa = (cdbp[8] << 8) + cdbp[9]; - name = get_sa_name(variable_length_arr, VARIABLE_LENGTH_SZ, sa); + name = get_sa_name(variable_length_arr, VARIABLE_LENGTH_SZ, + sa); if (name) printk("%s", name); else @@ -232,50 +287,57 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len) case MAINTENANCE_IN: sa = cdbp[1] & 0x1f; name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa); - if (name) - printk("%s", name); - else - printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); + fin_name = 1; break; case MAINTENANCE_OUT: sa = cdbp[1] & 0x1f; name = get_sa_name(maint_out_arr, MAINT_OUT_SZ, sa); - if (name) - printk("%s", name); - else - printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); + fin_name = 1; + break; + case PERSISTENT_RESERVE_IN: + sa = cdbp[1] & 0x1f; + name = get_sa_name(pr_in_arr, PR_IN_SZ, sa); + fin_name = 1; + break; + case PERSISTENT_RESERVE_OUT: + sa = cdbp[1] & 0x1f; + name = get_sa_name(pr_out_arr, PR_OUT_SZ, sa); + fin_name = 1; break; case SERVICE_ACTION_IN_12: sa = cdbp[1] & 0x1f; name = get_sa_name(serv_in12_arr, SERV_IN12_SZ, sa); - if (name) - printk("%s", name); - else - printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); + fin_name = 1; break; case SERVICE_ACTION_OUT_12: sa = cdbp[1] & 0x1f; name = get_sa_name(serv_out12_arr, SERV_OUT12_SZ, sa); - if (name) - printk("%s", name); - else - printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); + fin_name = 1; + break; + case SERVICE_ACTION_BIDIRECTIONAL: + sa = cdbp[1] & 0x1f; + name = get_sa_name(serv_bidi_arr, SERV_BIDI_SZ, sa); + fin_name = 1; break; case SERVICE_ACTION_IN_16: sa = cdbp[1] & 0x1f; name = get_sa_name(serv_in16_arr, SERV_IN16_SZ, sa); - if (name) - printk("%s", name); - else - printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); + fin_name = 1; break; case SERVICE_ACTION_OUT_16: sa = cdbp[1] & 0x1f; name = get_sa_name(serv_out16_arr, SERV_OUT16_SZ, sa); - if (name) - printk("%s", name); - else - printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); + fin_name = 1; + break; + case THIRD_PARTY_COPY_IN: + sa = cdbp[1] & 0x1f; + name = get_sa_name(tpc_in_arr, TPC_IN_SZ, sa); + fin_name = 1; + break; + case THIRD_PARTY_COPY_OUT: + sa = cdbp[1] & 0x1f; + name = get_sa_name(tpc_out_arr, TPC_OUT_SZ, sa); + fin_name = 1; break; default: if (cdb0 < 0xc0) { @@ -288,6 +350,12 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len) printk("cdb[0]=0x%x (vendor)", cdb0); break; } + if 
(fin_name) { + if (name) + printk("%s", name); + else + printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); + } } #else /* ifndef CONFIG_SCSI_CONSTANTS */ @@ -312,10 +380,15 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len) break; case MAINTENANCE_IN: case MAINTENANCE_OUT: + case PERSISTENT_RESERVE_IN: + case PERSISTENT_RESERVE_OUT: case SERVICE_ACTION_IN_12: case SERVICE_ACTION_OUT_12: + case SERVICE_ACTION_BIDIRECTIONAL: case SERVICE_ACTION_IN_16: case SERVICE_ACTION_OUT_16: + case THIRD_PARTY_COPY_IN: + case THIRD_PARTY_COPY_OUT: sa = cdbp[1] & 0x1f; printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa); break; @@ -327,7 +400,7 @@ static void print_opcode_name(unsigned char * cdbp, int cdb_len) break; } } -#endif +#endif void __scsi_print_command(unsigned char *cdb) { @@ -336,7 +409,7 @@ void __scsi_print_command(unsigned char *cdb) print_opcode_name(cdb, 0); len = scsi_command_size(cdb); /* print out all bytes in cdb */ - for (k = 0; k < len; ++k) + for (k = 0; k < len; ++k) printk(" %02x", cdb[k]); printk("\n"); } @@ -404,8 +477,9 @@ struct error_info { /* * The canonical list of T10 Additional Sense Codes is available at: - * http://www.t10.org/lists/asc-num.txt + * http://www.t10.org/lists/asc-num.txt [most recent: 20130605] */ + static const struct error_info additional[] = { {0x0000, "No additional sense information"}, @@ -430,6 +504,8 @@ static const struct error_info additional[] = {0x001C, "Verify operation in progress"}, {0x001D, "ATA pass through information available"}, {0x001E, "Conflicting SA creation request"}, + {0x001F, "Logical unit transitioning to another power condition"}, + {0x0020, "Extended copy information available"}, {0x0100, "No index/sector signal"}, @@ -460,6 +536,17 @@ static const struct error_info additional[] = {0x0412, "Logical unit not ready, offline"}, {0x0413, "Logical unit not ready, SA creation in progress"}, {0x0414, "Logical unit not ready, space allocation in progress"}, + {0x0415, "Logical unit not ready, robotics disabled"}, + {0x0416, "Logical unit not ready, configuration required"}, + {0x0417, "Logical unit not ready, calibration required"}, + {0x0418, "Logical unit not ready, a door is open"}, + {0x0419, "Logical unit not ready, operating in sequential mode"}, + {0x041A, "Logical unit not ready, start stop unit command in " + "progress"}, + {0x041B, "Logical unit not ready, sanitize in progress"}, + {0x041C, "Logical unit not ready, additional power use not yet " + "granted"}, + {0x041D, "Logical unit not ready, configuration in progress"}, {0x0500, "Logical unit does not respond to selection"}, @@ -490,6 +577,7 @@ static const struct error_info additional[] = {0x0B06, "Warning - non-volatile cache now volatile"}, {0x0B07, "Warning - degraded power to non-volatile cache"}, {0x0B08, "Warning - power loss expected"}, + {0x0B09, "Warning - device statistics notification active"}, {0x0C00, "Write error"}, {0x0C01, "Write error - recovered with auto reallocation"}, @@ -505,6 +593,7 @@ static const struct error_info additional[] = {0x0C0B, "Auxiliary memory write error"}, {0x0C0C, "Write error - unexpected unsolicited data"}, {0x0C0D, "Write error - not enough unsolicited data"}, + {0x0C0E, "Multiple write errors"}, {0x0C0F, "Defects in error window"}, {0x0D00, "Error detected by third party temporary initiator"}, @@ -523,6 +612,8 @@ static const struct error_info additional[] = {0x1001, "Logical block guard check failed"}, {0x1002, "Logical block application tag check failed"}, {0x1003, "Logical block reference tag check failed"}, + {0x1004, 
"Logical block protection error on recover buffered data"}, + {0x1005, "Logical block protection method error"}, {0x1100, "Unrecovered read error"}, {0x1101, "Read retries exhausted"}, @@ -545,6 +636,7 @@ static const struct error_info additional[] = {0x1112, "Auxiliary memory read error"}, {0x1113, "Read error - failed retransmission request"}, {0x1114, "Read error - lba marked bad by application client"}, + {0x1115, "Write after sanitize required"}, {0x1200, "Address mark not found for id field"}, @@ -622,6 +714,7 @@ static const struct error_info additional[] = {0x2009, "Access denied - invalid LU identifier"}, {0x200A, "Access denied - invalid proxy token"}, {0x200B, "Access denied - ACL LUN conflict"}, + {0x200C, "Illegal command when not in append-only mode"}, {0x2100, "Logical block address out of range"}, {0x2101, "Invalid element address"}, @@ -630,6 +723,19 @@ static const struct error_info additional[] = {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, + {0x2300, "Invalid token operation, cause not reportable"}, + {0x2301, "Invalid token operation, unsupported token type"}, + {0x2302, "Invalid token operation, remote token usage not supported"}, + {0x2303, "Invalid token operation, remote rod token creation not " + "supported"}, + {0x2304, "Invalid token operation, token unknown"}, + {0x2305, "Invalid token operation, token corrupt"}, + {0x2306, "Invalid token operation, token revoked"}, + {0x2307, "Invalid token operation, token expired"}, + {0x2308, "Invalid token operation, token cancelled"}, + {0x2309, "Invalid token operation, token deleted"}, + {0x230A, "Invalid token operation, invalid token length"}, + {0x2400, "Invalid field in cdb"}, {0x2401, "CDB decryption error"}, {0x2402, "Obsolete"}, @@ -705,6 +811,7 @@ static const struct error_info additional[] = "event"}, {0x2A13, "Data encryption key instance counter has changed"}, {0x2A14, "SA creation capabilities data has changed"}, + {0x2A15, "Medium removal prevention preempted"}, {0x2B00, "Copy cannot execute since host cannot disconnect"}, @@ -720,6 +827,7 @@ static const struct error_info additional[] = {0x2C09, "Previous reservation conflict status"}, {0x2C0A, "Partition or collection contains user objects"}, {0x2C0B, "Not reserved"}, + {0x2C0C, "Orwrite generation does not match"}, {0x2D00, "Overwrite error on update in place"}, @@ -728,6 +836,7 @@ static const struct error_info additional[] = {0x2F00, "Commands cleared by another initiator"}, {0x2F01, "Commands cleared by power loss notification"}, {0x2F02, "Commands cleared by device server"}, + {0x2F03, "Some commands cleared by queuing layer event"}, {0x3000, "Incompatible medium installed"}, {0x3001, "Cannot read medium - unknown format"}, @@ -745,10 +854,12 @@ static const struct error_info additional[] = {0x3010, "Medium not formatted"}, {0x3011, "Incompatible volume type"}, {0x3012, "Incompatible volume qualifier"}, + {0x3013, "Cleaning volume expired"}, {0x3100, "Medium format corrupted"}, {0x3101, "Format command failed"}, {0x3102, "Zoned formatting failed due to spare linking"}, + {0x3103, "Sanitize command failed"}, {0x3200, "No defect spare location available"}, {0x3201, "Defect list update failure"}, @@ -809,6 +920,8 @@ static const struct error_info additional[] = {0x3B19, "Element enabled"}, {0x3B1A, "Data transfer device removed"}, {0x3B1B, "Data transfer device inserted"}, + {0x3B1C, "Too many logical objects on partition to support " + "operation"}, {0x3D00, "Invalid bits in identify message"}, @@ -839,6 +952,7 @@ static const struct 
error_info additional[] = {0x3F12, "iSCSI IP address added"}, {0x3F13, "iSCSI IP address removed"}, {0x3F14, "iSCSI IP address changed"}, + {0x3F15, "Inspect referrals sense descriptors"}, /* * {0x40NN, "Ram failure"}, * {0x40NN, "Diagnostic failure on component nn"}, @@ -848,6 +962,7 @@ static const struct error_info additional[] = {0x4300, "Message error"}, {0x4400, "Internal target failure"}, + {0x4401, "Persistent reservation information lost"}, {0x4471, "ATA device failed set features"}, {0x4500, "Select or reselect failure"}, @@ -876,6 +991,21 @@ static const struct error_info additional[] = {0x4B04, "Nak received"}, {0x4B05, "Data offset error"}, {0x4B06, "Initiator response timeout"}, + {0x4B07, "Connection lost"}, + {0x4B08, "Data-in buffer overflow - data buffer size"}, + {0x4B09, "Data-in buffer overflow - data buffer descriptor area"}, + {0x4B0A, "Data-in buffer error"}, + {0x4B0B, "Data-out buffer overflow - data buffer size"}, + {0x4B0C, "Data-out buffer overflow - data buffer descriptor area"}, + {0x4B0D, "Data-out buffer error"}, + {0x4B0E, "PCIe fabric error"}, + {0x4B0F, "PCIe completion timeout"}, + {0x4B10, "PCIe completer abort"}, + {0x4B11, "PCIe poisoned tlp received"}, + {0x4B12, "PCIe eCRC check failed"}, + {0x4B13, "PCIe unsupported request"}, + {0x4B14, "PCIe acs violation"}, + {0x4B15, "PCIe tlp prefix blocked"}, {0x4C00, "Logical unit failed self-configuration"}, /* @@ -897,6 +1027,10 @@ static const struct error_info additional[] = {0x5302, "Medium removal prevented"}, {0x5303, "Medium removal prevented by data transfer element"}, {0x5304, "Medium thread or unthread failure"}, + {0x5305, "Volume identifier invalid"}, + {0x5306, "Volume identifier missing"}, + {0x5307, "Duplicate volume identifier"}, + {0x5308, "Element status unknown"}, {0x5400, "Scsi to host system interface failure"}, @@ -911,6 +1045,9 @@ static const struct error_info additional[] = {0x5508, "Maximum number of supplemental decryption keys exceeded"}, {0x5509, "Medium auxiliary memory not accessible"}, {0x550A, "Data currently unavailable"}, + {0x550B, "Insufficient power for operation"}, + {0x550C, "Insufficient resources to create rod"}, + {0x550D, "Insufficient resources to create rod token"}, {0x5700, "Unable to recover table-of-contents"}, @@ -1069,6 +1206,7 @@ static const struct error_info additional[] = {0x670B, "ATA device feature not enabled"}, {0x6800, "Logical unit not configured"}, + {0x6801, "Subsidiary logical unit not configured"}, {0x6900, "Data loss on logical unit"}, {0x6901, "Multiple logical unit failures"}, @@ -1185,10 +1323,13 @@ static const char * const snstext[] = { "Vendor Specific(9)", "Copy Aborted", /* A: COPY or COMPARE was aborted */ "Aborted Command", /* B: The target aborted the command */ - "Equal", /* C: A SEARCH DATA command found data equal */ + "Equal", /* C: A SEARCH DATA command found data equal, + reserved in SPC-4 rev 36 */ "Volume Overflow", /* D: Medium full with still data to be written */ "Miscompare", /* E: Source data and data on the medium do not agree */ + "Completed", /* F: command completed sense data reported, + may occur for successful command */ }; #endif @@ -1306,7 +1447,7 @@ scsi_decode_sense_buffer(const unsigned char *sense_buffer, int sense_len, struct scsi_sense_hdr *sshdr) { int k, num, res; - + res = scsi_normalize_sense(sense_buffer, sense_len, sshdr); if (0 == res) { /* this may be SCSI-1 sense data */ @@ -1459,5 +1600,3 @@ void scsi_print_result(struct scsi_cmnd *cmd) scsi_show_result(cmd->result); } 
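
scsi_decode_sense_buffer() above leans on scsi_normalize_sense() so callers see one header regardless of sense format. For fixed-format sense data (response codes 0x70/0x71) the interesting fields sit at known offsets; a minimal sketch of just that case, with an 18-byte ILLEGAL REQUEST buffer made up for the test:

#include <stdio.h>

struct sense_hdr { unsigned char sense_key, asc, ascq; };

/* Fixed-format sense (SPC-4): key in byte 2, ASC in byte 12, ASCQ in 13. */
static int parse_fixed_sense(const unsigned char *buf, int len,
                             struct sense_hdr *h)
{
        unsigned char resp = buf[0] & 0x7f;

        if (len < 14 || resp < 0x70 || resp > 0x71)
                return -1;               /* descriptor format not handled here */
        h->sense_key = buf[2] & 0x0f;
        h->asc  = buf[12];
        h->ascq = buf[13];
        return 0;
}

int main(void)
{
        /* ILLEGAL REQUEST, asc/ascq 0x20/0x00 "Invalid command operation code" */
        unsigned char sb[18] = { 0x70, 0, 0x05, [7] = 10, [12] = 0x20 };
        struct sense_hdr h;

        if (!parse_fixed_sense(sb, sizeof(sb), &h))
                printf("key 0x%x asc 0x%x ascq 0x%x\n",
                       h.sense_key, h.asc, h.ascq);
        return 0;
}
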
EXPORT_SYMBOL(scsi_print_result); - - diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile index b581966c88f..913b9a92fb0 100644 --- a/drivers/scsi/csiostor/Makefile +++ b/drivers/scsi/csiostor/Makefile @@ -8,4 +8,5 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4 obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \ - csio_hw.o csio_isr.o csio_mb.o csio_rnode.o csio_wr.o + csio_hw.o csio_hw_t4.o csio_hw_t5.o csio_isr.o \ + csio_mb.o csio_rnode.o csio_wr.o diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index bdd78fb4fc7..0eaec474895 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -61,7 +61,7 @@ int csio_msi = 2; static int dev_num; /* FCoE Adapter types & its description */ -static const struct csio_adap_desc csio_fcoe_adapters[] = { +static const struct csio_adap_desc csio_t4_fcoe_adapters[] = { {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"}, {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"}, {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"}, @@ -77,7 +77,38 @@ static const struct csio_adap_desc csio_fcoe_adapters[] = { {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"}, {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"}, {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"}, - {"T4 FPGA", "Chelsio T4 FPGA [FCoE]"} + {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"}, + {"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"}, + {"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"}, + {"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"}, + {"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"}, + {"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"}, + {"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"}, + {"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"}, + {"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"} +}; + +static const struct csio_adap_desc csio_t5_fcoe_adapters[] = { + {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"}, + {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"}, + {"T522-CR 10G/1G", "Chelsio T452-CR 10G/1G [FCoE]"}, + {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"}, + {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"}, + {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"}, + {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"}, + {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"}, + {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"}, + {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"}, + {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"}, + {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"}, + {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"}, + {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"}, + {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"}, + {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"}, + {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"}, + {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"}, + {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"}, + {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"} }; static void csio_mgmtm_cleanup(struct csio_mgmtm *); @@ -124,7 +155,7 @@ int csio_is_hw_removing(struct csio_hw *hw) * at the time it indicated completion is stored there. Returns 0 if the * operation completes and -EAGAIN otherwise. 
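
A user-space model of the polling contract just described, with MMIO reduced to a fake register that advances on every read and sleep_ms() standing in for mdelay():

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;
static uint32_t mmio_read32(int reg) { (void)reg; return fake_reg += 0x10; }
static void     sleep_ms(int ms)     { (void)ms; }

/* Same contract as csio_hw_wait_op_done_val(): poll @reg until the bits in
 * @mask are all zero (polarity 0) or any set (polarity 1), at most
 * @attempts reads @delay ms apart; report the final value through @valp. */
static int wait_op_done_val(int reg, uint32_t mask, int polarity,
                            int attempts, int delay, uint32_t *valp)
{
        for (;;) {
                uint32_t val = mmio_read32(reg);

                if (!!(val & mask) == polarity) {
                        if (valp)
                                *valp = val;
                        return 0;
                }
                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        sleep_ms(delay);
        }
}

int main(void)
{
        uint32_t val;
        int rv = wait_op_done_val(0, 0x40, 1, 10, 1, &val);

        printf("rv=%d val=0x%x\n", rv, val);   /* bit 0x40 appears on read 4 */
        return 0;
}
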
*/ -static int +int csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask, int polarity, int attempts, int delay, uint32_t *valp) { @@ -145,6 +176,24 @@ csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask, } } +/* + * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register + * @hw: the adapter + * @addr: the indirect TP register address + * @mask: specifies the field within the register to modify + * @val: new value for the field + * + * Sets a field of an indirect TP register to the given value. + */ +void +csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr, + unsigned int mask, unsigned int val) +{ + csio_wr_reg32(hw, addr, TP_PIO_ADDR); + val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask; + csio_wr_reg32(hw, val, TP_PIO_DATA); +} + void csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask, uint32_t value) @@ -157,242 +206,22 @@ csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask, } -/* - * csio_hw_mc_read - read from MC through backdoor accesses - * @hw: the hw module - * @addr: address of first byte requested - * @data: 64 bytes of data containing the requested address - * @ecc: where to store the corresponding 64-bit ECC word - * - * Read 64 bytes of data from MC starting at a 64-byte-aligned address - * that covers the requested address @addr. If @parity is not %NULL it - * is assigned the 64-bit ECC word for the read data. - */ -int -csio_hw_mc_read(struct csio_hw *hw, uint32_t addr, __be32 *data, - uint64_t *ecc) -{ - int i; - - if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST) - return -EBUSY; - csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR); - csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN); - csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN); - csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1), - MC_BIST_CMD); - i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST, - 0, 10, 1, NULL); - if (i) - return i; - -#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) - - for (i = 15; i >= 0; i--) - *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i))); - if (ecc) - *ecc = csio_rd_reg64(hw, MC_DATA(16)); -#undef MC_DATA - return 0; -} - -/* - * csio_hw_edc_read - read from EDC through backdoor accesses - * @hw: the hw module - * @idx: which EDC to access - * @addr: address of first byte requested - * @data: 64 bytes of data containing the requested address - * @ecc: where to store the corresponding 64-bit ECC word - * - * Read 64 bytes of data from EDC starting at a 64-byte-aligned address - * that covers the requested address @addr. If @parity is not %NULL it - * is assigned the 64-bit ECC word for the read data. 
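
The new csio_hw_tp_wr_bits_indirect() above is the classic address/data indirect window: one register selects the target, the other carries the payload, and a field update becomes a read-modify-write through the pair. Modeled here with an array in place of the TP block (the 16-register file and the selector encoding are invented for the demo):

#include <stdint.h>
#include <stdio.h>

static uint32_t tp_regs[16];                /* stands in for the TP block */
static uint32_t pio_addr;

static void wr32(int data_reg, uint32_t v)
{
        if (data_reg)
                tp_regs[pio_addr & 15] = v; /* TP_PIO_DATA write */
        else
                pio_addr = v;               /* TP_PIO_ADDR write */
}

static uint32_t rd32(void)
{
        return tp_regs[pio_addr & 15];      /* TP_PIO_DATA read */
}

/* Read-modify-write one field of an indirectly addressed register. */
static void tp_wr_bits_indirect(uint32_t addr, uint32_t mask, uint32_t val)
{
        wr32(0, addr);                      /* select the target register */
        val |= rd32() & ~mask;              /* keep bits outside the field */
        wr32(1, val);
}

int main(void)
{
        tp_regs[3] = 0xabcd0000;
        tp_wr_bits_indirect(3, 0x0000ffff, 0x1234);
        printf("reg3 = 0x%08x\n", tp_regs[3]);   /* 0xabcd1234 */
        return 0;
}
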
- */ -int -csio_hw_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data, - uint64_t *ecc) -{ - int i; - - idx *= EDC_STRIDE; - if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST) - return -EBUSY; - csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx); - csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx); - csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx); - csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST, - EDC_BIST_CMD + idx); - i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST, - 0, 10, 1, NULL); - if (i) - return i; - -#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) - - for (i = 15; i >= 0; i--) - *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i))); - if (ecc) - *ecc = csio_rd_reg64(hw, EDC_DATA(16)); -#undef EDC_DATA - return 0; -} - -/* - * csio_mem_win_rw - read/write memory through PCIE memory window - * @hw: the adapter - * @addr: address of first byte requested - * @data: MEMWIN0_APERTURE bytes of data containing the requested address - * @dir: direction of transfer 1 => read, 0 => write - * - * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a - * MEMWIN0_APERTURE-byte-aligned address that covers the requested - * address @addr. - */ -static int -csio_mem_win_rw(struct csio_hw *hw, u32 addr, u32 *data, int dir) -{ - int i; - - /* - * Setup offset into PCIE memory window. Address must be a - * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to - * ensure that changes propagate before we attempt to use the new - * values.) - */ - csio_wr_reg32(hw, addr & ~(MEMWIN0_APERTURE - 1), - PCIE_MEM_ACCESS_OFFSET); - csio_rd_reg32(hw, PCIE_MEM_ACCESS_OFFSET); - - /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */ - for (i = 0; i < MEMWIN0_APERTURE; i = i + sizeof(__be32)) { - if (dir) - *data++ = csio_rd_reg32(hw, (MEMWIN0_BASE + i)); - else - csio_wr_reg32(hw, *data++, (MEMWIN0_BASE + i)); - } - - return 0; -} - -/* - * csio_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window - * @hw: the csio_hw - * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC - * @addr: address within indicated memory type - * @len: amount of memory to transfer - * @buf: host memory buffer - * @dir: direction of transfer 1 => read, 0 => write - * - * Reads/writes an [almost] arbitrary memory region in the firmware: the - * firmware memory address, length and host buffer must be aligned on - * 32-bit boudaries. The memory is transferred as a raw byte sequence - * from/to the firmware's memory. If this memory contains data - * structures which contain multi-byte integers, it's the callers - * responsibility to perform appropriate byte order conversions. - */ -static int -csio_memory_rw(struct csio_hw *hw, int mtype, u32 addr, u32 len, - uint32_t *buf, int dir) -{ - uint32_t pos, start, end, offset, memoffset; - int ret; - uint32_t *data; - - /* - * Argument sanity checks ... - */ - if ((addr & 0x3) || (len & 0x3)) - return -EINVAL; - - data = kzalloc(MEMWIN0_APERTURE, GFP_KERNEL); - if (!data) - return -ENOMEM; - - /* Offset into the region of memory which is being accessed - * MEM_EDC0 = 0 - * MEM_EDC1 = 1 - * MEM_MC = 2 - */ - memoffset = (mtype * (5 * 1024 * 1024)); - - /* Determine the PCIE_MEM_ACCESS_OFFSET */ - addr = addr + memoffset; - - /* - * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes - * at a time so we need to round down the start and round up the end. - * We'll start copying out of the first line at (addr - start) a word - * at a time. 
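
The rounding this removed comment describes, and which the deleted code below performs, extracted into a runnable form; the 2 KiB value matches MEMWIN0_APERTURE and the address and length are arbitrary:

#include <stdint.h>
#include <stdio.h>

#define APERTURE 2048u                      /* MEMWIN0_APERTURE */

int main(void)
{
        uint32_t addr  = 0x12345, len = 100;
        uint32_t start = addr & ~(APERTURE - 1);   /* round start down */
        uint32_t end   = (addr + len + APERTURE - 1) & ~(APERTURE - 1);
        uint32_t off   = (addr - start) / 4;       /* first word to copy */

        printf("start=0x%x end=0x%x first word=%u windows=%u\n",
               start, end, off, (end - start) / APERTURE);
        return 0;
}
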
- */ - start = addr & ~(MEMWIN0_APERTURE-1); - end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1); - offset = (addr - start)/sizeof(__be32); - - for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) { - /* - * If we're writing, copy the data from the caller's memory - * buffer - */ - if (!dir) { - /* - * If we're doing a partial write, then we need to do - * a read-modify-write ... - */ - if (offset || len < MEMWIN0_APERTURE) { - ret = csio_mem_win_rw(hw, pos, data, 1); - if (ret) { - kfree(data); - return ret; - } - } - while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) && - len > 0) { - data[offset++] = *buf++; - len -= sizeof(__be32); - } - } - - /* - * Transfer a block of memory and bail if there's an error. - */ - ret = csio_mem_win_rw(hw, pos, data, dir); - if (ret) { - kfree(data); - return ret; - } - - /* - * If we're reading, copy the data into the caller's memory - * buffer. - */ - if (dir) - while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) && - len > 0) { - *buf++ = data[offset++]; - len -= sizeof(__be32); - } - } - - kfree(data); - - return 0; -} - static int csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf) { - return csio_memory_rw(hw, mtype, addr, len, buf, 0); + return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype, + addr, len, buf, 0); } /* * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */ -#define EEPROM_MAX_RD_POLL 40 -#define EEPROM_MAX_WR_POLL 6 -#define EEPROM_STAT_ADDR 0x7bfc -#define VPD_BASE 0x400 -#define VPD_BASE_OLD 0 -#define VPD_LEN 512 +#define EEPROM_MAX_RD_POLL 40 +#define EEPROM_MAX_WR_POLL 6 +#define EEPROM_STAT_ADDR 0x7bfc +#define VPD_BASE 0x400 +#define VPD_BASE_OLD 0 +#define VPD_LEN 1024 #define VPD_INFO_FLD_HDR_SIZE 3 /* @@ -817,23 +646,6 @@ out: return 0; } -/* - * csio_hw_flash_cfg_addr - return the address of the flash - * configuration file - * @hw: the HW module - * - * Return the address within the flash where the Firmware Configuration - * File is stored. - */ -static unsigned int -csio_hw_flash_cfg_addr(struct csio_hw *hw) -{ - if (hw->params.sf_size == 0x100000) - return FPGA_FLASH_CFG_OFFSET; - else - return FLASH_CFG_OFFSET; -} - static void csio_hw_print_fw_version(struct csio_hw *hw, char *str) { @@ -898,13 +710,13 @@ csio_hw_check_fw_version(struct csio_hw *hw) minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev); micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev); - if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ + if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */ csio_err(hw, "card FW has major version %u, driver wants %u\n", - major, FW_VERSION_MAJOR); + major, FW_VERSION_MAJOR(hw)); return -EINVAL; } - if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) + if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw)) return 0; /* perfect match */ /* Minor/micro version mismatch */ @@ -1040,100 +852,6 @@ csio_hw_get_flash_params(struct csio_hw *hw) return 0; } -static void -csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range) -{ - uint16_t val; - uint32_t pcie_cap; - - if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) { - pci_read_config_word(hw->pdev, - pcie_cap + PCI_EXP_DEVCTL2, &val); - val &= 0xfff0; - val |= range ; - pci_write_config_word(hw->pdev, - pcie_cap + PCI_EXP_DEVCTL2, val); - } -} - - -/* - * Return the specified PCI-E Configuration Space register from our Physical - * Function. 
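
csio_set_pcie_completion_timeout(), deleted earlier in this hunk, open-codes an update that csio_hw_configure() later performs with pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd). The clear-and-set itself, with config space reduced to a variable and invented prior contents:

#include <stdint.h>
#include <stdio.h>

#define COMP_TIMEOUT_MASK 0x000fu   /* Completion Timeout Value, DEVCTL2[3:0] */

static uint16_t devctl2 = 0x0041;   /* made-up existing register contents */

static void set_completion_timeout(uint16_t range)
{
        uint16_t val = devctl2;     /* pci_read_config_word() in the driver */

        val &= ~COMP_TIMEOUT_MASK;  /* clear the old encoding */
        val |= range;               /* 0xd selects the ~4 s range */
        devctl2 = val;              /* pci_write_config_word() */
}

int main(void)
{
        set_completion_timeout(0xd);
        printf("DEVCTL2 = 0x%04x\n", devctl2);   /* 0x004d */
        return 0;
}
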
We try first via a Firmware LDST Command since we prefer to let - * the firmware own all of these registers, but if that fails we go for it - * directly ourselves. - */ -static uint32_t -csio_read_pcie_cfg4(struct csio_hw *hw, int reg) -{ - u32 val = 0; - struct csio_mb *mbp; - int rv; - struct fw_ldst_cmd *ldst_cmd; - - mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); - if (!mbp) { - CSIO_INC_STATS(hw, n_err_nomem); - pci_read_config_dword(hw->pdev, reg, &val); - return val; - } - - csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg); - - rv = csio_mb_issue(hw, mbp); - - /* - * If the LDST Command suucceeded, exctract the returned register - * value. Otherwise read it directly ourself. - */ - if (rv == 0) { - ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb); - val = ntohl(ldst_cmd->u.pcie.data[0]); - } else - pci_read_config_dword(hw->pdev, reg, &val); - - mempool_free(mbp, hw->mb_mempool); - - return val; -} /* csio_read_pcie_cfg4 */ - -static int -csio_hw_set_mem_win(struct csio_hw *hw) -{ - u32 bar0; - - /* - * Truncation intentional: we only read the bottom 32-bits of the - * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to - * read BAR0 instead of using pci_resource_start() because we could be - * operating from within a Virtual Machine which is trapping our - * accesses to our Configuration Space and we need to set up the PCI-E - * Memory Window decoders with the actual addresses which will be - * coming across the PCI-E link. - */ - bar0 = csio_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0); - bar0 &= PCI_BASE_ADDRESS_MEM_MASK; - - /* - * Set up memory window for accessing adapter memory ranges. (Read - * back MA register to ensure that changes propagate before we attempt - * to use the new values.) - */ - csio_wr_reg32(hw, (bar0 + MEMWIN0_BASE) | BIR(0) | - WINDOW(ilog2(MEMWIN0_APERTURE) - 10), - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0)); - csio_wr_reg32(hw, (bar0 + MEMWIN1_BASE) | BIR(0) | - WINDOW(ilog2(MEMWIN1_APERTURE) - 10), - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1)); - csio_wr_reg32(hw, (bar0 + MEMWIN2_BASE) | BIR(0) | - WINDOW(ilog2(MEMWIN2_APERTURE) - 10), - PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2)); - csio_rd_reg32(hw, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2)); - return 0; -} /* csio_hw_set_mem_win */ - - - /*****************************************************************************/ /* HW State machine assists */ /*****************************************************************************/ @@ -1234,7 +952,9 @@ retry: for (;;) { uint32_t pcie_fw; + spin_unlock_irq(&hw->lock); msleep(50); + spin_lock_irq(&hw->lock); waiting -= 50; /* @@ -1861,87 +1581,6 @@ out: return rv; } -static int -csio_config_global_rss(struct csio_hw *hw) -{ - struct csio_mb *mbp; - enum fw_retval retval; - - mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); - if (!mbp) { - CSIO_INC_STATS(hw, n_err_nomem); - return -ENOMEM; - } - - csio_rss_glb_config(hw, mbp, CSIO_MB_DEFAULT_TMO, - FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, - FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | - FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | - FW_RSS_GLB_CONFIG_CMD_TNLALLLKP, - NULL); - - if (csio_mb_issue(hw, mbp)) { - csio_err(hw, "Issue of FW_RSS_GLB_CONFIG_CMD failed!\n"); - mempool_free(mbp, hw->mb_mempool); - return -EINVAL; - } - - retval = csio_mb_fw_retval(mbp); - if (retval != FW_SUCCESS) { - csio_err(hw, "FW_RSS_GLB_CONFIG_CMD returned 0x%x!\n", retval); - mempool_free(mbp, hw->mb_mempool); - return -EINVAL; - } - - mempool_free(mbp, hw->mb_mempool); - - return 0; -} - -/* - * csio_config_pfvf - 
Configure Physical/Virtual functions settings. - * @hw: HW module - * - */ -static int -csio_config_pfvf(struct csio_hw *hw) -{ - struct csio_mb *mbp; - enum fw_retval retval; - - mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); - if (!mbp) { - CSIO_INC_STATS(hw, n_err_nomem); - return -ENOMEM; - } - - /* - * For now, allow all PFs to access to all ports using a pmask - * value of 0xF (M_FW_PFVF_CMD_PMASK). Once we have VFs, we will - * need to provide access based on some rule. - */ - csio_mb_pfvf(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, CSIO_NEQ, - CSIO_NETH_CTRL, CSIO_NIQ_FLINT, 0, 0, CSIO_NVI, CSIO_CMASK, - CSIO_PMASK, CSIO_NEXACTF, CSIO_R_CAPS, CSIO_WX_CAPS, NULL); - - if (csio_mb_issue(hw, mbp)) { - csio_err(hw, "Issue of FW_PFVF_CMD failed!\n"); - mempool_free(mbp, hw->mb_mempool); - return -EINVAL; - } - - retval = csio_mb_fw_retval(mbp); - if (retval != FW_SUCCESS) { - csio_err(hw, "FW_PFVF_CMD returned 0x%x!\n", retval); - mempool_free(mbp, hw->mb_mempool); - return -EINVAL; - } - - mempool_free(mbp, hw->mb_mempool); - - return 0; -} - /* * csio_enable_ports - Bring up all available ports. * @hw: HW module. @@ -2121,9 +1760,9 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path) uint32_t *cfg_data; int value_to_add = 0; - if (request_firmware(&cf, CSIO_CF_FNAME, dev) < 0) { - csio_err(hw, "could not find config file " CSIO_CF_FNAME - ",err: %d\n", ret); + if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) { + csio_err(hw, "could not find config file %s, err: %d\n", + CSIO_CF_FNAME(hw), ret); return -ENOENT; } @@ -2147,9 +1786,24 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path) ret = csio_memory_write(hw, mtype, maddr, cf->size + value_to_add, cfg_data); + + if ((ret == 0) && (value_to_add != 0)) { + union { + u32 word; + char buf[4]; + } last; + size_t size = cf->size & ~0x3; + int i; + + last.word = cfg_data[size >> 2]; + for (i = value_to_add; i < 4; i++) + last.buf[i] = 0; + ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word); + } if (ret == 0) { - csio_info(hw, "config file upgraded to " CSIO_CF_FNAME "\n"); - strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64); + csio_info(hw, "config file upgraded to %s\n", + CSIO_CF_FNAME(hw)); + snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw)); } leave: @@ -2179,7 +1833,7 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) { unsigned int mtype, maddr; int rv; - uint32_t finiver, finicsum, cfcsum; + uint32_t finiver = 0, finicsum = 0, cfcsum = 0; int using_flash; char path[64]; @@ -2207,7 +1861,7 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) * config file from flash. 
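
The flash-config upload earlier in this hunk has to round the file up to a 32-bit boundary before pushing it through the word-oriented memory window, zeroing only the pad bytes of the final word. One way to do that tail handling, sketched in user space (the literal string stands in for cf->data, and the printed word is endian-dependent):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char cfg[] = "0123456789";      /* 10 bytes, not word aligned */
        size_t size  = sizeof(cfg) - 1;
        size_t whole = size & ~(size_t)0x3;   /* bytes in complete words: 8 */
        size_t resid = size & 0x3;            /* valid bytes in last word: 2 */
        union { uint32_t word; char buf[4]; } last;

        memcpy(last.buf, cfg + whole, resid);
        memset(last.buf + resid, 0, 4 - resid);   /* zero only the padding */
        printf("%zu whole bytes + one padded word 0x%08x\n", whole, last.word);
        return 0;
}
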
*/ mtype = FW_MEMTYPE_CF_FLASH; - maddr = csio_hw_flash_cfg_addr(hw); + maddr = hw->chip_ops->chip_flash_cfg_addr(hw); using_flash = 1; } else { /* @@ -2305,16 +1959,6 @@ csio_hw_no_fwconfig(struct csio_hw *hw, int reset) if (rv != 0) goto out; - /* Config Global RSS command */ - rv = csio_config_global_rss(hw); - if (rv != 0) - goto out; - - /* Configure PF/VF capabilities of device */ - rv = csio_config_pfvf(hw); - if (rv != 0) - goto out; - /* device parameters */ rv = csio_get_device_params(hw); if (rv != 0) @@ -2346,30 +1990,32 @@ csio_hw_flash_fw(struct csio_hw *hw) struct pci_dev *pci_dev = hw->pdev; struct device *dev = &pci_dev->dev ; - if (request_firmware(&fw, CSIO_FW_FNAME, dev) < 0) { - csio_err(hw, "could not find firmware image " CSIO_FW_FNAME - ",err: %d\n", ret); + if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) { + csio_err(hw, "could not find firmware image %s, err: %d\n", + CSIO_FW_FNAME(hw), ret); return -EINVAL; } hdr = (const struct fw_hdr *)fw->data; fw_ver = ntohl(hdr->fw_ver); - if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR) + if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw)) return -EINVAL; /* wrong major version, won't do */ /* * If the flash FW is unusable or we found something newer, load it. */ - if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR || + if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) || fw_ver > hw->fwrev) { ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size, /*force=*/false); if (!ret) - csio_info(hw, "firmware upgraded to version %pI4 from " - CSIO_FW_FNAME "\n", &hdr->fw_ver); + csio_info(hw, + "firmware upgraded to version %pI4 from %s\n", + &hdr->fw_ver, CSIO_FW_FNAME(hw)); else csio_err(hw, "firmware upgrade failed! err=%d\n", ret); - } + } else + ret = -EINVAL; release_firmware(fw); @@ -2407,10 +2053,12 @@ csio_hw_configure(struct csio_hw *hw) goto out; } - /* Set pci completion timeout value to 4 seconds. */ - csio_set_pcie_completion_timeout(hw, 0xd); + /* Set PCIe completion timeout to 4 seconds */ + if (pci_is_pcie(hw->pdev)) + pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd); - csio_hw_set_mem_win(hw); + hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR); rv = csio_hw_get_fw_version(hw, &hw->fwrev); if (rv != 0) @@ -2478,6 +2126,8 @@ csio_hw_configure(struct csio_hw *hw) } else { if (hw->fw_state == CSIO_DEV_STATE_INIT) { + hw->flags |= CSIO_HWF_USING_SOFT_PARAMS; + /* device parameters */ rv = csio_get_device_params(hw); if (rv != 0) @@ -2651,7 +2301,7 @@ csio_hw_intr_disable(struct csio_hw *hw) } -static void +void csio_hw_fatal_err(struct csio_hw *hw) { csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0); @@ -2990,14 +2640,6 @@ csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt) /* END: HW SM */ /*****************************************************************************/ -/* Slow path handlers */ -struct intr_info { - unsigned int mask; /* bits to check in interrupt status */ - const char *msg; /* message to print or NULL */ - short stat_idx; /* stat counter to increment or -1 */ - unsigned short fatal; /* whether the condition reported is fatal */ -}; - /* * csio_handle_intr_status - table driven interrupt handler * @hw: HW instance @@ -3011,7 +2653,7 @@ struct intr_info { * by an entry specifying mask 0. Returns the number of fatal interrupt * conditions. 
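
csio_handle_intr_status(), exported just below, walks a mask-terminated intr_info table against a status register; the whole mechanism fits in a few lines. A self-contained model with invented table contents:

#include <stdint.h>
#include <stdio.h>

struct intr_info {
        unsigned int   mask;       /* bits to check in interrupt status */
        const char    *msg;        /* message to print or NULL */
        short          stat_idx;   /* stat counter to increment or -1 */
        unsigned short fatal;      /* whether the condition is fatal */
};

/* Walk a table terminated by a mask of 0; return how many matched
 * conditions were fatal. */
static int handle_intr_status(uint32_t status, const struct intr_info *acts)
{
        int fatal = 0;

        for (; acts->mask; acts++) {
                if (!(status & acts->mask))
                        continue;
                if (acts->msg)
                        printf("%s (0x%x)\n", acts->msg, status);
                fatal += acts->fatal;
        }
        return fatal;
}

int main(void)
{
        static const struct intr_info tbl[] = {
                { 0x1, "array parity error", -1, 1 },
                { 0x2, "benign status bit",  -1, 0 },
                { 0, NULL, 0, 0 }
        };

        printf("fatal=%d\n", handle_intr_status(0x3, tbl));
        return 0;
}
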
*/ -static int +int csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, const struct intr_info *acts) { @@ -3038,80 +2680,6 @@ csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, } /* - * Interrupt handler for the PCIE module. - */ -static void -csio_pcie_intr_handler(struct csio_hw *hw) -{ - static struct intr_info sysbus_intr_info[] = { - { RNPP, "RXNP array parity error", -1, 1 }, - { RPCP, "RXPC array parity error", -1, 1 }, - { RCIP, "RXCIF array parity error", -1, 1 }, - { RCCP, "Rx completions control array parity error", -1, 1 }, - { RFTP, "RXFT array parity error", -1, 1 }, - { 0, NULL, 0, 0 } - }; - static struct intr_info pcie_port_intr_info[] = { - { TPCP, "TXPC array parity error", -1, 1 }, - { TNPP, "TXNP array parity error", -1, 1 }, - { TFTP, "TXFT array parity error", -1, 1 }, - { TCAP, "TXCA array parity error", -1, 1 }, - { TCIP, "TXCIF array parity error", -1, 1 }, - { RCAP, "RXCA array parity error", -1, 1 }, - { OTDD, "outbound request TLP discarded", -1, 1 }, - { RDPE, "Rx data parity error", -1, 1 }, - { TDUE, "Tx uncorrectable data error", -1, 1 }, - { 0, NULL, 0, 0 } - }; - static struct intr_info pcie_intr_info[] = { - { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, - { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, - { MSIDATAPERR, "MSI data parity error", -1, 1 }, - { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, - { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, - { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, - { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, - { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, - { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, - { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, - { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, - { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, - { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, - { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, - { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, - { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, - { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, - { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, - { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, - { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, - { FIDPERR, "PCI FID parity error", -1, 1 }, - { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, - { MATAGPERR, "PCI MA tag parity error", -1, 1 }, - { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, - { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, - { RXWRPERR, "PCI Rx write parity error", -1, 1 }, - { RPLPERR, "PCI replay buffer parity error", -1, 1 }, - { PCIESINT, "PCI core secondary fault", -1, 1 }, - { PCIEPINT, "PCI core primary fault", -1, 1 }, - { UNXSPLCPLERR, "PCI unexpected split completion error", -1, - 0 }, - { 0, NULL, 0, 0 } - }; - - int fat; - - fat = csio_handle_intr_status(hw, - PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, - sysbus_intr_info) + - csio_handle_intr_status(hw, - PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, - pcie_port_intr_info) + - csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info); - if (fat) - csio_hw_fatal_err(hw); -} - -/* * TP interrupt handler. 
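
The PCIe handler removed above does not disappear: it moves behind the per-chip ops table, and the slow-path dispatch in csio_hw_slow_intr_handler() becomes an indirect call. A compact model of that T4/T5 split, with placeholder handler bodies (0x4000 is CSIO_HW_T4 from the new header):

#include <stdio.h>

struct hw;

struct hw_chip_ops {
        void (*chip_pcie_intr_handler)(struct hw *);
};

struct hw {
        unsigned short            chip_id;
        const struct hw_chip_ops *chip_ops;
};

static void t4_pcie_intr_handler(struct hw *hw) { (void)hw; puts("T4 PCIE cause"); }
static void t5_pcie_intr_handler(struct hw *hw) { (void)hw; puts("T5 PCIE cause"); }

static const struct hw_chip_ops t4_ops = { t4_pcie_intr_handler };
static const struct hw_chip_ops t5_ops = { t5_pcie_intr_handler };

int main(void)
{
        struct hw hw = { .chip_id = 0x4000 };       /* CSIO_HW_T4 */

        hw.chip_ops = (hw.chip_id == 0x4000) ? &t4_ops : &t5_ops;
        hw.chip_ops->chip_pcie_intr_handler(&hw);   /* the cause & PCIE path */
        return 0;
}

Resolving the ops pointer once at init keeps every hot path free of chip checks; only macros that must stay inline (doorbells, MAC registers) test csio_is_t4() directly.
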
*/ static void csio_tp_intr_handler(struct csio_hw *hw) @@ -3517,7 +3085,7 @@ static void csio_ncsi_intr_handler(struct csio_hw *hw) */ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) { - uint32_t v = csio_rd_reg32(hw, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); + uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port)); v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; if (!v) @@ -3527,7 +3095,7 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); if (v & RXFIFO_PRTY_ERR) csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); - csio_wr_reg32(hw, v, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); + csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port)); csio_hw_fatal_err(hw); } @@ -3596,7 +3164,7 @@ csio_hw_slow_intr_handler(struct csio_hw *hw) csio_xgmac_intr_handler(hw, 3); if (cause & PCIE) - csio_pcie_intr_handler(hw); + hw->chip_ops->chip_pcie_intr_handler(hw); if (cause & MC) csio_mem_intr_handler(hw, MEM_MC); @@ -3892,7 +3460,6 @@ csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len, struct csio_fl_dma_buf *flb, void *priv) { __u8 op; - __be64 *data; void *msg = NULL; uint32_t msg_len = 0; bool msg_sg = 0; @@ -3908,8 +3475,6 @@ csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len, msg = (void *) flb; msg_len = flb->totlen; msg_sg = 1; - - data = (__be64 *) msg; } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) { CSIO_INC_STATS(hw, n_cpl_fw6_msg); @@ -3917,8 +3482,6 @@ csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len, msg = (void *)((uintptr_t)wr + sizeof(__be64)); msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) : sizeof(struct cpl_fw4_msg); - - data = (__be64 *) msg; } else { csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op); CSIO_INC_STATS(hw, n_cpl_unexp); @@ -4262,6 +3825,7 @@ csio_hw_get_device_id(struct csio_hw *hw) &hw->params.pci.device_id); csio_dev_id_cached(hw); + hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK); } /* csio_hw_get_device_id */ @@ -4280,19 +3844,21 @@ csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id) prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); - if (prot_type == CSIO_FPGA) { + if (prot_type == CSIO_T4_FCOE_ASIC) { + memcpy(hw->hw_ver, + csio_t4_fcoe_adapters[adap_type].model_no, 16); memcpy(hw->model_desc, - csio_fcoe_adapters[13].description, 32); - } else if (prot_type == CSIO_T4_FCOE_ASIC) { + csio_t4_fcoe_adapters[adap_type].description, + 32); + } else if (prot_type == CSIO_T5_FCOE_ASIC) { memcpy(hw->hw_ver, - csio_fcoe_adapters[adap_type].model_no, 16); + csio_t5_fcoe_adapters[adap_type].model_no, 16); memcpy(hw->model_desc, - csio_fcoe_adapters[adap_type].description, 32); + csio_t5_fcoe_adapters[adap_type].description, + 32); } else { char tempName[32] = "Chelsio FCoE Controller"; memcpy(hw->model_desc, tempName, 32); - - CSIO_DB_ASSERT(0); } } } /* csio_hw_set_description */ @@ -4321,6 +3887,9 @@ csio_hw_init(struct csio_hw *hw) strcpy(hw->name, CSIO_HW_NAME); + /* Initialize the HW chip ops with T4/T5 specific ops */ + hw->chip_ops = csio_is_t4(hw->chip_id) ? 
&t4_ops : &t5_ops; + /* Set the model & its description */ ven_id = hw->params.pci.vendor_id; diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h index 9edcca4c71a..49b1daa4476 100644 --- a/drivers/scsi/csiostor/csio_hw.h +++ b/drivers/scsi/csiostor/csio_hw.h @@ -48,6 +48,7 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_transport_fc.h> +#include "csio_hw_chip.h" #include "csio_wr.h" #include "csio_mb.h" #include "csio_scsi.h" @@ -60,13 +61,6 @@ */ #define FW_HOSTERROR 255 -#define CSIO_FW_FNAME "cxgb4/t4fw.bin" -#define CSIO_CF_FNAME "cxgb4/t4-config.txt" - -#define FW_VERSION_MAJOR 1 -#define FW_VERSION_MINOR 2 -#define FW_VERSION_MICRO 8 - #define CSIO_HW_NAME "Chelsio FCoE Adapter" #define CSIO_MAX_PFN 8 #define CSIO_MAX_PPORTS 4 @@ -123,8 +117,6 @@ extern int csio_msi; #define CSIO_VENDOR_ID 0x1425 #define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00 #define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF -#define CSIO_FPGA 0xA000 -#define CSIO_T4_FCOE_ASIC 0x4600 #define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ EDC1 | LE | TP | MA | PM_TX | PM_RX | \ @@ -161,17 +153,6 @@ enum { CSIO_SGE_INT_CNT_VAL_1 = 4, CSIO_SGE_INT_CNT_VAL_2 = 8, CSIO_SGE_INT_CNT_VAL_3 = 16, - - /* Storage specific - used by FW_PFVF_CMD */ - CSIO_WX_CAPS = FW_CMD_CAP_PF, /* w/x all */ - CSIO_R_CAPS = FW_CMD_CAP_PF, /* r all */ - CSIO_NVI = 4, - CSIO_NIQ_FLINT = 34, - CSIO_NETH_CTRL = 32, - CSIO_NEQ = 66, - CSIO_NEXACTF = 32, - CSIO_CMASK = FW_PFVF_CMD_CMASK_MASK, - CSIO_PMASK = FW_PFVF_CMD_PMASK_MASK, }; /* Slowpath events */ @@ -207,17 +188,6 @@ enum { SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */ }; -enum { MEM_EDC0, MEM_EDC1, MEM_MC }; - -enum { - MEMWIN0_APERTURE = 2048, - MEMWIN0_BASE = 0x1b800, - MEMWIN1_APERTURE = 32768, - MEMWIN1_BASE = 0x28000, - MEMWIN2_APERTURE = 65536, - MEMWIN2_BASE = 0x30000, -}; - /* serial flash and firmware constants */ enum { SF_ATTEMPTS = 10, /* max retries for SF operations */ @@ -239,9 +209,6 @@ enum { FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/ FLASH_CFG_OFFSET = 0x1f0000, FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE, - FPGA_FLASH_CFG_OFFSET = 0xf0000 , /* if FPGA mode, then cfg file is - * at 1MB - 64KB */ - FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE, }; /* @@ -259,6 +226,8 @@ enum { FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), + /* Location of Firmware Configuration File in FLASH. */ + FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC), }; #undef FLASH_START @@ -310,7 +279,7 @@ struct csio_adap_desc { struct pci_params { uint16_t vendor_id; uint16_t device_id; - uint32_t vpd_cap_addr; + int vpd_cap_addr; uint16_t speed; uint8_t width; }; @@ -513,6 +482,7 @@ struct csio_hw { uint32_t fwrev; uint32_t tp_vers; char chip_ver; + uint16_t chip_id; /* Tells T4/T5 chip */ uint32_t cfg_finiver; uint32_t cfg_finicsum; uint32_t cfg_cfcsum; @@ -556,6 +526,9 @@ struct csio_hw { */ struct csio_fcoe_res_info fres_info; /* Fcoe resource info */ + struct csio_hw_chip_ops *chip_ops; /* T4/T5 Chip specific + * Operations + */ /* MSIX vectors */ struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS]; @@ -636,9 +609,16 @@ csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us) #define csio_dbg(__hw, __fmt, ...) 
#endif +int csio_hw_wait_op_done_val(struct csio_hw *, int, uint32_t, int, + int, int, uint32_t *); +void csio_hw_tp_wr_bits_indirect(struct csio_hw *, unsigned int, + unsigned int, unsigned int); int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *); void csio_hw_intr_disable(struct csio_hw *); -int csio_hw_slow_intr_handler(struct csio_hw *hw); +int csio_hw_slow_intr_handler(struct csio_hw *); +int csio_handle_intr_status(struct csio_hw *, unsigned int, + const struct intr_info *); + int csio_hw_start(struct csio_hw *); int csio_hw_stop(struct csio_hw *); int csio_hw_reset(struct csio_hw *); @@ -647,19 +627,17 @@ int csio_is_hw_removing(struct csio_hw *); int csio_fwevtq_handler(struct csio_hw *); void csio_evtq_worker(struct work_struct *); -int csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, - void *evt_msg, uint16_t len); +int csio_enqueue_evt(struct csio_hw *, enum csio_evt, void *, uint16_t); void csio_evtq_flush(struct csio_hw *hw); int csio_request_irqs(struct csio_hw *); void csio_intr_enable(struct csio_hw *); void csio_intr_disable(struct csio_hw *, bool); +void csio_hw_fatal_err(struct csio_hw *); struct csio_lnode *csio_lnode_alloc(struct csio_hw *); int csio_config_queues(struct csio_hw *); -int csio_hw_mc_read(struct csio_hw *, uint32_t, __be32 *, uint64_t *); -int csio_hw_edc_read(struct csio_hw *, int, uint32_t, __be32 *, uint64_t *); int csio_hw_init(struct csio_hw *); void csio_hw_exit(struct csio_hw *); #endif /* ifndef __CSIO_HW_H__ */ diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h new file mode 100644 index 00000000000..bca0de61ae8 --- /dev/null +++ b/drivers/scsi/csiostor/csio_hw_chip.h @@ -0,0 +1,175 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CSIO_HW_CHIP_H__ +#define __CSIO_HW_CHIP_H__ + +#include "csio_defs.h" + +/* FCoE device IDs for T4 */ +#define CSIO_DEVID_T440DBG_FCOE 0x4600 +#define CSIO_DEVID_T420CR_FCOE 0x4601 +#define CSIO_DEVID_T422CR_FCOE 0x4602 +#define CSIO_DEVID_T440CR_FCOE 0x4603 +#define CSIO_DEVID_T420BCH_FCOE 0x4604 +#define CSIO_DEVID_T440BCH_FCOE 0x4605 +#define CSIO_DEVID_T440CH_FCOE 0x4606 +#define CSIO_DEVID_T420SO_FCOE 0x4607 +#define CSIO_DEVID_T420CX_FCOE 0x4608 +#define CSIO_DEVID_T420BT_FCOE 0x4609 +#define CSIO_DEVID_T404BT_FCOE 0x460A +#define CSIO_DEVID_B420_FCOE 0x460B +#define CSIO_DEVID_B404_FCOE 0x460C +#define CSIO_DEVID_T480CR_FCOE 0x460D +#define CSIO_DEVID_T440LPCR_FCOE 0x460E +#define CSIO_DEVID_AMSTERDAM_T4_FCOE 0x460F +#define CSIO_DEVID_HUAWEI_T480_FCOE 0x4680 +#define CSIO_DEVID_HUAWEI_T440_FCOE 0x4681 +#define CSIO_DEVID_HUAWEI_STG310_FCOE 0x4682 +#define CSIO_DEVID_ACROMAG_XMC_XAUI 0x4683 +#define CSIO_DEVID_ACROMAG_XMC_SFP_FCOE 0x4684 +#define CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE 0x4685 +#define CSIO_DEVID_HUAWEI_10GT_FCOE 0x4686 +#define CSIO_DEVID_HUAWEI_T440_TOE_FCOE 0x4687 + +/* FCoE device IDs for T5 */ +#define CSIO_DEVID_T580DBG_FCOE 0x5600 +#define CSIO_DEVID_T520CR_FCOE 0x5601 +#define CSIO_DEVID_T522CR_FCOE 0x5602 +#define CSIO_DEVID_T540CR_FCOE 0x5603 +#define CSIO_DEVID_T520BCH_FCOE 0x5604 +#define CSIO_DEVID_T540BCH_FCOE 0x5605 +#define CSIO_DEVID_T540CH_FCOE 0x5606 +#define CSIO_DEVID_T520SO_FCOE 0x5607 +#define CSIO_DEVID_T520CX_FCOE 0x5608 +#define CSIO_DEVID_T520BT_FCOE 0x5609 +#define CSIO_DEVID_T504BT_FCOE 0x560A +#define CSIO_DEVID_B520_FCOE 0x560B +#define CSIO_DEVID_B504_FCOE 0x560C +#define CSIO_DEVID_T580CR2_FCOE 0x560D +#define CSIO_DEVID_T540LPCR_FCOE 0x560E +#define CSIO_DEVID_AMSTERDAM_T5_FCOE 0x560F +#define CSIO_DEVID_T580LPCR_FCOE 0x5610 +#define CSIO_DEVID_T520LLCR_FCOE 0x5611 +#define CSIO_DEVID_T560CR_FCOE 0x5612 +#define CSIO_DEVID_T580CR_FCOE 0x5613 + +/* Define MACRO values */ +#define CSIO_HW_T4 0x4000 +#define CSIO_T4_FCOE_ASIC 0x4600 +#define CSIO_HW_T5 0x5000 +#define CSIO_T5_FCOE_ASIC 0x5600 +#define CSIO_HW_CHIP_MASK 0xF000 +#define T4_REGMAP_SIZE (160 * 1024) +#define T5_REGMAP_SIZE (332 * 1024) +#define FW_FNAME_T4 "cxgb4/t4fw.bin" +#define FW_FNAME_T5 "cxgb4/t5fw.bin" +#define FW_CFG_NAME_T4 "cxgb4/t4-config.txt" +#define FW_CFG_NAME_T5 "cxgb4/t5-config.txt" + +/* Define static functions */ +static inline int csio_is_t4(uint16_t chip) +{ + return (chip == CSIO_HW_T4); +} + +static inline int csio_is_t5(uint16_t chip) +{ + return (chip == CSIO_HW_T5); +} + +/* Define MACRO DEFINITIONS */ +#define CSIO_DEVICE(devid, idx) \ + { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) } + +#define CSIO_HW_PIDX(hw, index) \ + (csio_is_t4(hw->chip_id) ? (PIDX(index)) : \ + (PIDX_T5(index) | DBTYPE(1U))) + +#define CSIO_HW_LP_INT_THRESH(hw, val) \ + (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH(val)) : \ + (V_LP_INT_THRESH_T5(val))) + +#define CSIO_HW_M_LP_INT_THRESH(hw) \ + (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_MASK) : (M_LP_INT_THRESH_T5)) + +#define CSIO_MAC_INT_CAUSE_REG(hw, port) \ + (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \ + (T5_PORT_REG(port, MAC_PORT_INT_CAUSE))) + +#define FW_VERSION_MAJOR(hw) (csio_is_t4(hw->chip_id) ? 1 : 0) +#define FW_VERSION_MINOR(hw) (csio_is_t4(hw->chip_id) ? 2 : 0) +#define FW_VERSION_MICRO(hw) (csio_is_t4(hw->chip_id) ? 8 : 0) + +#define CSIO_FW_FNAME(hw) \ + (csio_is_t4(hw->chip_id) ? 
FW_FNAME_T4 : FW_FNAME_T5) + +#define CSIO_CF_FNAME(hw) \ + (csio_is_t4(hw->chip_id) ? FW_CFG_NAME_T4 : FW_CFG_NAME_T5) + +/* Declare ENUMS */ +enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 }; + +enum { + MEMWIN_APERTURE = 2048, + MEMWIN_BASE = 0x1b800, + MEMWIN_CSIOSTOR = 6, /* PCI-e Memory Window access */ +}; + +/* Slow path handlers */ +struct intr_info { + unsigned int mask; /* bits to check in interrupt status */ + const char *msg; /* message to print or NULL */ + short stat_idx; /* stat counter to increment or -1 */ + unsigned short fatal; /* whether the condition reported is fatal */ +}; + +/* T4/T5 Chip specific ops */ +struct csio_hw; +struct csio_hw_chip_ops { + int (*chip_set_mem_win)(struct csio_hw *, uint32_t); + void (*chip_pcie_intr_handler)(struct csio_hw *); + uint32_t (*chip_flash_cfg_addr)(struct csio_hw *); + int (*chip_mc_read)(struct csio_hw *, int, uint32_t, + __be32 *, uint64_t *); + int (*chip_edc_read)(struct csio_hw *, int, uint32_t, + __be32 *, uint64_t *); + int (*chip_memory_rw)(struct csio_hw *, u32, int, u32, + u32, uint32_t *, int); + void (*chip_dfs_create_ext_mem)(struct csio_hw *); +}; + +extern struct csio_hw_chip_ops t4_ops; +extern struct csio_hw_chip_ops t5_ops; + +#endif /* #ifndef __CSIO_HW_CHIP_H__ */ diff --git a/drivers/scsi/csiostor/csio_hw_t4.c b/drivers/scsi/csiostor/csio_hw_t4.c new file mode 100644 index 00000000000..89ecbac5478 --- /dev/null +++ b/drivers/scsi/csiostor/csio_hw_t4.c @@ -0,0 +1,403 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "csio_hw.h" +#include "csio_init.h" + +/* + * Return the specified PCI-E Configuration Space register from our Physical + * Function. We try first via a Firmware LDST Command since we prefer to let + * the firmware own all of these registers, but if that fails we go for it + * directly ourselves. 
+ */
+static uint32_t
+csio_t4_read_pcie_cfg4(struct csio_hw *hw, int reg)
+{
+	u32 val = 0;
+	struct csio_mb *mbp;
+	int rv;
+	struct fw_ldst_cmd *ldst_cmd;
+
+	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+	if (!mbp) {
+		CSIO_INC_STATS(hw, n_err_nomem);
+		pci_read_config_dword(hw->pdev, reg, &val);
+		return val;
+	}
+
+	csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
+	rv = csio_mb_issue(hw, mbp);
+
+	/*
+	 * If the LDST Command succeeded, extract the returned register
+	 * value. Otherwise read it directly ourselves.
+	 */
+	if (rv == 0) {
+		ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
+		val = ntohl(ldst_cmd->u.pcie.data[0]);
+	} else
+		pci_read_config_dword(hw->pdev, reg, &val);
+
+	mempool_free(mbp, hw->mb_mempool);
+
+	return val;
+}
+
+static int
+csio_t4_set_mem_win(struct csio_hw *hw, uint32_t win)
+{
+	u32 bar0;
+	u32 mem_win_base;
+
+	/*
+	 * Truncation intentional: we only read the bottom 32-bits of the
+	 * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
+	 * read BAR0 instead of using pci_resource_start() because we could be
+	 * operating from within a Virtual Machine which is trapping our
+	 * accesses to our Configuration Space and we need to set up the PCI-E
+	 * Memory Window decoders with the actual addresses which will be
+	 * coming across the PCI-E link.
+	 */
+	bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
+	bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+	mem_win_base = bar0 + MEMWIN_BASE;
+
+	/*
+	 * Set up memory window for accessing adapter memory ranges. (Read
+	 * back MA register to ensure that changes propagate before we attempt
+	 * to use the new values.)
+	 */
+	csio_wr_reg32(hw, mem_win_base | BIR(0) |
+			  WINDOW(ilog2(MEMWIN_APERTURE) - 10),
+		      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+	csio_rd_reg32(hw,
+		      PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+	return 0;
+}
+
+/*
+ * Interrupt handler for the PCIE module.
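+ *
+ * The handler is table-driven: each struct intr_info entry pairs a
+ * cause bit with a log message and a fatal flag, and the cause register
+ * is walked against three such tables. A minimal sketch of the walk
+ * that csio_handle_intr_status() performs (simplified here for
+ * illustration; the exact implementation lives in csio_hw.c) is:
+ *
+ *	static int handle_intr_status(struct csio_hw *hw, unsigned int reg,
+ *				      const struct intr_info *acts)
+ *	{
+ *		int fatal = 0;
+ *		unsigned int status = csio_rd_reg32(hw, reg);
+ *
+ *		for (; acts->mask; ++acts) {
+ *			if (!(status & acts->mask))
+ *				continue;
+ *			if (acts->fatal)
+ *				fatal++;
+ *			if (acts->msg)
+ *				csio_fatal(hw, "%s (0x%x)\n", acts->msg,
+ *					   status & acts->mask);
+ *		}
+ *		if (status)
+ *			csio_wr_reg32(hw, status, reg);
+ *		return fatal;
+ *	}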
+ */ +static void +csio_t4_pcie_intr_handler(struct csio_hw *hw) +{ + static struct intr_info sysbus_intr_info[] = { + { RNPP, "RXNP array parity error", -1, 1 }, + { RPCP, "RXPC array parity error", -1, 1 }, + { RCIP, "RXCIF array parity error", -1, 1 }, + { RCCP, "Rx completions control array parity error", -1, 1 }, + { RFTP, "RXFT array parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info pcie_port_intr_info[] = { + { TPCP, "TXPC array parity error", -1, 1 }, + { TNPP, "TXNP array parity error", -1, 1 }, + { TFTP, "TXFT array parity error", -1, 1 }, + { TCAP, "TXCA array parity error", -1, 1 }, + { TCIP, "TXCIF array parity error", -1, 1 }, + { RCAP, "RXCA array parity error", -1, 1 }, + { OTDD, "outbound request TLP discarded", -1, 1 }, + { RDPE, "Rx data parity error", -1, 1 }, + { TDUE, "Tx uncorrectable data error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + static struct intr_info pcie_intr_info[] = { + { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, + { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, + { MSIDATAPERR, "MSI data parity error", -1, 1 }, + { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, + { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, + { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, + { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, + { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, + { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, + { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, + { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, + { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, + { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, + { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, + { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, + { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, + { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, + { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, + { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, + { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, + { FIDPERR, "PCI FID parity error", -1, 1 }, + { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, + { MATAGPERR, "PCI MA tag parity error", -1, 1 }, + { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, + { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, + { RXWRPERR, "PCI Rx write parity error", -1, 1 }, + { RPLPERR, "PCI replay buffer parity error", -1, 1 }, + { PCIESINT, "PCI core secondary fault", -1, 1 }, + { PCIEPINT, "PCI core primary fault", -1, 1 }, + { UNXSPLCPLERR, "PCI unexpected split completion error", -1, + 0 }, + { 0, NULL, 0, 0 } + }; + + int fat; + fat = csio_handle_intr_status(hw, + PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, + sysbus_intr_info) + + csio_handle_intr_status(hw, + PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, + pcie_port_intr_info) + + csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info); + if (fat) + csio_hw_fatal_err(hw); +} + +/* + * csio_t4_flash_cfg_addr - return the address of the flash configuration file + * @hw: the HW module + * + * Return the address within the flash where the Firmware Configuration + * File is stored. 
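+ *
+ * With the constants from csio_hw.h (FLASH_CFG_OFFSET == 0x1f0000 and,
+ * assuming the conventional 64KB serial flash sector, SF_SEC_SIZE ==
+ * 0x10000), this is sector 0x1f0000 / 0x10000 == 31 -- numerically the
+ * same location the T5 code derives symbolically as FLASH_CFG_START.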
+ */
+static unsigned int
+csio_t4_flash_cfg_addr(struct csio_hw *hw)
+{
+	return FLASH_CFG_OFFSET;
+}
+
+/*
+ * csio_t4_mc_read - read from MC through backdoor accesses
+ * @hw: the hw module
+ * @idx: not used for T4 adapter
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t4_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+		uint64_t *ecc)
+{
+	int i;
+
+	if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
+		return -EBUSY;
+	csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
+	csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
+	csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
+	csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+		      MC_BIST_CMD);
+	i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
+				     0, 10, 1, NULL);
+	if (i)
+		return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+
+	for (i = 15; i >= 0; i--)
+		*data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
+	if (ecc)
+		*ecc = csio_rd_reg64(hw, MC_DATA(16));
+#undef MC_DATA
+	return 0;
+}
+
+/*
+ * csio_t4_edc_read - read from EDC through backdoor accesses
+ * @hw: the hw module
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t4_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+		 uint64_t *ecc)
+{
+	int i;
+
+	idx *= EDC_STRIDE;
+	if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
+		return -EBUSY;
+	csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
+	csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
+	csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
+	csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
+		      EDC_BIST_CMD + idx);
+	i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
+				     0, 10, 1, NULL);
+	if (i)
+		return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+
+	for (i = 15; i >= 0; i--)
+		*data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
+	if (ecc)
+		*ecc = csio_rd_reg64(hw, EDC_DATA(16));
+#undef EDC_DATA
+	return 0;
+}
+
+/*
+ * csio_t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @hw: the csio_hw
+ * @win: PCI-E memory Window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
+ * @addr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @buf: host memory buffer
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address, length and host buffer must be aligned on
+ * 32-bit boundaries. The memory is transferred as a raw byte sequence
+ * from/to the firmware's memory. If this memory contains data
+ * structures which contain multi-byte integers, it's the caller's
+ * responsibility to perform appropriate byte order conversions.
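+ *
+ * A typical (hypothetical) caller goes through the chip-ops indirection
+ * rather than invoking this routine directly, e.g. to read 64 bytes
+ * from EDC0 into a word-aligned host buffer:
+ *
+ *	u32 buf[16];
+ *	int ret = hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR,
+ *					       MEM_EDC0, 0, sizeof(buf),
+ *					       buf, 1);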
+ */ +static int +csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr, + u32 len, uint32_t *buf, int dir) +{ + u32 pos, start, offset, memoffset, bar0; + u32 edc_size, mc_size, mem_reg, mem_aperture, mem_base; + + /* + * Argument sanity checks ... + */ + if ((addr & 0x3) || (len & 0x3)) + return -EINVAL; + + /* Offset into the region of memory which is being accessed + * MEM_EDC0 = 0 + * MEM_EDC1 = 1 + * MEM_MC = 2 -- T4 + */ + edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR)); + if (mtype != MEM_MC1) + memoffset = (mtype * (edc_size * 1024 * 1024)); + else { + mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw, + MA_EXT_MEMORY_BAR)); + memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; + } + + /* Determine the PCIE_MEM_ACCESS_OFFSET */ + addr = addr + memoffset; + + /* + * Each PCI-E Memory Window is programmed with a window size -- or + * "aperture" -- which controls the granularity of its mapping onto + * adapter memory. We need to grab that aperture in order to know + * how to use the specified window. The window is also programmed + * with the base address of the Memory Window in BAR0's address + * space. For T4 this is an absolute PCI-E Bus Address. For T5 + * the address is relative to BAR0. + */ + mem_reg = csio_rd_reg32(hw, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win)); + mem_aperture = 1 << (WINDOW(mem_reg) + 10); + mem_base = GET_PCIEOFST(mem_reg) << 10; + + bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0); + bar0 &= PCI_BASE_ADDRESS_MEM_MASK; + mem_base -= bar0; + + start = addr & ~(mem_aperture-1); + offset = addr - start; + + csio_dbg(hw, "csio_t4_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n", + mem_reg, mem_aperture); + csio_dbg(hw, "csio_t4_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n", + mem_base, memoffset); + csio_dbg(hw, "csio_t4_memory_rw: bar0: 0x%x, start:0x%x, offset:0x%x\n", + bar0, start, offset); + csio_dbg(hw, "csio_t4_memory_rw: mtype: %d, addr: 0x%x, len: %d\n", + mtype, addr, len); + + for (pos = start; len > 0; pos += mem_aperture, offset = 0) { + /* + * Move PCI-E Memory Window to our current transfer + * position. Read it back to ensure that changes propagate + * before we attempt to use the new value. + */ + csio_wr_reg32(hw, pos, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win)); + csio_rd_reg32(hw, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win)); + + while (offset < mem_aperture && len > 0) { + if (dir) + *buf++ = csio_rd_reg32(hw, mem_base + offset); + else + csio_wr_reg32(hw, *buf++, mem_base + offset); + + offset += sizeof(__be32); + len -= sizeof(__be32); + } + } + return 0; +} + +/* + * csio_t4_dfs_create_ext_mem - setup debugfs for MC to read the values + * @hw: the csio_hw + * + * This function creates files in the debugfs with external memory region MC. 
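+ *
+ * The node appears under the driver's debugfs root (e.g.
+ * /sys/kernel/debug/csiostor/<pci-device>/mc) and reads back raw
+ * adapter memory through chip_mc_read() via csio_mem_read().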
+ */ +static void +csio_t4_dfs_create_ext_mem(struct csio_hw *hw) +{ + u32 size; + int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE); + if (i & EXT_MEM_ENABLE) { + size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR); + csio_add_debugfs_mem(hw, "mc", MEM_MC, + EXT_MEM_SIZE_GET(size)); + } +} + +/* T4 adapter specific function */ +struct csio_hw_chip_ops t4_ops = { + .chip_set_mem_win = csio_t4_set_mem_win, + .chip_pcie_intr_handler = csio_t4_pcie_intr_handler, + .chip_flash_cfg_addr = csio_t4_flash_cfg_addr, + .chip_mc_read = csio_t4_mc_read, + .chip_edc_read = csio_t4_edc_read, + .chip_memory_rw = csio_t4_memory_rw, + .chip_dfs_create_ext_mem = csio_t4_dfs_create_ext_mem, +}; diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c new file mode 100644 index 00000000000..27745c170c2 --- /dev/null +++ b/drivers/scsi/csiostor/csio_hw_t5.c @@ -0,0 +1,397 @@ +/* + * This file is part of the Chelsio FCoE driver for Linux. + * + * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "csio_hw.h" +#include "csio_init.h" + +static int +csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win) +{ + u32 mem_win_base; + /* + * Truncation intentional: we only read the bottom 32-bits of the + * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to + * read BAR0 instead of using pci_resource_start() because we could be + * operating from within a Virtual Machine which is trapping our + * accesses to our Configuration Space and we need to set up the PCI-E + * Memory Window decoders with the actual addresses which will be + * coming across the PCI-E link. + */ + + /* For T5, only relative offset inside the PCIe BAR is passed */ + mem_win_base = MEMWIN_BASE; + + /* + * Set up memory window for accessing adapter memory ranges. (Read + * back MA register to ensure that changes propagate before we attempt + * to use the new values.) + */ + csio_wr_reg32(hw, mem_win_base | BIR(0) | + WINDOW(ilog2(MEMWIN_APERTURE) - 10), + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win)); + csio_rd_reg32(hw, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win)); + + return 0; +} + +/* + * Interrupt handler for the PCIE module. 
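+ *
+ * Same table-driven walk as the T4 handler in csio_hw_t4.c; only the
+ * per-chip cause tables below differ.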
+ */ +static void +csio_t5_pcie_intr_handler(struct csio_hw *hw) +{ + static struct intr_info sysbus_intr_info[] = { + { RNPP, "RXNP array parity error", -1, 1 }, + { RPCP, "RXPC array parity error", -1, 1 }, + { RCIP, "RXCIF array parity error", -1, 1 }, + { RCCP, "Rx completions control array parity error", -1, 1 }, + { RFTP, "RXFT array parity error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + static struct intr_info pcie_port_intr_info[] = { + { TPCP, "TXPC array parity error", -1, 1 }, + { TNPP, "TXNP array parity error", -1, 1 }, + { TFTP, "TXFT array parity error", -1, 1 }, + { TCAP, "TXCA array parity error", -1, 1 }, + { TCIP, "TXCIF array parity error", -1, 1 }, + { RCAP, "RXCA array parity error", -1, 1 }, + { OTDD, "outbound request TLP discarded", -1, 1 }, + { RDPE, "Rx data parity error", -1, 1 }, + { TDUE, "Tx uncorrectable data error", -1, 1 }, + { 0, NULL, 0, 0 } + }; + + static struct intr_info pcie_intr_info[] = { + { MSTGRPPERR, "Master Response Read Queue parity error", + -1, 1 }, + { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 }, + { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 }, + { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, + { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, + { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, + { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, + { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error", + -1, 1 }, + { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error", + -1, 1 }, + { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, + { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 }, + { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, + { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, + { DREQWRPERR, "PCI DMA channel write request parity error", + -1, 1 }, + { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, + { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, + { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 }, + { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, + { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, + { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, + { FIDPERR, "PCI FID parity error", -1, 1 }, + { VFIDPERR, "PCI INTx clear parity error", -1, 1 }, + { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 }, + { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, + { IPRXHDRGRPPERR, "PCI IP Rx header group parity error", + -1, 1 }, + { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", + -1, 1 }, + { RPLPERR, "PCI IP replay buffer parity error", -1, 1 }, + { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 }, + { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 }, + { READRSPERR, "Outbound read error", -1, 0 }, + { 0, NULL, 0, 0 } + }; + + int fat; + fat = csio_handle_intr_status(hw, + PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, + sysbus_intr_info) + + csio_handle_intr_status(hw, + PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, + pcie_port_intr_info) + + csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info); + if (fat) + csio_hw_fatal_err(hw); +} + +/* + * csio_t5_flash_cfg_addr - return the address of the flash configuration file + * @hw: the HW module + * + * Return the address within the flash where the Firmware Configuration + * File is stored. 
+ */
+static unsigned int
+csio_t5_flash_cfg_addr(struct csio_hw *hw)
+{
+	return FLASH_CFG_START;
+}
+
+/*
+ * csio_t5_mc_read - read from MC through backdoor accesses
+ * @hw: the hw module
+ * @idx: index to the register
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+		uint64_t *ecc)
+{
+	int i;
+	uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
+	uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
+
+	mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD, idx);
+	mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR, idx);
+	mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN, idx);
+	mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
+	mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+
+	if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST)
+		return -EBUSY;
+	csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg);
+	csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg);
+	csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg);
+	csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+		      mc_bist_cmd_reg);
+	i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST,
+				     0, 10, 1, NULL);
+	if (i)
+		return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+
+	for (i = 15; i >= 0; i--)
+		*data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
+	if (ecc)
+		*ecc = csio_rd_reg64(hw, MC_DATA(16));
+#undef MC_DATA
+	return 0;
+}
+
+/*
+ * csio_t5_edc_read - read from EDC through backdoor accesses
+ * @hw: the hw module
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+		 uint64_t *ecc)
+{
+	int i;
+	uint32_t edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
+	uint32_t edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
+
+/*
+ * These macros are missing in the t4_regs.h file.
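+ * EDC_REG_T5(reg, idx) simply offsets @reg by idx strides, e.g.
+ * EDC_REG_T5(EDC_H_BIST_CMD, 1) addresses the BIST command register of
+ * the second EDC controller.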
+ */
+#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+
+	edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD, idx);
+	edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
+	edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
+	edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
+	edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+#undef EDC_REG_T5
+#undef EDC_STRIDE_T5
+
+	if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST)
+		return -EBUSY;
+	csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg);
+	csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg);
+	csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern);
+	csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+		      edc_bist_cmd_reg);
+	i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST,
+				     0, 10, 1, NULL);
+	if (i)
+		return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+
+	for (i = 15; i >= 0; i--)
+		*data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
+	if (ecc)
+		*ecc = csio_rd_reg64(hw, EDC_DATA(16));
+#undef EDC_DATA
+	return 0;
+}
+
+/*
+ * csio_t5_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @hw: the csio_hw
+ * @win: PCI-E memory Window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
+ * @addr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @buf: host memory buffer
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address, length and host buffer must be aligned on
+ * 32-bit boundaries. The memory is transferred as a raw byte sequence
+ * from/to the firmware's memory. If this memory contains data
+ * structures which contain multi-byte integers, it's the caller's
+ * responsibility to perform appropriate byte order conversions.
+ */
+static int
+csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
+		  u32 len, uint32_t *buf, int dir)
+{
+	u32 pos, start, offset, memoffset;
+	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+
+	/*
+	 * Argument sanity checks ...
+	 */
+	if ((addr & 0x3) || (len & 0x3))
+		return -EINVAL;
+
+	/* Offset into the region of memory which is being accessed
+	 * MEM_EDC0 = 0
+	 * MEM_EDC1 = 1
+	 * MEM_MC   = 2 -- T4
+	 * MEM_MC0  = 2 -- For T5
+	 * MEM_MC1  = 3 -- For T5
+	 */
+	edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR));
+	if (mtype != MEM_MC1)
+		memoffset = (mtype * (edc_size * 1024 * 1024));
+	else {
+		mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw,
+							 MA_EXT_MEMORY_BAR));
+		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+	}
+
+	/* Determine the PCIE_MEM_ACCESS_OFFSET */
+	addr = addr + memoffset;
+
+	/*
+	 * Each PCI-E Memory Window is programmed with a window size -- or
+	 * "aperture" -- which controls the granularity of its mapping onto
+	 * adapter memory. We need to grab that aperture in order to know
+	 * how to use the specified window. The window is also programmed
+	 * with the base address of the Memory Window in BAR0's address
+	 * space. For T4 this is an absolute PCI-E Bus Address. For T5
+	 * the address is relative to BAR0.
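+ *
+ * For example, with the 2KB MEMWIN_APERTURE programmed at init time
+ * (WINDOW() == ilog2(2048) - 10 == 1), the read-back below yields
+ * mem_aperture = 1 << (1 + 10) == 2048, and each transfer position is
+ * aligned down to that granularity.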
+ */ + mem_reg = csio_rd_reg32(hw, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win)); + mem_aperture = 1 << (WINDOW(mem_reg) + 10); + mem_base = GET_PCIEOFST(mem_reg) << 10; + + start = addr & ~(mem_aperture-1); + offset = addr - start; + win_pf = V_PFNUM(hw->pfn); + + csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n", + mem_reg, mem_aperture); + csio_dbg(hw, "csio_t5_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n", + mem_base, memoffset); + csio_dbg(hw, "csio_t5_memory_rw: start:0x%x, offset:0x%x, win_pf:%d\n", + start, offset, win_pf); + csio_dbg(hw, "csio_t5_memory_rw: mtype: %d, addr: 0x%x, len: %d\n", + mtype, addr, len); + + for (pos = start; len > 0; pos += mem_aperture, offset = 0) { + /* + * Move PCI-E Memory Window to our current transfer + * position. Read it back to ensure that changes propagate + * before we attempt to use the new value. + */ + csio_wr_reg32(hw, pos | win_pf, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win)); + csio_rd_reg32(hw, + PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win)); + + while (offset < mem_aperture && len > 0) { + if (dir) + *buf++ = csio_rd_reg32(hw, mem_base + offset); + else + csio_wr_reg32(hw, *buf++, mem_base + offset); + + offset += sizeof(__be32); + len -= sizeof(__be32); + } + } + return 0; +} + +/* + * csio_t5_dfs_create_ext_mem - setup debugfs for MC0 or MC1 to read the values + * @hw: the csio_hw + * + * This function creates files in the debugfs with external memory region + * MC0 & MC1. + */ +static void +csio_t5_dfs_create_ext_mem(struct csio_hw *hw) +{ + u32 size; + int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE); + if (i & EXT_MEM_ENABLE) { + size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR); + csio_add_debugfs_mem(hw, "mc0", MEM_MC0, + EXT_MEM_SIZE_GET(size)); + } + if (i & EXT_MEM1_ENABLE) { + size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR); + csio_add_debugfs_mem(hw, "mc1", MEM_MC1, + EXT_MEM_SIZE_GET(size)); + } +} + +/* T5 adapter specific function */ +struct csio_hw_chip_ops t5_ops = { + .chip_set_mem_win = csio_t5_set_mem_win, + .chip_pcie_intr_handler = csio_t5_pcie_intr_handler, + .chip_flash_cfg_addr = csio_t5_flash_cfg_addr, + .chip_mc_read = csio_t5_mc_read, + .chip_edc_read = csio_t5_edc_read, + .chip_memory_rw = csio_t5_memory_rw, + .chip_dfs_create_ext_mem = csio_t5_dfs_create_ext_mem, +}; diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index 0604b5ff363..1aafc331ee6 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -81,9 +81,11 @@ csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) __be32 data[16]; if (mem == MEM_MC) - ret = csio_hw_mc_read(hw, pos, data, NULL); + ret = hw->chip_ops->chip_mc_read(hw, 0, pos, + data, NULL); else - ret = csio_hw_edc_read(hw, mem, pos, data, NULL); + ret = hw->chip_ops->chip_edc_read(hw, mem, pos, + data, NULL); if (ret) return ret; @@ -108,7 +110,7 @@ static const struct file_operations csio_mem_debugfs_fops = { .llseek = default_llseek, }; -static void csio_add_debugfs_mem(struct csio_hw *hw, const char *name, +void csio_add_debugfs_mem(struct csio_hw *hw, const char *name, unsigned int idx, unsigned int size_mb) { struct dentry *de; @@ -131,9 +133,8 @@ static int csio_setup_debugfs(struct csio_hw *hw) csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5); if (i & EDRAM1_ENABLE) csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5); - if (i & EXT_MEM_ENABLE) - csio_add_debugfs_mem(hw, "mc", MEM_MC, - EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR))); + + 
hw->chip_ops->chip_dfs_create_ext_mem(hw); return 0; } @@ -1009,7 +1010,6 @@ err_lnode_exit: csio_hw_stop(hw); spin_unlock_irq(&hw->lock); csio_lnodes_unblock_request(hw); - pci_set_drvdata(hw->pdev, NULL); csio_lnodes_exit(hw, 0); csio_hw_free(hw); err_pci_exit: @@ -1043,7 +1043,6 @@ static void csio_remove_one(struct pci_dev *pdev) csio_lnodes_exit(hw, 0); csio_hw_free(hw); - pci_set_drvdata(pdev, NULL); csio_pci_exit(pdev, &bars); } @@ -1169,7 +1168,7 @@ static struct pci_error_handlers csio_err_handler = { }; static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = { - CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T440DBG FCOE */ + CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T4 DEBUG FCOE */ CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */ CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */ CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */ @@ -1184,8 +1183,34 @@ static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = { CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */ CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */ CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */ - CSIO_DEVICE(CSIO_DEVID_PE10K, 0), /* PE10K FCOE */ - CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0), /* PE10K FCOE on PF1 */ + CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T4_FCOE, 0), /* AMSTERDAM T4 FCOE */ + CSIO_DEVICE(CSIO_DEVID_HUAWEI_T480_FCOE, 0), /* HUAWEI T480 FCOE */ + CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_FCOE, 0), /* HUAWEI T440 FCOE */ + CSIO_DEVICE(CSIO_DEVID_HUAWEI_STG310_FCOE, 0), /* HUAWEI STG FCOE */ + CSIO_DEVICE(CSIO_DEVID_ACROMAG_XMC_XAUI, 0), /* ACROMAG XAUI FCOE */ + CSIO_DEVICE(CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE, 0),/* QUANTA MEZZ FCOE */ + CSIO_DEVICE(CSIO_DEVID_HUAWEI_10GT_FCOE, 0), /* HUAWEI 10GT FCOE */ + CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_TOE_FCOE, 0),/* HUAWEI T4 TOE FCOE */ + CSIO_DEVICE(CSIO_DEVID_T580DBG_FCOE, 0), /* T5 DEBUG FCOE */ + CSIO_DEVICE(CSIO_DEVID_T520CR_FCOE, 0), /* T520CR FCOE */ + CSIO_DEVICE(CSIO_DEVID_T522CR_FCOE, 0), /* T522CR FCOE */ + CSIO_DEVICE(CSIO_DEVID_T540CR_FCOE, 0), /* T540CR FCOE */ + CSIO_DEVICE(CSIO_DEVID_T520BCH_FCOE, 0), /* T520BCH FCOE */ + CSIO_DEVICE(CSIO_DEVID_T540BCH_FCOE, 0), /* T540BCH FCOE */ + CSIO_DEVICE(CSIO_DEVID_T540CH_FCOE, 0), /* T540CH FCOE */ + CSIO_DEVICE(CSIO_DEVID_T520SO_FCOE, 0), /* T520SO FCOE */ + CSIO_DEVICE(CSIO_DEVID_T520CX_FCOE, 0), /* T520CX FCOE */ + CSIO_DEVICE(CSIO_DEVID_T520BT_FCOE, 0), /* T520BT FCOE */ + CSIO_DEVICE(CSIO_DEVID_T504BT_FCOE, 0), /* T504BT FCOE */ + CSIO_DEVICE(CSIO_DEVID_B520_FCOE, 0), /* B520 FCOE */ + CSIO_DEVICE(CSIO_DEVID_B504_FCOE, 0), /* B504 FCOE */ + CSIO_DEVICE(CSIO_DEVID_T580CR2_FCOE, 0), /* T580 CR FCOE */ + CSIO_DEVICE(CSIO_DEVID_T540LPCR_FCOE, 0), /* T540 LP-CR FCOE */ + CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T5_FCOE, 0), /* AMSTERDAM T5 FCOE */ + CSIO_DEVICE(CSIO_DEVID_T580LPCR_FCOE, 0), /* T580 LP-CR FCOE */ + CSIO_DEVICE(CSIO_DEVID_T520LLCR_FCOE, 0), /* T520 LL-CR FCOE */ + CSIO_DEVICE(CSIO_DEVID_T560CR_FCOE, 0), /* T560 CR FCOE */ + CSIO_DEVICE(CSIO_DEVID_T580CR_FCOE, 0), /* T580 CR FCOE */ { 0, 0, 0, 0, 0, 0, 0 } }; @@ -1259,4 +1284,5 @@ MODULE_DESCRIPTION(CSIO_DRV_DESC); MODULE_LICENSE(CSIO_DRV_LICENSE); MODULE_DEVICE_TABLE(pci, csio_pci_tbl); MODULE_VERSION(CSIO_DRV_VERSION); -MODULE_FIRMWARE(CSIO_FW_FNAME); +MODULE_FIRMWARE(FW_FNAME_T4); +MODULE_FIRMWARE(FW_FNAME_T5); diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h index 0838fd7ec9c..5cc5d317a44 100644 --- a/drivers/scsi/csiostor/csio_init.h +++ b/drivers/scsi/csiostor/csio_init.h @@ -52,31 +52,6 @@ #define 
CSIO_DRV_DESC "Chelsio FCoE driver" #define CSIO_DRV_VERSION "1.0.0" -#define CSIO_DEVICE(devid, idx) \ -{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) } - -#define CSIO_IS_T4_FPGA(_dev) (((_dev) == CSIO_DEVID_PE10K) ||\ - ((_dev) == CSIO_DEVID_PE10K_PF1)) - -/* FCoE device IDs */ -#define CSIO_DEVID_PE10K 0xA000 -#define CSIO_DEVID_PE10K_PF1 0xA001 -#define CSIO_DEVID_T440DBG_FCOE 0x4600 -#define CSIO_DEVID_T420CR_FCOE 0x4601 -#define CSIO_DEVID_T422CR_FCOE 0x4602 -#define CSIO_DEVID_T440CR_FCOE 0x4603 -#define CSIO_DEVID_T420BCH_FCOE 0x4604 -#define CSIO_DEVID_T440BCH_FCOE 0x4605 -#define CSIO_DEVID_T440CH_FCOE 0x4606 -#define CSIO_DEVID_T420SO_FCOE 0x4607 -#define CSIO_DEVID_T420CX_FCOE 0x4608 -#define CSIO_DEVID_T420BT_FCOE 0x4609 -#define CSIO_DEVID_T404BT_FCOE 0x460A -#define CSIO_DEVID_B420_FCOE 0x460B -#define CSIO_DEVID_B404_FCOE 0x460C -#define CSIO_DEVID_T480CR_FCOE 0x460D -#define CSIO_DEVID_T440LPCR_FCOE 0x460E - extern struct fc_function_template csio_fc_transport_funcs; extern struct fc_function_template csio_fc_transport_vport_funcs; @@ -100,6 +75,10 @@ struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool, void csio_shost_exit(struct csio_lnode *); void csio_lnodes_exit(struct csio_hw *, bool); +/* DebugFS helper routines */ +void csio_add_debugfs_mem(struct csio_hw *, const char *, + unsigned int, unsigned int); + static inline struct Scsi_Host * csio_ln_to_shost(struct csio_lnode *ln) { diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h index 8d84988ab06..372a67d122d 100644 --- a/drivers/scsi/csiostor/csio_lnode.h +++ b/drivers/scsi/csiostor/csio_lnode.h @@ -114,7 +114,7 @@ struct csio_lnode_stats { uint32_t n_rnode_match; /* matched rnode */ uint32_t n_dev_loss_tmo; /* Device loss timeout */ uint32_t n_fdmi_err; /* fdmi err */ - uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */ + uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */ enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */ uint32_t n_rnode_alloc; /* rnode allocated */ uint32_t n_rnode_free; /* rnode freed */ diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c index 5b27c48f683..15b63514254 100644 --- a/drivers/scsi/csiostor/csio_mb.c +++ b/drivers/scsi/csiostor/csio_mb.c @@ -182,7 +182,7 @@ csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, * @tmo: Command timeout. * @pf: PF number. * @vf: VF number. - * @nparams: Number of paramters + * @nparams: Number of parameters * @params: Parameter mnemonic array. * @val: Parameter value array. * @wr: Write/Read PARAMS. 
@@ -326,83 +326,6 @@ csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET); } -void -csio_rss_glb_config(struct csio_hw *hw, struct csio_mb *mbp, - uint32_t tmo, uint8_t mode, unsigned int flags, - void (*cbfn)(struct csio_hw *, struct csio_mb *)) -{ - struct fw_rss_glb_config_cmd *cmdp = - (struct fw_rss_glb_config_cmd *)(mbp->mb); - - CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); - - cmdp->op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | - FW_CMD_REQUEST | FW_CMD_WRITE); - cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); - - if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { - cmdp->u.manual.mode_pkd = - htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); - } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { - cmdp->u.basicvirtual.mode_pkd = - htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); - cmdp->u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); - } -} - - -/* - * csio_mb_pfvf - FW Write PF/VF capabilities command helper. - * @hw: The HW structure - * @mbp: Mailbox structure - * @pf: - * @vf: - * @txq: - * @txq_eht_ctrl: - * @rxqi: - * @rxq: - * @tc: - * @vi: - * @pmask: - * @rcaps: - * @wxcaps: - * @cbfn: Callback, if any. - * - */ -void -csio_mb_pfvf(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, - unsigned int pf, unsigned int vf, unsigned int txq, - unsigned int txq_eth_ctrl, unsigned int rxqi, - unsigned int rxq, unsigned int tc, unsigned int vi, - unsigned int cmask, unsigned int pmask, unsigned int nexactf, - unsigned int rcaps, unsigned int wxcaps, - void (*cbfn) (struct csio_hw *, struct csio_mb *)) -{ - struct fw_pfvf_cmd *cmdp = (struct fw_pfvf_cmd *)(mbp->mb); - - CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1); - - cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - FW_PFVF_CMD_PFN(pf) | - FW_PFVF_CMD_VFN(vf)); - cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16)); - cmdp->niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | - FW_PFVF_CMD_NIQ(rxq)); - - cmdp->type_to_neq = htonl(FW_PFVF_CMD_TYPE | - FW_PFVF_CMD_CMASK(cmask) | - FW_PFVF_CMD_PMASK(pmask) | - FW_PFVF_CMD_NEQ(txq)); - cmdp->tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | - FW_PFVF_CMD_NVI(vi) | - FW_PFVF_CMD_NEXACTF(nexactf)); - cmdp->r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) | - FW_PFVF_CMD_WX_CAPS(wxcaps) | - FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); -} - #define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG) @@ -721,7 +644,7 @@ csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv, * @mbp: Mailbox structure to initialize * @priv: Private data * @mb_tmo: Mailbox time-out period (in ms). - * @eq_ofld_params: (Offload) Egress queue paramters. + * @eq_ofld_params: (Offload) Egress queue parameters. * @cbfn: The call-back function * * @@ -752,7 +675,7 @@ csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv, * @priv: Private data * @mb_tmo: Mailbox time-out period (in ms). * @cascaded_req: TRUE - if this request is cascased with Eq-alloc request. - * @eq_ofld_params: (Offload) Egress queue paramters. + * @eq_ofld_params: (Offload) Egress queue parameters. * @cbfn: The call-back function * * @@ -817,7 +740,7 @@ csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv, * @mbp: Mailbox structure to initialize * @priv: Private data. * @mb_tmo: Mailbox time-out period (in ms). - * @eq_ofld_params: (Offload) Egress queue paramters. 
+ * @eq_ofld_params: (Offload) Egress queue parameters. * @cbfn: The call-back function * * @@ -840,7 +763,7 @@ csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, * @hw: The HW structure. * @mbp: Mailbox structure to initialize. * @retval: Firmware return value. - * @eq_ofld_params: (Offload) Egress queue paramters. + * @eq_ofld_params: (Offload) Egress queue parameters. * */ void @@ -870,7 +793,7 @@ csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw, * @mbp: Mailbox structure to initialize * @priv: Private data area. * @mb_tmo: Mailbox time-out period (in ms). - * @eq_ofld_params: (Offload) Egress queue paramters, that is to be freed. + * @eq_ofld_params: (Offload) Egress queue parameters, that is to be freed. * @cbfn: The call-back function * * diff --git a/drivers/scsi/csiostor/csio_mb.h b/drivers/scsi/csiostor/csio_mb.h index 1788ea506f3..a84179e54ab 100644 --- a/drivers/scsi/csiostor/csio_mb.h +++ b/drivers/scsi/csiostor/csio_mb.h @@ -183,17 +183,6 @@ void csio_mb_caps_config(struct csio_hw *, struct csio_mb *, uint32_t, bool, bool, bool, bool, void (*)(struct csio_hw *, struct csio_mb *)); -void csio_rss_glb_config(struct csio_hw *, struct csio_mb *, - uint32_t, uint8_t, unsigned int, - void (*)(struct csio_hw *, struct csio_mb *)); - -void csio_mb_pfvf(struct csio_hw *, struct csio_mb *, uint32_t, - unsigned int, unsigned int, unsigned int, - unsigned int, unsigned int, unsigned int, - unsigned int, unsigned int, unsigned int, - unsigned int, unsigned int, unsigned int, - unsigned int, void (*) (struct csio_hw *, struct csio_mb *)); - void csio_mb_port(struct csio_hw *, struct csio_mb *, uint32_t, uint8_t, bool, uint32_t, uint16_t, void (*) (struct csio_hw *, struct csio_mb *)); diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c index 51c6a388de2..e9c3b045f58 100644 --- a/drivers/scsi/csiostor/csio_rnode.c +++ b/drivers/scsi/csiostor/csio_rnode.c @@ -302,7 +302,7 @@ csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid, { uint8_t rport_type; struct csio_rnode *rn, *match_rn; - uint32_t vnp_flowid; + uint32_t vnp_flowid = 0; __be32 *port_id; port_id = (__be32 *)&rdevp->r_id[0]; @@ -350,6 +350,14 @@ csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid, * Else, go ahead and alloc a new rnode. 
*/ if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) { + if (rn == match_rn) + goto found_rnode; + csio_ln_dbg(ln, + "nport_id:x%x and wwpn:%llx" + " match for ssni:x%x\n", + rn->nport_id, + wwn_to_u64(rdevp->wwpn), + rdev_flowid); if (csio_is_rnode_ready(rn)) { csio_ln_warn(ln, "rnode is already" diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h index a3b434c801d..43343422122 100644 --- a/drivers/scsi/csiostor/csio_rnode.h +++ b/drivers/scsi/csiostor/csio_rnode.h @@ -63,7 +63,7 @@ struct csio_rnode_stats { uint32_t n_err_nomem; /* error nomem */ uint32_t n_evt_unexp; /* unexpected event */ uint32_t n_evt_drop; /* unexpected event */ - uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */ + uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */ enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */ uint32_t n_lun_rst; /* Number of resets of * of LUNs under this diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index ddd38e5eb0e..7494e4bc69c 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -1479,8 +1479,8 @@ csio_store_dbg_level(struct device *dev, } static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL); -static DEVICE_ATTR(device_reset, S_IRUGO | S_IWUSR, NULL, csio_device_reset); -static DEVICE_ATTR(disable_port, S_IRUGO | S_IWUSR, NULL, csio_disable_port); +static DEVICE_ATTR(device_reset, S_IWUSR, NULL, csio_device_reset); +static DEVICE_ATTR(disable_port, S_IWUSR, NULL, csio_disable_port); static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level, csio_store_dbg_level); diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c index c32df1bdaa9..4255ce264ab 100644 --- a/drivers/scsi/csiostor/csio_wr.c +++ b/drivers/scsi/csiostor/csio_wr.c @@ -85,8 +85,8 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq) */ if (flq->inc_idx >= 8) { csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) | - PIDX(flq->inc_idx / 8), - MYPF_REG(SGE_PF_KDOORBELL)); + CSIO_HW_PIDX(hw, flq->inc_idx / 8), + MYPF_REG(SGE_PF_KDOORBELL)); flq->inc_idx &= 7; } } @@ -989,7 +989,8 @@ csio_wr_issue(struct csio_hw *hw, int qidx, bool prio) wmb(); /* Ring SGE Doorbell writing q->pidx into it */ csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) | - PIDX(q->inc_idx), MYPF_REG(SGE_PF_KDOORBELL)); + CSIO_HW_PIDX(hw, q->inc_idx), + MYPF_REG(SGE_PF_KDOORBELL)); q->inc_idx = 0; return 0; @@ -1331,20 +1332,30 @@ csio_wr_fixup_host_params(struct csio_hw *hw) /* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */ csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0); - csio_wr_reg32(hw, - (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) + - sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), - SGE_FL_BUFFER_SIZE2); - csio_wr_reg32(hw, - (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) + - sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), - SGE_FL_BUFFER_SIZE3); + + /* + * If using hard params, the following will get set correctly + * in csio_wr_set_sge(). 
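+	 * The writes round each buffer size up to the free-list
+	 * alignment with the usual mask trick; e.g. with
+	 * csio_fl_align == 64, a programmed size of 1500 becomes
+	 * (1500 + 63) & ~63 == 1536.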
+ */ + if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) { + csio_wr_reg32(hw, + (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) + + sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), + SGE_FL_BUFFER_SIZE2); + csio_wr_reg32(hw, + (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) + + sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), + SGE_FL_BUFFER_SIZE3); + } csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ); /* default value of rx_dma_offset of the NIC driver */ csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK, PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET)); + + csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG, + CSUM_HAS_PSEUDO_HDR, 0); } static void @@ -1460,18 +1471,21 @@ csio_wr_set_sge(struct csio_hw *hw) * and generate an interrupt when this occurs so we can recover. */ csio_set_reg_field(hw, SGE_DBFIFO_STATUS, - HP_INT_THRESH(HP_INT_THRESH_MASK) | - LP_INT_THRESH(LP_INT_THRESH_MASK), - HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) | - LP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH)); + HP_INT_THRESH(HP_INT_THRESH_MASK) | + CSIO_HW_LP_INT_THRESH(hw, CSIO_HW_M_LP_INT_THRESH(hw)), + HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) | + CSIO_HW_LP_INT_THRESH(hw, CSIO_SGE_DBFIFO_INT_THRESH)); + csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP, ENABLE_DROP); /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */ CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1); - CSIO_SET_FLBUF_SIZE(hw, 2, CSIO_SGE_FLBUF_SIZE2); - CSIO_SET_FLBUF_SIZE(hw, 3, CSIO_SGE_FLBUF_SIZE3); + csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1) + & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2); + csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1) + & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3); CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4); CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5); CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6); @@ -1522,22 +1536,24 @@ void csio_wr_sge_init(struct csio_hw *hw) { /* - * If we are master: + * If we are master and chip is not initialized: * - If we plan to use the config file, we need to fixup some * host specific registers, and read the rest of the SGE * configuration. * - If we dont plan to use the config file, we need to initialize * SGE entirely, including fixing the host specific registers. + * If we are master and chip is initialized, just read and work off of + * the already initialized SGE values. * If we arent the master, we are only allowed to read and work off of * the already initialized SGE values. * * Therefore, before calling this function, we assume that the master- - * ship of the card, and whether to use config file or not, have - * already been decided. In other words, CSIO_HWF_USING_SOFT_PARAMS and - * CSIO_HWF_MASTER should be set/unset. + * ship of the card, state and whether to use config file or not, have + * already been decided. 
*/ if (csio_is_hw_master(hw)) { - csio_wr_fixup_host_params(hw); + if (hw->fw_state != CSIO_DEV_STATE_INIT) + csio_wr_fixup_host_params(hw); if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) csio_wr_get_sge(hw); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 3fecf35ba29..e8ee5e5fe0e 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -20,6 +20,7 @@ #include <net/dst.h> #include <linux/netdevice.h> +#include "t4_regs.h" #include "t4_msg.h" #include "cxgb4.h" #include "cxgb4_uld.h" @@ -32,13 +33,12 @@ static unsigned int dbg_level; #include "../libcxgbi.h" #define DRV_MODULE_NAME "cxgb4i" -#define DRV_MODULE_DESC "Chelsio T4 iSCSI Driver" -#define DRV_MODULE_VERSION "0.9.1" -#define DRV_MODULE_RELDATE "Aug. 2010" +#define DRV_MODULE_DESC "Chelsio T4/T5 iSCSI Driver" +#define DRV_MODULE_VERSION "0.9.4" static char version[] = DRV_MODULE_DESC " " DRV_MODULE_NAME - " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + " v" DRV_MODULE_VERSION "\n"; MODULE_AUTHOR("Chelsio Communications, Inc."); MODULE_DESCRIPTION(DRV_MODULE_DESC); @@ -178,7 +178,7 @@ static inline int is_ofld_imm(const struct sk_buff *skb) static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, struct l2t_entry *e) { - struct cpl_act_open_req *req; + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); unsigned long long opt0; unsigned int opt2; @@ -195,29 +195,59 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, RCV_BUFSIZ(cxgb4i_rcv_win >> 10); opt2 = RX_CHANNEL(0) | RSS_QUEUE_VALID | - (1 << 20) | (1 << 22) | + (1 << 20) | RSS_QUEUE(csk->rss_qid); - set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); - req = (struct cpl_act_open_req *)skb->head; + if (is_t4(lldi->adapter_type)) { + struct cpl_act_open_req *req = + (struct cpl_act_open_req *)skb->head; - INIT_TP_WR(req, 0); - OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid)); - req->local_port = csk->saddr.sin_port; - req->peer_port = csk->daddr.sin_port; - req->local_ip = csk->saddr.sin_addr.s_addr; - req->peer_ip = csk->daddr.sin_addr.s_addr; - req->opt0 = cpu_to_be64(opt0); - req->params = 0; - req->opt2 = cpu_to_be32(opt2); + req->local_port = csk->saddr.sin_port; + req->peer_port = csk->daddr.sin_port; + req->local_ip = csk->saddr.sin_addr.s_addr; + req->peer_ip = csk->daddr.sin_addr.s_addr; + req->opt0 = cpu_to_be64(opt0); + req->params = cpu_to_be32(cxgb4_select_ntuple( + csk->cdev->ports[csk->port_id], + csk->l2t)); + opt2 |= 1 << 22; + req->opt2 = cpu_to_be32(opt2); - log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, - "csk 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", - csk, &req->local_ip, ntohs(req->local_port), - &req->peer_ip, ntohs(req->peer_port), - csk->atid, csk->rss_qid); + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", + csk, &req->local_ip, ntohs(req->local_port), + &req->peer_ip, ntohs(req->peer_port), + csk->atid, csk->rss_qid); + } else { + struct cpl_t5_act_open_req *req = + (struct cpl_t5_act_open_req *)skb->head; + + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, + qid_atid)); + req->local_port = csk->saddr.sin_port; + req->peer_port = csk->daddr.sin_port; + req->local_ip = csk->saddr.sin_addr.s_addr; + req->peer_ip = csk->daddr.sin_addr.s_addr; + req->opt0 = 
cpu_to_be64(opt0); + req->params = cpu_to_be64(V_FILTER_TUPLE( + cxgb4_select_ntuple( + csk->cdev->ports[csk->port_id], + csk->l2t))); + opt2 |= 1 << 31; + req->opt2 = cpu_to_be32(opt2); + log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, + "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n", + csk, &req->local_ip, ntohs(req->local_port), + &req->peer_ip, ntohs(req->peer_port), + csk->atid, csk->rss_qid); + } + + set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); } @@ -358,7 +388,7 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) return DIV_ROUND_UP(skb->len, 8); flits = skb_transport_offset(skb) / 8; cnt = skb_shinfo(skb)->nr_frags; - if (skb->tail != skb->transport_header) + if (skb_tail_pointer(skb) != skb_transport_header(skb)) cnt++; return flits + sgl_len(cnt); } @@ -632,6 +662,7 @@ static void csk_act_open_retry_timer(unsigned long data) { struct sk_buff *skb; struct cxgbi_sock *csk = (struct cxgbi_sock *)data; + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", @@ -639,7 +670,10 @@ static void csk_act_open_retry_timer(unsigned long data) cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); - skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC); + skb = alloc_wr(is_t4(lldi->adapter_type) ? + sizeof(struct cpl_act_open_req) : + sizeof(struct cpl_t5_act_open_req), + 0, GFP_ATOMIC); if (!skb) cxgbi_sock_fail_act_open(csk, -ENOMEM); else { @@ -871,7 +905,7 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) if (!csk->skb_ulp_lhdr) { unsigned char *bhs; - unsigned int hlen, dlen; + unsigned int hlen, dlen, plen; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX, "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n", @@ -890,11 +924,15 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) hlen = ntohs(cpl->len); dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF; - if ((hlen + dlen) != ISCSI_PDU_LEN(pdu_len_ddp) - 40) { + plen = ISCSI_PDU_LEN(pdu_len_ddp); + if (is_t4(lldi->adapter_type)) + plen -= 40; + + if ((hlen + dlen) != plen) { pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len " "mismatch %u != %u + %u, seq 0x%x.\n", - csk->tid, ISCSI_PDU_LEN(pdu_len_ddp) - 40, - hlen, dlen, cxgbi_skcb_tcp_seq(skb)); + csk->tid, plen, hlen, dlen, + cxgbi_skcb_tcp_seq(skb)); goto abort_conn; } @@ -1154,7 +1192,10 @@ static int init_act_open(struct cxgbi_sock *csk) } cxgbi_sock_get(csk); - skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL); + skb = alloc_wr(is_t4(lldi->adapter_type) ? 
+ sizeof(struct cpl_act_open_req) : + sizeof(struct cpl_t5_act_open_req), + 0, GFP_ATOMIC); if (!skb) goto rel_resource; skb->sk = (struct sock *)csk; @@ -1193,6 +1234,8 @@ rel_resource: return -EINVAL; } +#define CPL_ISCSI_DATA 0xB2 +#define CPL_RX_ISCSI_DDP 0x49 cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { [CPL_ACT_ESTABLISH] = do_act_establish, [CPL_ACT_OPEN_RPL] = do_act_open_rpl, @@ -1202,8 +1245,10 @@ cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { [CPL_CLOSE_CON_RPL] = do_close_con_rpl, [CPL_FW4_ACK] = do_fw4_ack, [CPL_ISCSI_HDR] = do_rx_iscsi_hdr, + [CPL_ISCSI_DATA] = do_rx_iscsi_hdr, [CPL_SET_TCB_RPL] = do_set_tcb_rpl, [CPL_RX_DATA_DDP] = do_rx_data_ddp, + [CPL_RX_ISCSI_DDP] = do_rx_data_ddp, }; int cxgb4i_ofld_init(struct cxgbi_device *cdev) @@ -1234,14 +1279,20 @@ int cxgb4i_ofld_init(struct cxgbi_device *cdev) * functions to program the pagepod in h/w */ #define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */ -static inline void ulp_mem_io_set_hdr(struct ulp_mem_io *req, +static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi, + struct ulp_mem_io *req, unsigned int wr_len, unsigned int dlen, unsigned int pm_addr) { struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1); INIT_ULPTX_WR(req, wr_len, 0, 0); - req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1 << 23)); + if (is_t4(lldi->adapter_type)) + req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | + (ULP_MEMIO_ORDER(1))); + else + req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | + (V_T5_ULP_MEMIO_IMM(1))); req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5)); req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5)); req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16)); @@ -1257,6 +1308,7 @@ static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id, unsigned int gl_pidx) { struct cxgbi_ddp_info *ddp = cdev->ddp; + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct sk_buff *skb; struct ulp_mem_io *req; struct ulptx_idata *idata; @@ -1276,7 +1328,7 @@ static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id, req = (struct ulp_mem_io *)skb->head; set_queue(skb, CPL_PRIORITY_CONTROL, NULL); - ulp_mem_io_set_hdr(req, wr_len, dlen, pm_addr); + ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr); idata = (struct ulptx_idata *)(req + 1); ppod = (struct cxgbi_pagepod *)(idata + 1); diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 80fa99b3d38..8135f04671a 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -658,11 +658,11 @@ static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat, static inline void *cxgbi_alloc_big_mem(unsigned int size, gfp_t gfp) { - void *p = kmalloc(size, gfp); + void *p = kzalloc(size, gfp | __GFP_NOWARN); + if (!p) - p = vmalloc(size); - if (p) - memset(p, 0, size); + p = vzalloc(size); + return p; } diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index fed486bfd3f..83d9bf6fa6c 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -308,6 +308,8 @@ struct AdapterCtlBlk { struct timer_list waiting_timer; struct timer_list selto_timer; + unsigned long last_reset; + u16 srb_count; u8 sel_timeout; @@ -860,9 +862,9 @@ static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to) init_timer(&acb->waiting_timer); acb->waiting_timer.function = waiting_timeout; acb->waiting_timer.data = (unsigned long) acb; - if (time_before(jiffies + to, acb->scsi_host->last_reset - HZ / 2)) + if (time_before(jiffies + to, 
acb->last_reset - HZ / 2)) acb->waiting_timer.expires = - acb->scsi_host->last_reset - HZ / 2 + 1; + acb->last_reset - HZ / 2 + 1; else acb->waiting_timer.expires = jiffies + to + 1; add_timer(&acb->waiting_timer); @@ -1319,7 +1321,7 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd) udelay(500); /* We may be in serious trouble. Wait some seconds */ - acb->scsi_host->last_reset = + acb->last_reset = jiffies + 3 * HZ / 2 + HZ * acb->eeprom.delay_time; @@ -1462,9 +1464,9 @@ static void selto_timer(struct AdapterCtlBlk *acb) acb->selto_timer.function = selection_timeout_missed; acb->selto_timer.data = (unsigned long) acb; if (time_before - (jiffies + HZ, acb->scsi_host->last_reset + HZ / 2)) + (jiffies + HZ, acb->last_reset + HZ / 2)) acb->selto_timer.expires = - acb->scsi_host->last_reset + HZ / 2 + 1; + acb->last_reset + HZ / 2 + 1; else acb->selto_timer.expires = jiffies + HZ + 1; add_timer(&acb->selto_timer); @@ -1535,7 +1537,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb, } /* Allow starting of SCSI commands half a second before we allow the mid-level * to queue them again after a reset */ - if (time_before(jiffies, acb->scsi_host->last_reset - HZ / 2)) { + if (time_before(jiffies, acb->last_reset - HZ / 2)) { dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n"); return 1; } @@ -3031,7 +3033,7 @@ static void disconnect(struct AdapterCtlBlk *acb) dprintkl(KERN_ERR, "disconnect: No such device\n"); udelay(500); /* Suspend queue for a while */ - acb->scsi_host->last_reset = + acb->last_reset = jiffies + HZ / 2 + HZ * acb->eeprom.delay_time; clear_fifo(acb, "disconnectEx"); @@ -3053,7 +3055,7 @@ static void disconnect(struct AdapterCtlBlk *acb) waiting_process_next(acb); } else if (srb->state & SRB_ABORT_SENT) { dcb->flag &= ~ABORT_DEV_; - acb->scsi_host->last_reset = jiffies + HZ / 2 + 1; + acb->last_reset = jiffies + HZ / 2 + 1; dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n"); doing_srb_done(acb, DID_ABORT, srb->cmd, 1); waiting_process_next(acb); @@ -3649,7 +3651,7 @@ static void scsi_reset_detect(struct AdapterCtlBlk *acb) /*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */ udelay(500); /* Maybe we locked up the bus? Then lets wait even longer ... */ - acb->scsi_host->last_reset = + acb->last_reset = jiffies + 5 * HZ / 2 + HZ * acb->eeprom.delay_time; @@ -4426,7 +4428,7 @@ static void adapter_init_scsi_host(struct Scsi_Host *host) host->dma_channel = -1; host->unique_id = acb->io_port_base; host->irq = acb->irq_level; - host->last_reset = jiffies; + acb->last_reset = jiffies; host->max_id = 16; if (host->max_id - 1 == eeprom->scsi_id) @@ -4484,7 +4486,7 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb) /*spin_unlock_irq (&io_request_lock); */ udelay(500); - acb->scsi_host->last_reset = + acb->last_reset = jiffies + HZ / 2 + HZ * acb->eeprom.delay_time; @@ -4616,26 +4618,21 @@ static void adapter_uninit(struct AdapterCtlBlk *acb) #undef SPRINTF -#define SPRINTF(args...) pos += sprintf(pos, args) +#define SPRINTF(args...) seq_printf(m,##args) #undef YESNO #define YESNO(YN) \ if (YN) SPRINTF(" Yes ");\ else SPRINTF(" No ") -static int dc395x_proc_info(struct Scsi_Host *host, char *buffer, - char **start, off_t offset, int length, int inout) +static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host) { struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata; int spd, spd1; - char *pos = buffer; struct DeviceCtlBlk *dcb; unsigned long flags; int dev; - if (inout) /* Has data been written to the file ? 
*/ - return -EPERM; - SPRINTF(DC395X_BANNER " PCI SCSI Host Adapter\n"); SPRINTF(" Driver Version " DC395X_VERSION "\n"); @@ -4735,22 +4732,15 @@ static int dc395x_proc_info(struct Scsi_Host *host, char *buffer, SPRINTF("END\n"); } - *start = buffer + offset; DC395x_UNLOCK_IO(acb->scsi_host, flags); - - if (pos - buffer < offset) - return 0; - else if (pos - buffer - offset < length) - return pos - buffer - offset; - else - return length; + return 0; } static struct scsi_host_template dc395x_driver_template = { .module = THIS_MODULE, .proc_name = DC395X_NAME, - .proc_info = dc395x_proc_info, + .show_info = dc395x_show_info, .name = DC395X_BANNER " " DC395X_VERSION, .queuecommand = dc395x_queue_command, .bios_param = dc395x_bios_param, @@ -4871,7 +4861,6 @@ static void dc395x_remove_one(struct pci_dev *dev) adapter_uninit(acb); pci_disable_device(dev); scsi_host_put(scsi_host); - pci_set_drvdata(dev, NULL); } diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 6f4d8e6f32f..7bcf67eec92 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -120,6 +120,7 @@ static struct request *get_alua_req(struct scsi_device *sdev, "%s: blk_get_request failed\n", __func__); return NULL; } + blk_rq_set_block_pc(rq); if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { blk_put_request(rq); @@ -128,7 +129,6 @@ static struct request *get_alua_req(struct scsi_device *sdev, return NULL; } - rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; rq->retries = ALUA_FAILOVER_RETRIES; @@ -232,13 +232,13 @@ static void stpg_endio(struct request *req, int error) struct scsi_sense_hdr sense_hdr; unsigned err = SCSI_DH_OK; - if (error || host_byte(req->errors) != DID_OK || - msg_byte(req->errors) != COMMAND_COMPLETE) { + if (host_byte(req->errors) != DID_OK || + msg_byte(req->errors) != COMMAND_COMPLETE) { err = SCSI_DH_IO; goto done; } - if (h->senselen > 0) { + if (req->sense_len > 0) { err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr); if (!err) { @@ -255,7 +255,9 @@ static void stpg_endio(struct request *req, int error) ALUA_DH_NAME, sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq); err = SCSI_DH_IO; - } + } else if (error) + err = SCSI_DH_IO; + if (err == SCSI_DH_OK) { h->state = TPGS_STATE_OPTIMIZED; sdev_printk(KERN_INFO, h->sdev, @@ -479,6 +481,11 @@ static int alua_check_sense(struct scsi_device *sdev, * Power On, Reset, or Bus Device Reset, just retry. */ return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04) + /* + * Device internal reset + */ + return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01) /* * Mode Parameters Changed @@ -515,12 +522,13 @@ static int alua_check_sense(struct scsi_device *sdev, /* * alua_rtpg - Evaluate REPORT TARGET GROUP STATES * @sdev: the device to be evaluated. + * @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds for device to exit transitioning state * * Evaluate the Target Port Group State. * Returns SCSI_DH_DEV_OFFLINED if the path is * found to be unusable. 
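+ * Returns SCSI_DH_RETRY if @wait_for_transition is set and the device
+ * does not leave the transitioning state before ALUA_FAILOVER_TIMEOUT
+ * expires.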
*/ -static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) +static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition) { struct scsi_sense_hdr sense_hdr; int len, k, off, valid_states = 0; @@ -592,7 +600,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) else h->transition_tmo = ALUA_FAILOVER_TIMEOUT; - if (orig_transition_tmo != h->transition_tmo) { + if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) { sdev_printk(KERN_INFO, sdev, "%s: transition timeout set to %d seconds\n", ALUA_DH_NAME, h->transition_tmo); @@ -630,14 +638,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) switch (h->state) { case TPGS_STATE_TRANSITIONING: - if (time_before(jiffies, expiry)) { - /* State transition, retry */ - interval += 2000; - msleep(interval); - goto retry; + if (wait_for_transition) { + if (time_before(jiffies, expiry)) { + /* State transition, retry */ + interval += 2000; + msleep(interval); + goto retry; + } + err = SCSI_DH_RETRY; + } else { + err = SCSI_DH_OK; } + /* Transitioning time exceeded, set port to standby */ - err = SCSI_DH_RETRY; h->state = TPGS_STATE_STANDBY; break; case TPGS_STATE_OFFLINE: @@ -671,7 +684,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h) if (err != SCSI_DH_OK) goto out; - err = alua_rtpg(sdev, h); + err = alua_rtpg(sdev, h, 0); if (err != SCSI_DH_OK) goto out; @@ -710,6 +723,10 @@ static int alua_set_params(struct scsi_device *sdev, const char *params) return result; } +static uint optimize_stpg; +module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). 
Default is 0."); + /* * alua_activate - activate a path * @sdev: device on the path to be activated @@ -727,10 +744,13 @@ static int alua_activate(struct scsi_device *sdev, int err = SCSI_DH_OK; int stpg = 0; - err = alua_rtpg(sdev, h); + err = alua_rtpg(sdev, h, 1); if (err != SCSI_DH_OK) goto out; + if (optimize_stpg) + h->flags |= ALUA_OPTIMIZE_STPG; + if (h->tpgs & TPGS_MODE_EXPLICIT) { switch (h->state) { case TPGS_STATE_NONOPTIMIZED: diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index e1c8be06de9..6f07f7fe3aa 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -280,6 +280,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd, return NULL; } + blk_rq_set_block_pc(rq); rq->cmd_len = COMMAND_SIZE(cmd); rq->cmd[0] = cmd; @@ -304,7 +305,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd, break; } - rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; rq->timeout = CLARIION_TIMEOUT; diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index 084062bb8ee..e9d9fea9e27 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -120,7 +120,7 @@ retry: if (!req) return SCSI_DH_RES_TEMP_UNAVAIL; - req->cmd_type = REQ_TYPE_BLOCK_PC; + blk_rq_set_block_pc(req); req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY); @@ -250,7 +250,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h) if (!req) return SCSI_DH_RES_TEMP_UNAVAIL; - req->cmd_type = REQ_TYPE_BLOCK_PC; + blk_rq_set_block_pc(req); req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; req->cmd_len = COMMAND_SIZE(START_STOP); diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 69c915aa77c..826069db984 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -279,6 +279,7 @@ static struct request *get_rdac_req(struct scsi_device *sdev, "get_rdac_req: blk_get_request failed.\n"); return NULL; } + blk_rq_set_block_pc(rq); if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { blk_put_request(rq); @@ -287,7 +288,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev, return NULL; } - rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; rq->retries = RDAC_RETRIES; @@ -786,6 +786,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = { {"IBM", "1742"}, {"IBM", "1745"}, {"IBM", "1746"}, + {"IBM", "1813"}, {"IBM", "1814"}, {"IBM", "1815"}, {"IBM", "1818"}, diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index b6e2700ec1c..c0ae8fa57a3 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -448,19 +448,8 @@ static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd } rmb(); - /* - * TODO: I need to block here if I am processing ioctl cmds - * but if the outstanding cmds all finish before the ioctl, - * the scsi-core will not know to start sending cmds to me again. 
- * I need to a way to restart the scsi-cores queues or should I block - * calling scsi_done on the outstanding cmds instead - * for now we don't set the IOCTL state - */ - if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) { - pHba->host->last_reset = jiffies; - pHba->host->resetting = 1; - return 1; - } + if ((pHba->state) & DPTI_STATE_RESET) + return SCSI_MLQUEUE_HOST_BUSY; // TODO if the cmd->device if offline then I may need to issue a bus rescan // followed by a get_lct to see if the device is there anymore @@ -553,36 +542,14 @@ static const char *adpt_info(struct Scsi_Host *host) return (char *) (pHba->detail); } -static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, - int length, int inout) +static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host) { struct adpt_device* d; int id; int chan; - int len = 0; - int begin = 0; - int pos = 0; adpt_hba* pHba; int unit; - *start = buffer; - if (inout == TRUE) { - /* - * The user has done a write and wants us to take the - * data in the buffer and do something with it. - * proc_scsiwrite calls us with inout = 1 - * - * Read data from buffer (writing to us) - NOT SUPPORTED - */ - return -EINVAL; - } - - /* - * inout = 0 means the user has done a read and wants information - * returned, so we write information about the cards into the buffer - * proc_scsiread() calls us with inout = 0 - */ - // Find HBA (host bus adapter) we are looking for mutex_lock(&adpt_configuration_lock); for (pHba = hba_chain; pHba; pHba = pHba->next) { @@ -596,86 +563,30 @@ static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, of } host = pHba->host; - len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION); - len += sprintf(buffer+len, "%s\n", pHba->detail); - len += sprintf(buffer+len, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n", + seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION); + seq_printf(m, "%s\n", pHba->detail); + seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n", pHba->host->host_no, pHba->name, host->irq); - len += sprintf(buffer+len, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n", + seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n", host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize); - pos = begin + len; - - /* CHECKPOINT */ - if(pos > offset + length) { - goto stop_output; - } - if(pos <= offset) { - /* - * If we haven't even written to where we last left - * off (the last time we were called), reset the - * beginning pointer. 
- */ - len = 0; - begin = pos; - } - len += sprintf(buffer+len, "Devices:\n"); + seq_printf(m, "Devices:\n"); for(chan = 0; chan < MAX_CHANNEL; chan++) { for(id = 0; id < MAX_ID; id++) { d = pHba->channel[chan].device[id]; - while(d){ - len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor); - len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev); - pos = begin + len; - - - /* CHECKPOINT */ - if(pos > offset + length) { - goto stop_output; - } - if(pos <= offset) { - len = 0; - begin = pos; - } + while(d) { + seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor); + seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev); unit = d->pI2o_dev->lct_data.tid; - len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n", + seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n", unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun, scsi_device_online(d->pScsi_dev)? "online":"offline"); - pos = begin + len; - - /* CHECKPOINT */ - if(pos > offset + length) { - goto stop_output; - } - if(pos <= offset) { - len = 0; - begin = pos; - } - d = d->next_lun; } } } - - /* - * begin is where we last checked our position with regards to offset - * begin is always less than offset. len is relative to begin. It - * is the number of bytes written past begin - * - */ -stop_output: - /* stop the output and calculate the correct length */ - *(buffer + len) = '\0'; - - *start = buffer + (offset - begin); /* Start of wanted data */ - len -= (offset - begin); - if(len > length) { - len = length; - } else if(len < 0){ - len = 0; - **start = '\0'; - } - return len; + return 0; } /* @@ -1889,21 +1800,23 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg) } do { - if(pHba->host) + /* + * Stop any new commands from enterring the + * controller while processing the ioctl + */ + if (pHba->host) { + scsi_block_requests(pHba->host); spin_lock_irqsave(pHba->host->host_lock, flags); - // This state stops any new commands from enterring the - // controller while processing the ioctl -// pHba->state |= DPTI_STATE_IOCTL; -// We can't set this now - The scsi subsystem sets host_blocked and -// the queue empties and stops. 
We need a way to restart the queue + } rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER); if (rcode != 0) printk("adpt_i2o_passthru: post wait failed %d %p\n", rcode, reply); -// pHba->state &= ~DPTI_STATE_IOCTL; - if(pHba->host) + if (pHba->host) { spin_unlock_irqrestore(pHba->host->host_lock, flags); - } while(rcode == -ETIMEDOUT); + scsi_unblock_requests(pHba->host); + } + } while (rcode == -ETIMEDOUT); if(rcode){ goto cleanup; @@ -3639,7 +3552,7 @@ static struct scsi_host_template driver_template = { .module = THIS_MODULE, .name = "dpt_i2o", .proc_name = "dpt_i2o", - .proc_info = adpt_proc_info, + .show_info = adpt_show_info, .info = adpt_info, .queuecommand = adpt_queue, .eh_abort_handler = adpt_abort, diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h index beded716f93..aeb046186c8 100644 --- a/drivers/scsi/dpti.h +++ b/drivers/scsi/dpti.h @@ -202,7 +202,6 @@ struct adpt_channel { // HBA state flags #define DPTI_STATE_RESET (0x01) -#define DPTI_STATE_IOCTL (0x02) typedef struct _adpt_hba { struct _adpt_hba *next; diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c index 4b11bb04f5c..0a667fe0500 100644 --- a/drivers/scsi/dtc.c +++ b/drivers/scsi/dtc.c @@ -3,8 +3,6 @@ #define PSEUDO_DMA #define DONT_USE_INTR #define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */ -#define xNDEBUG (NDEBUG_INTR+NDEBUG_RESELECTION+\ - NDEBUG_SELECTION+NDEBUG_ARBITRATION) #define DMA_WORKS_RIGHT @@ -216,7 +214,8 @@ static int __init dtc_detect(struct scsi_host_template * tpnt) int sig, count; tpnt->proc_name = "dtc3x80"; - tpnt->proc_info = &dtc_proc_info; + tpnt->show_info = dtc_show_info; + tpnt->write_info = dtc_write_info; for (count = 0; current_override < NO_OVERRIDES; ++current_override) { addr = 0; @@ -276,7 +275,7 @@ found: /* With interrupts enabled, it will sometimes hang when doing heavy * reads. So better not enable them until I finger it out. */ if (instance->irq != SCSI_IRQ_NONE) - if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED, + if (request_irq(instance->irq, dtc_intr, 0, "dtc", instance)) { printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h index cdc621204b6..92d7cfc3f4f 100644 --- a/drivers/scsi/dtc.h +++ b/drivers/scsi/dtc.h @@ -88,7 +88,8 @@ static int dtc_bus_reset(Scsi_Cmnd *); #define NCR5380_queue_command dtc_queue_command #define NCR5380_abort dtc_abort #define NCR5380_bus_reset dtc_bus_reset -#define NCR5380_proc_info dtc_proc_info +#define NCR5380_show_info dtc_show_info +#define NCR5380_write_info dtc_write_info /* 15 12 11 10 1001 1100 0000 0000 */ diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index 94de88955a9..ebf57364df9 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c @@ -1221,7 +1221,7 @@ static int port_detect(unsigned long port_base, unsigned int j, /* Board detected, allocate its IRQ */ if (request_irq(irq, do_interrupt_handler, - IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0), + (subversion == ESA) ? 
IRQF_SHARED : 0, driver_name, (void *)&sha[j])) { printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq); diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c index d5f8362335d..8319d2b417b 100644 --- a/drivers/scsi/eata_pio.c +++ b/drivers/scsi/eata_pio.c @@ -92,58 +92,22 @@ static unsigned long queue_counter; static struct scsi_host_template driver_template; -/* - * eata_proc_info - * inout : decides on the direction of the dataflow and the meaning of the - * variables - * buffer: If inout==FALSE data is being written to it else read from it - * *start: If inout==FALSE start of the valid data in the buffer - * offset: If inout==FALSE offset from the beginning of the imaginary file - * from which we start writing into the buffer - * length: If inout==FALSE max number of bytes to be written into the buffer - * else number of bytes in the buffer - */ -static int eata_pio_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t offset, - int length, int rw) +static int eata_pio_show_info(struct seq_file *m, struct Scsi_Host *shost) { - int len = 0; - off_t begin = 0, pos = 0; - - if (rw) - return -ENOSYS; - - len += sprintf(buffer+len, "EATA (Extended Attachment) PIO driver version: " + seq_printf(m, "EATA (Extended Attachment) PIO driver version: " "%d.%d%s\n",VER_MAJOR, VER_MINOR, VER_SUB); - len += sprintf(buffer + len, "queued commands: %10ld\n" + seq_printf(m, "queued commands: %10ld\n" "processed interrupts:%10ld\n", queue_counter, int_counter); - len += sprintf(buffer + len, "\nscsi%-2d: HBA %.10s\n", + seq_printf(m, "\nscsi%-2d: HBA %.10s\n", shost->host_no, SD(shost)->name); - len += sprintf(buffer + len, "Firmware revision: v%s\n", + seq_printf(m, "Firmware revision: v%s\n", SD(shost)->revision); - len += sprintf(buffer + len, "IO: PIO\n"); - len += sprintf(buffer + len, "Base IO : %#.4x\n", (u32) shost->base); - len += sprintf(buffer + len, "Host Bus: %s\n", + seq_printf(m, "IO: PIO\n"); + seq_printf(m, "Base IO : %#.4x\n", (u32) shost->base); + seq_printf(m, "Host Bus: %s\n", (SD(shost)->bustype == 'P')?"PCI ": (SD(shost)->bustype == 'E')?"EISA":"ISA "); - - pos = begin + len; - - if (pos < offset) { - len = 0; - begin = pos; - } - if (pos > offset + length) - goto stop_output; - -stop_output: - DBG(DBG_PROC, printk("2pos: %ld offset: %ld len: %d\n", pos, offset, len)); - *start = buffer + (offset - begin); /* Start of wanted data */ - len -= (offset - begin); /* Start slop */ - if (len > length) - len = length; /* Ending slop */ - DBG(DBG_PROC, printk("3pos: %ld offset: %ld len: %d\n", pos, offset, len)); - - return len; + return 0; } static int eata_pio_release(struct Scsi_Host *sh) @@ -723,7 +687,7 @@ static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev return 0; if (!reg_IRQ[gc->IRQ]) { /* Interrupt already registered ? 
*/ - if (!request_irq(gc->IRQ, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", sh)) { + if (!request_irq(gc->IRQ, do_eata_pio_int_handler, 0, "EATA-PIO", sh)) { reg_IRQ[gc->IRQ]++; if (!gc->IRQ_TR) reg_IRQL[gc->IRQ] = 1; /* IRQ is edge triggered */ @@ -955,9 +919,9 @@ static int eata_pio_detect(struct scsi_host_template *tpnt) find_pio_EISA(&gc); find_pio_ISA(&gc); - for (i = 0; i <= MAXIRQ; i++) + for (i = 0; i < MAXIRQ; i++) if (reg_IRQ[i]) - request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL); + request_irq(i, do_eata_pio_int_handler, 0, "EATA-PIO", NULL); HBA_ptr = first_HBA; @@ -985,7 +949,7 @@ static int eata_pio_detect(struct scsi_host_template *tpnt) static struct scsi_host_template driver_template = { .proc_name = "eata_pio", .name = "EATA (Extended Attachment) PIO driver", - .proc_info = eata_pio_proc_info, + .show_info = eata_pio_show_info, .detect = eata_pio_detect, .release = eata_pio_release, .queuecommand = eata_pio_queue, diff --git a/drivers/scsi/esas2r/Kconfig b/drivers/scsi/esas2r/Kconfig new file mode 100644 index 00000000000..78fdbfd9b4b --- /dev/null +++ b/drivers/scsi/esas2r/Kconfig @@ -0,0 +1,5 @@ +config SCSI_ESAS2R + tristate "ATTO Technology's ExpressSAS RAID adapter driver" + depends on PCI && SCSI + ---help--- + This driver supports the ATTO ExpressSAS R6xx SAS/SATA RAID controllers. diff --git a/drivers/scsi/esas2r/Makefile b/drivers/scsi/esas2r/Makefile new file mode 100644 index 00000000000..c77160b8c8b --- /dev/null +++ b/drivers/scsi/esas2r/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_SCSI_ESAS2R) += esas2r.o + +esas2r-objs := esas2r_log.o esas2r_disc.o esas2r_flash.o esas2r_init.o \ + esas2r_int.o esas2r_io.o esas2r_ioctl.o esas2r_targdb.o \ + esas2r_vda.o esas2r_main.o diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h new file mode 100644 index 00000000000..4aca3d52c85 --- /dev/null +++ b/drivers/scsi/esas2r/atioctl.h @@ -0,0 +1,1254 @@ +/* linux/drivers/scsi/esas2r/atioctl.h + * ATTO IOCTL Handling + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include "atvda.h" + +#ifndef ATIOCTL_H +#define ATIOCTL_H + +#define EXPRESS_IOCTL_SIGNATURE "Express" +#define EXPRESS_IOCTL_SIGNATURE_SIZE 8 + +/* structure definitions for IOCTls */ + +struct __packed atto_express_ioctl_header { + u8 signature[EXPRESS_IOCTL_SIGNATURE_SIZE]; + u8 return_code; + +#define IOCTL_SUCCESS 0 +#define IOCTL_ERR_INVCMD 101 +#define IOCTL_INIT_FAILED 102 +#define IOCTL_NOT_IMPLEMENTED 103 +#define IOCTL_BAD_CHANNEL 104 +#define IOCTL_TARGET_OVERRUN 105 +#define IOCTL_TARGET_NOT_ENABLED 106 +#define IOCTL_BAD_FLASH_IMGTYPE 107 +#define IOCTL_OUT_OF_RESOURCES 108 +#define IOCTL_GENERAL_ERROR 109 +#define IOCTL_INVALID_PARAM 110 + + u8 channel; + u8 retries; + u8 pad[5]; +}; + +/* + * NOTE - if channel == 0xFF, the request is + * handled on the adapter it came in on. + */ +#define MAX_NODE_NAMES 256 + +struct __packed atto_firmware_rw_request { + u8 function; + #define FUNC_FW_DOWNLOAD 0x09 + #define FUNC_FW_UPLOAD 0x12 + + u8 img_type; + #define FW_IMG_FW 0x01 + #define FW_IMG_BIOS 0x02 + #define FW_IMG_NVR 0x03 + #define FW_IMG_RAW 0x04 + #define FW_IMG_FM_API 0x05 + #define FW_IMG_FS_API 0x06 + + u8 pad[2]; + u32 img_offset; + u32 img_size; + u8 image[0x80000]; +}; + +struct __packed atto_param_rw_request { + u16 code; + char data_buffer[512]; +}; + +#define MAX_CHANNEL 256 + +struct __packed atto_channel_list { + u32 num_channels; + u8 channel[MAX_CHANNEL]; +}; + +struct __packed atto_channel_info { + u8 major_rev; + u8 minor_rev; + u8 IRQ; + u8 revision_id; + u8 pci_bus; + u8 pci_dev_func; + u8 core_rev; + u8 host_no; + u16 device_id; + u16 vendor_id; + u16 ven_dev_id; + u8 pad[3]; + u32 hbaapi_rev; +}; + +/* + * CSMI control codes + * class independent + */ +#define CSMI_CC_GET_DRVR_INFO 1 +#define CSMI_CC_GET_CNTLR_CFG 2 +#define CSMI_CC_GET_CNTLR_STS 3 +#define CSMI_CC_FW_DOWNLOAD 4 + +/* RAID class */ +#define CSMI_CC_GET_RAID_INFO 10 +#define CSMI_CC_GET_RAID_CFG 11 + +/* HBA class */ +#define CSMI_CC_GET_PHY_INFO 20 +#define CSMI_CC_SET_PHY_INFO 21 +#define CSMI_CC_GET_LINK_ERRORS 22 +#define CSMI_CC_SMP_PASSTHRU 23 +#define CSMI_CC_SSP_PASSTHRU 24 +#define CSMI_CC_STP_PASSTHRU 25 +#define CSMI_CC_GET_SATA_SIG 26 +#define CSMI_CC_GET_SCSI_ADDR 27 +#define CSMI_CC_GET_DEV_ADDR 28 +#define CSMI_CC_TASK_MGT 29 +#define CSMI_CC_GET_CONN_INFO 30 + +/* PHY class */ +#define CSMI_CC_PHY_CTRL 60 + +/* + * CSMI status codes + * class independent + */ +#define CSMI_STS_SUCCESS 0 +#define CSMI_STS_FAILED 1 +#define CSMI_STS_BAD_CTRL_CODE 2 +#define CSMI_STS_INV_PARAM 3 +#define CSMI_STS_WRITE_ATTEMPTED 4 + +/* RAID class */ +#define CSMI_STS_INV_RAID_SET 1000 + +/* HBA class */ +#define CSMI_STS_PHY_CHANGED CSMI_STS_SUCCESS 
+#define CSMI_STS_PHY_UNCHANGEABLE 2000 +#define CSMI_STS_INV_LINK_RATE 2001 +#define CSMI_STS_INV_PHY 2002 +#define CSMI_STS_INV_PHY_FOR_PORT 2003 +#define CSMI_STS_PHY_UNSELECTABLE 2004 +#define CSMI_STS_SELECT_PHY_OR_PORT 2005 +#define CSMI_STS_INV_PORT 2006 +#define CSMI_STS_PORT_UNSELECTABLE 2007 +#define CSMI_STS_CONNECTION_FAILED 2008 +#define CSMI_STS_NO_SATA_DEV 2009 +#define CSMI_STS_NO_SATA_SIGNATURE 2010 +#define CSMI_STS_SCSI_EMULATION 2011 +#define CSMI_STS_NOT_AN_END_DEV 2012 +#define CSMI_STS_NO_SCSI_ADDR 2013 +#define CSMI_STS_NO_DEV_ADDR 2014 + +/* CSMI class independent structures */ +struct atto_csmi_get_driver_info { + char name[81]; + char description[81]; + u16 major_rev; + u16 minor_rev; + u16 build_rev; + u16 release_rev; + u16 csmi_major_rev; + u16 csmi_minor_rev; + #define CSMI_MAJOR_REV_0_81 0 + #define CSMI_MINOR_REV_0_81 81 + + #define CSMI_MAJOR_REV CSMI_MAJOR_REV_0_81 + #define CSMI_MINOR_REV CSMI_MINOR_REV_0_81 +}; + +struct atto_csmi_get_pci_bus_addr { + u8 bus_num; + u8 device_num; + u8 function_num; + u8 reserved; +}; + +struct atto_csmi_get_cntlr_cfg { + u32 base_io_addr; + + struct { + u32 base_memaddr_lo; + u32 base_memaddr_hi; + }; + + u32 board_id; + u16 slot_num; + #define CSMI_SLOT_NUM_UNKNOWN 0xFFFF + + u8 cntlr_class; + #define CSMI_CNTLR_CLASS_HBA 5 + + u8 io_bus_type; + #define CSMI_BUS_TYPE_PCI 3 + #define CSMI_BUS_TYPE_PCMCIA 4 + + union { + struct atto_csmi_get_pci_bus_addr pci_addr; + u8 reserved[32]; + }; + + char serial_num[81]; + u16 major_rev; + u16 minor_rev; + u16 build_rev; + u16 release_rev; + u16 bios_major_rev; + u16 bios_minor_rev; + u16 bios_build_rev; + u16 bios_release_rev; + u32 cntlr_flags; + #define CSMI_CNTLRF_SAS_HBA 0x00000001 + #define CSMI_CNTLRF_SAS_RAID 0x00000002 + #define CSMI_CNTLRF_SATA_HBA 0x00000004 + #define CSMI_CNTLRF_SATA_RAID 0x00000008 + #define CSMI_CNTLRF_FWD_SUPPORT 0x00010000 + #define CSMI_CNTLRF_FWD_ONLINE 0x00020000 + #define CSMI_CNTLRF_FWD_SRESET 0x00040000 + #define CSMI_CNTLRF_FWD_HRESET 0x00080000 + #define CSMI_CNTLRF_FWD_RROM 0x00100000 + + u16 rrom_major_rev; + u16 rrom_minor_rev; + u16 rrom_build_rev; + u16 rrom_release_rev; + u16 rrom_biosmajor_rev; + u16 rrom_biosminor_rev; + u16 rrom_biosbuild_rev; + u16 rrom_biosrelease_rev; + u8 reserved2[7]; +}; + +struct atto_csmi_get_cntlr_sts { + u32 status; + #define CSMI_CNTLR_STS_GOOD 1 + #define CSMI_CNTLR_STS_FAILED 2 + #define CSMI_CNTLR_STS_OFFLINE 3 + #define CSMI_CNTLR_STS_POWEROFF 4 + + u32 offline_reason; + #define CSMI_OFFLINE_NO_REASON 0 + #define CSMI_OFFLINE_INITIALIZING 1 + #define CSMI_OFFLINE_BUS_DEGRADED 2 + #define CSMI_OFFLINE_BUS_FAILURE 3 + + u8 reserved[28]; +}; + +struct atto_csmi_fw_download { + u32 buffer_len; + u32 download_flags; + #define CSMI_FWDF_VALIDATE 0x00000001 + #define CSMI_FWDF_SOFT_RESET 0x00000002 + #define CSMI_FWDF_HARD_RESET 0x00000004 + + u8 reserved[32]; + u16 status; + #define CSMI_FWD_STS_SUCCESS 0 + #define CSMI_FWD_STS_FAILED 1 + #define CSMI_FWD_STS_USING_RROM 2 + #define CSMI_FWD_STS_REJECT 3 + #define CSMI_FWD_STS_DOWNREV 4 + + u16 severity; + #define CSMI_FWD_SEV_INFO 0 + #define CSMI_FWD_SEV_WARNING 1 + #define CSMI_FWD_SEV_ERROR 2 + #define CSMI_FWD_SEV_FATAL 3 + +}; + +/* CSMI RAID class structures */ +struct atto_csmi_get_raid_info { + u32 num_raid_sets; + u32 max_drivesper_set; + u8 reserved[92]; +}; + +struct atto_csmi_raid_drives { + char model[40]; + char firmware[8]; + char serial_num[40]; + u8 sas_addr[8]; + u8 lun[8]; + u8 drive_sts; + #define CSMI_DRV_STS_OK 0 + #define 
CSMI_DRV_STS_REBUILDING 1 + #define CSMI_DRV_STS_FAILED 2 + #define CSMI_DRV_STS_DEGRADED 3 + + u8 drive_usage; + #define CSMI_DRV_USE_NOT_USED 0 + #define CSMI_DRV_USE_MEMBER 1 + #define CSMI_DRV_USE_SPARE 2 + + u8 reserved[30]; /* spec says 22 */ +}; + +struct atto_csmi_get_raid_cfg { + u32 raid_set_index; + u32 capacity; + u32 stripe_size; + u8 raid_type; + u8 status; + u8 information; + u8 drive_cnt; + u8 reserved[20]; + + struct atto_csmi_raid_drives drives[1]; +}; + +/* CSMI HBA class structures */ +struct atto_csmi_phy_entity { + u8 ident_frame[0x1C]; + u8 port_id; + u8 neg_link_rate; + u8 min_link_rate; + u8 max_link_rate; + u8 phy_change_cnt; + u8 auto_discover; + #define CSMI_DISC_NOT_SUPPORTED 0x00 + #define CSMI_DISC_NOT_STARTED 0x01 + #define CSMI_DISC_IN_PROGRESS 0x02 + #define CSMI_DISC_COMPLETE 0x03 + #define CSMI_DISC_ERROR 0x04 + + u8 reserved[2]; + u8 attach_ident_frame[0x1C]; +}; + +struct atto_csmi_get_phy_info { + u8 number_of_phys; + u8 reserved[3]; + struct atto_csmi_phy_entity + phy[32]; +}; + +struct atto_csmi_set_phy_info { + u8 phy_id; + u8 neg_link_rate; + #define CSMI_NEG_RATE_NEGOTIATE 0x00 + #define CSMI_NEG_RATE_PHY_DIS 0x01 + + u8 prog_minlink_rate; + u8 prog_maxlink_rate; + u8 signal_class; + #define CSMI_SIG_CLASS_UNKNOWN 0x00 + #define CSMI_SIG_CLASS_DIRECT 0x01 + #define CSMI_SIG_CLASS_SERVER 0x02 + #define CSMI_SIG_CLASS_ENCLOSURE 0x03 + + u8 reserved[3]; +}; + +struct atto_csmi_get_link_errors { + u8 phy_id; + u8 reset_cnts; + #define CSMI_RESET_CNTS_NO 0x00 + #define CSMI_RESET_CNTS_YES 0x01 + + u8 reserved[2]; + u32 inv_dw_cnt; + u32 disp_err_cnt; + u32 loss_ofdw_sync_cnt; + u32 phy_reseterr_cnt; + + /* + * The following field has been added by ATTO for ease of + * implementation of additional statistics. Drivers must validate + * the length of the IOCTL payload prior to filling them in so CSMI + * complaint applications function correctly. 
+ */ + + u32 crc_err_cnt; +}; + +struct atto_csmi_smp_passthru { + u8 phy_id; + u8 port_id; + u8 conn_rate; + u8 reserved; + u8 dest_sas_addr[8]; + u32 req_len; + u8 smp_req[1020]; + u8 conn_sts; + u8 reserved2[3]; + u32 rsp_len; + u8 smp_rsp[1020]; +}; + +struct atto_csmi_ssp_passthru_sts { + u8 conn_sts; + u8 reserved[3]; + u8 data_present; + u8 status; + u16 rsp_length; + u8 rsp[256]; + u32 data_bytes; +}; + +struct atto_csmi_ssp_passthru { + u8 phy_id; + u8 port_id; + u8 conn_rate; + u8 reserved; + u8 dest_sas_addr[8]; + u8 lun[8]; + u8 cdb_len; + u8 add_cdb_len; + u8 reserved2[2]; + u8 cdb[16]; + u32 flags; + #define CSMI_SSPF_DD_READ 0x00000001 + #define CSMI_SSPF_DD_WRITE 0x00000002 + #define CSMI_SSPF_DD_UNSPECIFIED 0x00000004 + #define CSMI_SSPF_TA_SIMPLE 0x00000000 + #define CSMI_SSPF_TA_HEAD_OF_Q 0x00000010 + #define CSMI_SSPF_TA_ORDERED 0x00000020 + #define CSMI_SSPF_TA_ACA 0x00000040 + + u8 add_cdb[24]; + u32 data_len; + + struct atto_csmi_ssp_passthru_sts sts; +}; + +struct atto_csmi_stp_passthru_sts { + u8 conn_sts; + u8 reserved[3]; + u8 sts_fis[20]; + u32 scr[16]; + u32 data_bytes; +}; + +struct atto_csmi_stp_passthru { + u8 phy_id; + u8 port_id; + u8 conn_rate; + u8 reserved; + u8 dest_sas_addr[8]; + u8 reserved2[4]; + u8 command_fis[20]; + u32 flags; + #define CSMI_STPF_DD_READ 0x00000001 + #define CSMI_STPF_DD_WRITE 0x00000002 + #define CSMI_STPF_DD_UNSPECIFIED 0x00000004 + #define CSMI_STPF_PIO 0x00000010 + #define CSMI_STPF_DMA 0x00000020 + #define CSMI_STPF_PACKET 0x00000040 + #define CSMI_STPF_DMA_QUEUED 0x00000080 + #define CSMI_STPF_EXECUTE_DIAG 0x00000100 + #define CSMI_STPF_RESET_DEVICE 0x00000200 + + u32 data_len; + + struct atto_csmi_stp_passthru_sts sts; +}; + +struct atto_csmi_get_sata_sig { + u8 phy_id; + u8 reserved[3]; + u8 reg_dth_fis[20]; +}; + +struct atto_csmi_get_scsi_addr { + u8 sas_addr[8]; + u8 sas_lun[8]; + u8 host_index; + u8 path_id; + u8 target_id; + u8 lun; +}; + +struct atto_csmi_get_dev_addr { + u8 host_index; + u8 path_id; + u8 target_id; + u8 lun; + u8 sas_addr[8]; + u8 sas_lun[8]; +}; + +struct atto_csmi_task_mgmt { + u8 host_index; + u8 path_id; + u8 target_id; + u8 lun; + u32 flags; + #define CSMI_TMF_TASK_IU 0x00000001 + #define CSMI_TMF_HARD_RST 0x00000002 + #define CSMI_TMF_SUPPRESS_RSLT 0x00000004 + + u32 queue_tag; + u32 reserved; + u8 task_mgt_func; + u8 reserved2[7]; + u32 information; + #define CSMI_TM_INFO_TEST 1 + #define CSMI_TM_INFO_EXCEEDED 2 + #define CSMI_TM_INFO_DEMAND 3 + #define CSMI_TM_INFO_TRIGGER 4 + + struct atto_csmi_ssp_passthru_sts sts; + +}; + +struct atto_csmi_get_conn_info { + u32 pinout; + #define CSMI_CON_UNKNOWN 0x00000001 + #define CSMI_CON_SFF_8482 0x00000002 + #define CSMI_CON_SFF_8470_LANE_1 0x00000100 + #define CSMI_CON_SFF_8470_LANE_2 0x00000200 + #define CSMI_CON_SFF_8470_LANE_3 0x00000400 + #define CSMI_CON_SFF_8470_LANE_4 0x00000800 + #define CSMI_CON_SFF_8484_LANE_1 0x00010000 + #define CSMI_CON_SFF_8484_LANE_2 0x00020000 + #define CSMI_CON_SFF_8484_LANE_3 0x00040000 + #define CSMI_CON_SFF_8484_LANE_4 0x00080000 + + u8 connector[16]; + u8 location; + #define CSMI_CON_INTERNAL 0x02 + #define CSMI_CON_EXTERNAL 0x04 + #define CSMI_CON_SWITCHABLE 0x08 + #define CSMI_CON_AUTO 0x10 + + u8 reserved[15]; +}; + +/* CSMI PHY class structures */ +struct atto_csmi_character { + u8 type_flags; + #define CSMI_CTF_POS_DISP 0x01 + #define CSMI_CTF_NEG_DISP 0x02 + #define CSMI_CTF_CTRL_CHAR 0x04 + + u8 value; +}; + +struct atto_csmi_pc_ctrl { + u8 type; + #define CSMI_PC_TYPE_UNDEFINED 0x00 + #define 
CSMI_PC_TYPE_SATA 0x01 + #define CSMI_PC_TYPE_SAS 0x02 + u8 rate; + u8 reserved[6]; + u32 vendor_unique[8]; + u32 tx_flags; + #define CSMI_PC_TXF_PREEMP_DIS 0x00000001 + + signed char tx_amplitude; + signed char tx_preemphasis; + signed char tx_slew_rate; + signed char tx_reserved[13]; + u8 tx_vendor_unique[64]; + u32 rx_flags; + #define CSMI_PC_RXF_EQ_DIS 0x00000001 + + signed char rx_threshold; + signed char rx_equalization_gain; + signed char rx_reserved[14]; + u8 rx_vendor_unique[64]; + u32 pattern_flags; + #define CSMI_PC_PATF_FIXED 0x00000001 + #define CSMI_PC_PATF_DIS_SCR 0x00000002 + #define CSMI_PC_PATF_DIS_ALIGN 0x00000004 + #define CSMI_PC_PATF_DIS_SSC 0x00000008 + + u8 fixed_pattern; + #define CSMI_PC_FP_CJPAT 0x00000001 + #define CSMI_PC_FP_ALIGN 0x00000002 + + u8 user_pattern_len; + u8 pattern_reserved[6]; + + struct atto_csmi_character user_pattern_buffer[16]; +}; + +struct atto_csmi_phy_ctrl { + u32 function; + #define CSMI_PC_FUNC_GET_SETUP 0x00000100 + + u8 phy_id; + u16 len_of_cntl; + u8 num_of_cntls; + u8 reserved[4]; + u32 link_flags; + #define CSMI_PHY_ACTIVATE_CTRL 0x00000001 + #define CSMI_PHY_UPD_SPINUP_RATE 0x00000002 + #define CSMI_PHY_AUTO_COMWAKE 0x00000004 + + u8 spinup_rate; + u8 link_reserved[7]; + u32 vendor_unique[8]; + + struct atto_csmi_pc_ctrl control[1]; +}; + +union atto_ioctl_csmi { + struct atto_csmi_get_driver_info drvr_info; + struct atto_csmi_get_cntlr_cfg cntlr_cfg; + struct atto_csmi_get_cntlr_sts cntlr_sts; + struct atto_csmi_fw_download fw_dwnld; + struct atto_csmi_get_raid_info raid_info; + struct atto_csmi_get_raid_cfg raid_cfg; + struct atto_csmi_get_phy_info get_phy_info; + struct atto_csmi_set_phy_info set_phy_info; + struct atto_csmi_get_link_errors link_errs; + struct atto_csmi_smp_passthru smp_pass_thru; + struct atto_csmi_ssp_passthru ssp_pass_thru; + struct atto_csmi_stp_passthru stp_pass_thru; + struct atto_csmi_task_mgmt tsk_mgt; + struct atto_csmi_get_sata_sig sata_sig; + struct atto_csmi_get_scsi_addr scsi_addr; + struct atto_csmi_get_dev_addr dev_addr; + struct atto_csmi_get_conn_info conn_info[32]; + struct atto_csmi_phy_ctrl phy_ctrl; +}; + +struct atto_csmi { + u32 control_code; + u32 status; + union atto_ioctl_csmi data; +}; + +struct atto_module_info { + void *adapter; + void *pci_dev; + void *scsi_host; + unsigned short host_no; + union { + struct { + u64 node_name; + u64 port_name; + }; + u64 sas_addr; + }; +}; + +#define ATTO_FUNC_GET_ADAP_INFO 0x00 +#define ATTO_VER_GET_ADAP_INFO0 0 +#define ATTO_VER_GET_ADAP_INFO ATTO_VER_GET_ADAP_INFO0 + +struct __packed atto_hba_get_adapter_info { + + struct { + u16 vendor_id; + u16 device_id; + u16 ss_vendor_id; + u16 ss_device_id; + u8 class_code[3]; + u8 rev_id; + u8 bus_num; + u8 dev_num; + u8 func_num; + u8 link_width_max; + u8 link_width_curr; + #define ATTO_GAI_PCILW_UNKNOWN 0x00 + + u8 link_speed_max; + u8 link_speed_curr; + #define ATTO_GAI_PCILS_UNKNOWN 0x00 + #define ATTO_GAI_PCILS_GEN1 0x01 + #define ATTO_GAI_PCILS_GEN2 0x02 + #define ATTO_GAI_PCILS_GEN3 0x03 + + u8 interrupt_mode; + #define ATTO_GAI_PCIIM_UNKNOWN 0x00 + #define ATTO_GAI_PCIIM_LEGACY 0x01 + #define ATTO_GAI_PCIIM_MSI 0x02 + #define ATTO_GAI_PCIIM_MSIX 0x03 + + u8 msi_vector_cnt; + u8 reserved[19]; + } pci; + + u8 adap_type; + #define ATTO_GAI_AT_EPCIU320 0x00 + #define ATTO_GAI_AT_ESASRAID 0x01 + #define ATTO_GAI_AT_ESASRAID2 0x02 + #define ATTO_GAI_AT_ESASHBA 0x03 + #define ATTO_GAI_AT_ESASHBA2 0x04 + #define ATTO_GAI_AT_CELERITY 0x05 + #define ATTO_GAI_AT_CELERITY8 0x06 + #define ATTO_GAI_AT_FASTFRAME 
0x07 + #define ATTO_GAI_AT_ESASHBA3 0x08 + #define ATTO_GAI_AT_CELERITY16 0x09 + #define ATTO_GAI_AT_TLSASHBA 0x0A + #define ATTO_GAI_AT_ESASHBA4 0x0B + + u8 adap_flags; + #define ATTO_GAI_AF_DEGRADED 0x01 + #define ATTO_GAI_AF_SPT_SUPP 0x02 + #define ATTO_GAI_AF_DEVADDR_SUPP 0x04 + #define ATTO_GAI_AF_PHYCTRL_SUPP 0x08 + #define ATTO_GAI_AF_TEST_SUPP 0x10 + #define ATTO_GAI_AF_DIAG_SUPP 0x20 + #define ATTO_GAI_AF_VIRT_SES 0x40 + #define ATTO_GAI_AF_CONN_CTRL 0x80 + + u8 num_ports; + u8 num_phys; + u8 drvr_rev_major; + u8 drvr_rev_minor; + u8 drvr_revsub_minor; + u8 drvr_rev_build; + char drvr_rev_ascii[16]; + char drvr_name[32]; + char firmware_rev[16]; + char flash_rev[16]; + char model_name_short[16]; + char model_name[32]; + u32 num_targets; + u32 num_targsper_bus; + u32 num_lunsper_targ; + u8 num_busses; + u8 num_connectors; + u8 adap_flags2; + #define ATTO_GAI_AF2_FCOE_SUPP 0x01 + #define ATTO_GAI_AF2_NIC_SUPP 0x02 + #define ATTO_GAI_AF2_LOCATE_SUPP 0x04 + #define ATTO_GAI_AF2_ADAP_CTRL_SUPP 0x08 + #define ATTO_GAI_AF2_DEV_INFO_SUPP 0x10 + #define ATTO_GAI_AF2_NPIV_SUPP 0x20 + #define ATTO_GAI_AF2_MP_SUPP 0x40 + + u8 num_temp_sensors; + u32 num_targets_backend; + u32 tunnel_flags; + #define ATTO_GAI_TF_MEM_RW 0x00000001 + #define ATTO_GAI_TF_TRACE 0x00000002 + #define ATTO_GAI_TF_SCSI_PASS_THRU 0x00000004 + #define ATTO_GAI_TF_GET_DEV_ADDR 0x00000008 + #define ATTO_GAI_TF_PHY_CTRL 0x00000010 + #define ATTO_GAI_TF_CONN_CTRL 0x00000020 + #define ATTO_GAI_TF_GET_DEV_INFO 0x00000040 + + u8 reserved3[0x138]; +}; + +#define ATTO_FUNC_GET_ADAP_ADDR 0x01 +#define ATTO_VER_GET_ADAP_ADDR0 0 +#define ATTO_VER_GET_ADAP_ADDR ATTO_VER_GET_ADAP_ADDR0 + +struct __packed atto_hba_get_adapter_address { + + u8 addr_type; + #define ATTO_GAA_AT_PORT 0x00 + #define ATTO_GAA_AT_NODE 0x01 + #define ATTO_GAA_AT_CURR_MAC 0x02 + #define ATTO_GAA_AT_PERM_MAC 0x03 + #define ATTO_GAA_AT_VNIC 0x04 + + u8 port_id; + u16 addr_len; + u8 address[256]; +}; + +#define ATTO_FUNC_MEM_RW 0x02 +#define ATTO_VER_MEM_RW0 0 +#define ATTO_VER_MEM_RW ATTO_VER_MEM_RW0 + +struct __packed atto_hba_memory_read_write { + u8 mem_func; + u8 mem_type; + union { + u8 pci_index; + u8 i2c_dev; + }; + u8 i2c_status; + u32 length; + u64 address; + u8 reserved[48]; + +}; + +#define ATTO_FUNC_TRACE 0x03 +#define ATTO_VER_TRACE0 0 +#define ATTO_VER_TRACE1 1 +#define ATTO_VER_TRACE ATTO_VER_TRACE1 + +struct __packed atto_hba_trace { + u8 trace_func; + #define ATTO_TRC_TF_GET_INFO 0x00 + #define ATTO_TRC_TF_ENABLE 0x01 + #define ATTO_TRC_TF_DISABLE 0x02 + #define ATTO_TRC_TF_SET_MASK 0x03 + #define ATTO_TRC_TF_UPLOAD 0x04 + #define ATTO_TRC_TF_RESET 0x05 + + u8 trace_type; + #define ATTO_TRC_TT_DRIVER 0x00 + #define ATTO_TRC_TT_FWCOREDUMP 0x01 + + u8 reserved[2]; + u32 current_offset; + u32 total_length; + u32 trace_mask; + u8 reserved2[48]; +}; + +#define ATTO_FUNC_SCSI_PASS_THRU 0x04 +#define ATTO_VER_SCSI_PASS_THRU0 0 +#define ATTO_VER_SCSI_PASS_THRU ATTO_VER_SCSI_PASS_THRU0 + +struct __packed atto_hba_scsi_pass_thru { + u8 cdb[32]; + u8 cdb_length; + u8 req_status; + #define ATTO_SPT_RS_SUCCESS 0x00 + #define ATTO_SPT_RS_FAILED 0x01 + #define ATTO_SPT_RS_OVERRUN 0x02 + #define ATTO_SPT_RS_UNDERRUN 0x03 + #define ATTO_SPT_RS_NO_DEVICE 0x04 + #define ATTO_SPT_RS_NO_LUN 0x05 + #define ATTO_SPT_RS_TIMEOUT 0x06 + #define ATTO_SPT_RS_BUS_RESET 0x07 + #define ATTO_SPT_RS_ABORTED 0x08 + #define ATTO_SPT_RS_BUSY 0x09 + #define ATTO_SPT_RS_DEGRADED 0x0A + + u8 scsi_status; + u8 sense_length; + u32 flags; + #define ATTO_SPTF_DATA_IN 0x00000001 + #define 
ATTO_SPTF_DATA_OUT 0x00000002 + #define ATTO_SPTF_SIMPLE_Q 0x00000004 + #define ATTO_SPTF_HEAD_OF_Q 0x00000008 + #define ATTO_SPTF_ORDERED_Q 0x00000010 + + u32 timeout; + u32 target_id; + u8 lun[8]; + u32 residual_length; + u8 sense_data[0xFC]; + u8 reserved[0x28]; +}; + +#define ATTO_FUNC_GET_DEV_ADDR 0x05 +#define ATTO_VER_GET_DEV_ADDR0 0 +#define ATTO_VER_GET_DEV_ADDR ATTO_VER_GET_DEV_ADDR0 + +struct __packed atto_hba_get_device_address { + u8 addr_type; + #define ATTO_GDA_AT_PORT 0x00 + #define ATTO_GDA_AT_NODE 0x01 + #define ATTO_GDA_AT_MAC 0x02 + #define ATTO_GDA_AT_PORTID 0x03 + #define ATTO_GDA_AT_UNIQUE 0x04 + + u8 reserved; + u16 addr_len; + u32 target_id; + u8 address[256]; +}; + +/* The following functions are supported by firmware but do not have any + * associated driver structures + */ +#define ATTO_FUNC_PHY_CTRL 0x06 +#define ATTO_FUNC_CONN_CTRL 0x0C +#define ATTO_FUNC_ADAP_CTRL 0x0E +#define ATTO_VER_ADAP_CTRL0 0 +#define ATTO_VER_ADAP_CTRL ATTO_VER_ADAP_CTRL0 + +struct __packed atto_hba_adap_ctrl { + u8 adap_func; + #define ATTO_AC_AF_HARD_RST 0x00 + #define ATTO_AC_AF_GET_STATE 0x01 + #define ATTO_AC_AF_GET_TEMP 0x02 + + u8 adap_state; + #define ATTO_AC_AS_UNKNOWN 0x00 + #define ATTO_AC_AS_OK 0x01 + #define ATTO_AC_AS_RST_SCHED 0x02 + #define ATTO_AC_AS_RST_IN_PROG 0x03 + #define ATTO_AC_AS_RST_DISC 0x04 + #define ATTO_AC_AS_DEGRADED 0x05 + #define ATTO_AC_AS_DISABLED 0x06 + #define ATTO_AC_AS_TEMP 0x07 + + u8 reserved[2]; + + union { + struct { + u8 temp_sensor; + u8 temp_state; + + #define ATTO_AC_TS_UNSUPP 0x00 + #define ATTO_AC_TS_UNKNOWN 0x01 + #define ATTO_AC_TS_INIT_FAILED 0x02 + #define ATTO_AC_TS_NORMAL 0x03 + #define ATTO_AC_TS_OUT_OF_RANGE 0x04 + #define ATTO_AC_TS_FAULT 0x05 + + signed short temp_value; + signed short temp_lower_lim; + signed short temp_upper_lim; + char temp_desc[32]; + u8 reserved2[20]; + }; + }; +}; + +#define ATTO_FUNC_GET_DEV_INFO 0x0F +#define ATTO_VER_GET_DEV_INFO0 0 +#define ATTO_VER_GET_DEV_INFO ATTO_VER_GET_DEV_INFO0 + +struct __packed atto_hba_sas_device_info { + + #define ATTO_SDI_MAX_PHYS_WIDE_PORT 16 + + u8 phy_id[ATTO_SDI_MAX_PHYS_WIDE_PORT]; /* IDs of parent exp/adapt */ + #define ATTO_SDI_PHY_ID_INV ATTO_SAS_PHY_ID_INV + u32 exp_target_id; + u32 sas_port_mask; + u8 sas_level; + #define ATTO_SDI_SAS_LVL_INV 0xFF + + u8 slot_num; + #define ATTO_SDI_SLOT_NUM_INV ATTO_SLOT_NUM_INV + + u8 dev_type; + #define ATTO_SDI_DT_END_DEVICE 0 + #define ATTO_SDI_DT_EXPANDER 1 + #define ATTO_SDI_DT_PORT_MULT 2 + + u8 ini_flags; + u8 tgt_flags; + u8 link_rate; /* SMP_RATE_XXX */ + u8 loc_flags; + #define ATTO_SDI_LF_DIRECT 0x01 + #define ATTO_SDI_LF_EXPANDER 0x02 + #define ATTO_SDI_LF_PORT_MULT 0x04 + u8 pm_port; + u8 reserved[0x60]; +}; + +union atto_hba_device_info { + struct atto_hba_sas_device_info sas_dev_info; +}; + +struct __packed atto_hba_get_device_info { + u32 target_id; + u8 info_type; + #define ATTO_GDI_IT_UNKNOWN 0x00 + #define ATTO_GDI_IT_SAS 0x01 + #define ATTO_GDI_IT_FC 0x02 + #define ATTO_GDI_IT_FCOE 0x03 + + u8 reserved[11]; + union atto_hba_device_info dev_info; +}; + +struct atto_ioctl { + u8 version; + u8 function; /* ATTO_FUNC_XXX */ + u8 status; +#define ATTO_STS_SUCCESS 0x00 +#define ATTO_STS_FAILED 0x01 +#define ATTO_STS_INV_VERSION 0x02 +#define ATTO_STS_OUT_OF_RSRC 0x03 +#define ATTO_STS_INV_FUNC 0x04 +#define ATTO_STS_UNSUPPORTED 0x05 +#define ATTO_STS_INV_ADAPTER 0x06 +#define ATTO_STS_INV_DRVR_VER 0x07 +#define ATTO_STS_INV_PARAM 0x08 +#define ATTO_STS_TIMEOUT 0x09 +#define ATTO_STS_NOT_APPL 0x0A +#define 
ATTO_STS_DEGRADED 0x0B + + u8 flags; + #define HBAF_TUNNEL 0x01 + + u32 data_length; + u8 reserved2[56]; + + union { + u8 byte[1]; + struct atto_hba_get_adapter_info get_adap_info; + struct atto_hba_get_adapter_address get_adap_addr; + struct atto_hba_scsi_pass_thru scsi_pass_thru; + struct atto_hba_get_device_address get_dev_addr; + struct atto_hba_adap_ctrl adap_ctrl; + struct atto_hba_get_device_info get_dev_info; + struct atto_hba_trace trace; + } data; + +}; + +struct __packed atto_ioctl_vda_scsi_cmd { + + #define ATTO_VDA_SCSI_VER0 0 + #define ATTO_VDA_SCSI_VER ATTO_VDA_SCSI_VER0 + + u8 cdb[16]; + u32 flags; + u32 data_length; + u32 residual_length; + u16 target_id; + u8 sense_len; + u8 scsi_stat; + u8 reserved[8]; + u8 sense_data[80]; +}; + +struct __packed atto_ioctl_vda_flash_cmd { + + #define ATTO_VDA_FLASH_VER0 0 + #define ATTO_VDA_FLASH_VER ATTO_VDA_FLASH_VER0 + + u32 flash_addr; + u32 data_length; + u8 sub_func; + u8 reserved[15]; + + union { + struct { + u32 flash_size; + u32 page_size; + u8 prod_info[32]; + } info; + + struct { + char file_name[16]; /* 8.3 fname, NULL term, wc=* */ + u32 file_size; + } file; + } data; + +}; + +struct __packed atto_ioctl_vda_diag_cmd { + + #define ATTO_VDA_DIAG_VER0 0 + #define ATTO_VDA_DIAG_VER ATTO_VDA_DIAG_VER0 + + u64 local_addr; + u32 data_length; + u8 sub_func; + u8 flags; + u8 reserved[3]; +}; + +struct __packed atto_ioctl_vda_cli_cmd { + + #define ATTO_VDA_CLI_VER0 0 + #define ATTO_VDA_CLI_VER ATTO_VDA_CLI_VER0 + + u32 cmd_rsp_len; +}; + +struct __packed atto_ioctl_vda_smp_cmd { + + #define ATTO_VDA_SMP_VER0 0 + #define ATTO_VDA_SMP_VER ATTO_VDA_SMP_VER0 + + u64 dest; + u32 cmd_rsp_len; +}; + +struct __packed atto_ioctl_vda_cfg_cmd { + + #define ATTO_VDA_CFG_VER0 0 + #define ATTO_VDA_CFG_VER ATTO_VDA_CFG_VER0 + + u32 data_length; + u8 cfg_func; + u8 reserved[11]; + + union { + u8 bytes[112]; + struct atto_vda_cfg_init init; + } data; + +}; + +struct __packed atto_ioctl_vda_mgt_cmd { + + #define ATTO_VDA_MGT_VER0 0 + #define ATTO_VDA_MGT_VER ATTO_VDA_MGT_VER0 + + u8 mgt_func; + u8 scan_generation; + u16 dev_index; + u32 data_length; + u8 reserved[8]; + union { + u8 bytes[112]; + struct atto_vda_devinfo dev_info; + struct atto_vda_grp_info grp_info; + struct atto_vdapart_info part_info; + struct atto_vda_dh_info dh_info; + struct atto_vda_metrics_info metrics_info; + struct atto_vda_schedule_info sched_info; + struct atto_vda_n_vcache_info nvcache_info; + struct atto_vda_buzzer_info buzzer_info; + struct atto_vda_adapter_info adapter_info; + struct atto_vda_temp_info temp_info; + struct atto_vda_fan_info fan_info; + } data; +}; + +struct __packed atto_ioctl_vda_gsv_cmd { + + #define ATTO_VDA_GSV_VER0 0 + #define ATTO_VDA_GSV_VER ATTO_VDA_GSV_VER0 + + u8 rsp_len; + u8 reserved[7]; + u8 version_info[1]; + #define ATTO_VDA_VER_UNSUPPORTED 0xFF + +}; + +struct __packed atto_ioctl_vda { + u8 version; + u8 function; /* VDA_FUNC_XXXX */ + u8 status; /* ATTO_STS_XXX */ + u8 vda_status; /* RS_XXX (if status == ATTO_STS_SUCCESS) */ + u32 data_length; + u8 reserved[8]; + + union { + struct atto_ioctl_vda_scsi_cmd scsi; + struct atto_ioctl_vda_flash_cmd flash; + struct atto_ioctl_vda_diag_cmd diag; + struct atto_ioctl_vda_cli_cmd cli; + struct atto_ioctl_vda_smp_cmd smp; + struct atto_ioctl_vda_cfg_cmd cfg; + struct atto_ioctl_vda_mgt_cmd mgt; + struct atto_ioctl_vda_gsv_cmd gsv; + u8 cmd_info[256]; + } cmd; + + union { + u8 data[1]; + struct atto_vda_devinfo2 dev_info2; + } data; + +}; + +struct __packed atto_ioctl_smp { + u8 version; + #define 
ATTO_SMP_VERSION0 0 + #define ATTO_SMP_VERSION1 1 + #define ATTO_SMP_VERSION2 2 + #define ATTO_SMP_VERSION ATTO_SMP_VERSION2 + + u8 function; +#define ATTO_SMP_FUNC_DISC_SMP 0x00 +#define ATTO_SMP_FUNC_DISC_TARG 0x01 +#define ATTO_SMP_FUNC_SEND_CMD 0x02 +#define ATTO_SMP_FUNC_DISC_TARG_DIRECT 0x03 +#define ATTO_SMP_FUNC_SEND_CMD_DIRECT 0x04 +#define ATTO_SMP_FUNC_DISC_SMP_DIRECT 0x05 + + u8 status; /* ATTO_STS_XXX */ + u8 smp_status; /* if status == ATTO_STS_SUCCESS */ + #define ATTO_SMP_STS_SUCCESS 0x00 + #define ATTO_SMP_STS_FAILURE 0x01 + #define ATTO_SMP_STS_RESCAN 0x02 + #define ATTO_SMP_STS_NOT_FOUND 0x03 + + u16 target_id; + u8 phy_id; + u8 dev_index; + u64 smp_sas_addr; + u64 targ_sas_addr; + u32 req_length; + u32 rsp_length; + u8 flags; + #define ATTO_SMPF_ROOT_EXP 0x01 /* expander direct attached */ + + u8 reserved[31]; + + union { + u8 byte[1]; + u32 dword[1]; + } data; + +}; + +struct __packed atto_express_ioctl { + struct atto_express_ioctl_header header; + + union { + struct atto_firmware_rw_request fwrw; + struct atto_param_rw_request prw; + struct atto_channel_list chanlist; + struct atto_channel_info chaninfo; + struct atto_ioctl ioctl_hba; + struct atto_module_info modinfo; + struct atto_ioctl_vda ioctl_vda; + struct atto_ioctl_smp ioctl_smp; + struct atto_csmi csmi; + + } data; +}; + +/* The struct associated with the code is listed after the definition */ +#define EXPRESS_IOCTL_MIN 0x4500 +#define EXPRESS_IOCTL_RW_FIRMWARE 0x4500 /* FIRMWARERW */ +#define EXPRESS_IOCTL_READ_PARAMS 0x4501 /* PARAMRW */ +#define EXPRESS_IOCTL_WRITE_PARAMS 0x4502 /* PARAMRW */ +#define EXPRESS_IOCTL_FC_API 0x4503 /* internal */ +#define EXPRESS_IOCTL_GET_CHANNELS 0x4504 /* CHANNELLIST */ +#define EXPRESS_IOCTL_CHAN_INFO 0x4505 /* CHANNELINFO */ +#define EXPRESS_IOCTL_DEFAULT_PARAMS 0x4506 /* PARAMRW */ +#define EXPRESS_ADDR_MEMORY 0x4507 /* MEMADDR */ +#define EXPRESS_RW_MEMORY 0x4508 /* MEMRW */ +#define EXPRESS_TSDK_DUMP 0x4509 /* TSDKDUMP */ +#define EXPRESS_IOCTL_SMP 0x450A /* IOCTL_SMP */ +#define EXPRESS_CSMI 0x450B /* CSMI */ +#define EXPRESS_IOCTL_HBA 0x450C /* IOCTL_HBA */ +#define EXPRESS_IOCTL_VDA 0x450D /* IOCTL_VDA */ +#define EXPRESS_IOCTL_GET_ID 0x450E /* GET_ID */ +#define EXPRESS_IOCTL_GET_MOD_INFO 0x450F /* MODULE_INFO */ +#define EXPRESS_IOCTL_MAX 0x450F + +#endif diff --git a/drivers/scsi/esas2r/atvda.h b/drivers/scsi/esas2r/atvda.h new file mode 100644 index 00000000000..5fc1f991d24 --- /dev/null +++ b/drivers/scsi/esas2r/atvda.h @@ -0,0 +1,1319 @@ +/* linux/drivers/scsi/esas2r/atvda.h + * ATTO VDA interface definitions + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + + +#ifndef ATVDA_H +#define ATVDA_H + +struct __packed atto_dev_addr { + u64 dev_port; + u64 hba_port; + u8 lun; + u8 flags; + #define VDA_DEVADDRF_SATA 0x01 + #define VDA_DEVADDRF_SSD 0x02 + u8 link_speed; /* VDALINKSPEED_xxx */ + u8 pad[1]; +}; + +/* dev_addr2 was added for 64-bit alignment */ + +struct __packed atto_dev_addr2 { + u64 dev_port; + u64 hba_port; + u8 lun; + u8 flags; + u8 link_speed; + u8 pad[5]; +}; + +struct __packed atto_vda_sge { + u32 length; + u64 address; +}; + + +/* VDA request function codes */ + +#define VDA_FUNC_SCSI 0x00 +#define VDA_FUNC_FLASH 0x01 +#define VDA_FUNC_DIAG 0x02 +#define VDA_FUNC_AE 0x03 +#define VDA_FUNC_CLI 0x04 +#define VDA_FUNC_IOCTL 0x05 +#define VDA_FUNC_CFG 0x06 +#define VDA_FUNC_MGT 0x07 +#define VDA_FUNC_GSV 0x08 + + +/* VDA request status values. for host driver considerations, values for + * SCSI requests start at zero. other requests may use these values as well. */ + +#define RS_SUCCESS 0x00 /*! successful completion */ +#define RS_INV_FUNC 0x01 /*! invalid command function */ +#define RS_BUSY 0x02 /*! insufficient resources */ +#define RS_SEL 0x03 /*! no target at target_id */ +#define RS_NO_LUN 0x04 /*! invalid LUN */ +#define RS_TIMEOUT 0x05 /*! request timeout */ +#define RS_OVERRUN 0x06 /*! data overrun */ +#define RS_UNDERRUN 0x07 /*! data underrun */ +#define RS_SCSI_ERROR 0x08 /*! SCSI error occurred */ +#define RS_ABORTED 0x0A /*! command aborted */ +#define RS_RESID_MISM 0x0B /*! residual length incorrect */ +#define RS_TM_FAILED 0x0C /*! task management failed */ +#define RS_RESET 0x0D /*! aborted due to bus reset */ +#define RS_ERR_DMA_SG 0x0E /*! error reading SG list */ +#define RS_ERR_DMA_DATA 0x0F /*! error transferring data */ +#define RS_UNSUPPORTED 0x10 /*! unsupported request */ +#define RS_SEL2 0x70 /*! internal generated RS_SEL */ +#define RS_VDA_BASE 0x80 /*! base of VDA-specific errors */ +#define RS_MGT_BASE 0x80 /*! 
base of VDA management errors */ +#define RS_SCAN_FAIL (RS_MGT_BASE + 0x00) +#define RS_DEV_INVALID (RS_MGT_BASE + 0x01) +#define RS_DEV_ASSIGNED (RS_MGT_BASE + 0x02) +#define RS_DEV_REMOVE (RS_MGT_BASE + 0x03) +#define RS_DEV_LOST (RS_MGT_BASE + 0x04) +#define RS_SCAN_GEN (RS_MGT_BASE + 0x05) +#define RS_GRP_INVALID (RS_MGT_BASE + 0x08) +#define RS_GRP_EXISTS (RS_MGT_BASE + 0x09) +#define RS_GRP_LIMIT (RS_MGT_BASE + 0x0A) +#define RS_GRP_INTLV (RS_MGT_BASE + 0x0B) +#define RS_GRP_SPAN (RS_MGT_BASE + 0x0C) +#define RS_GRP_TYPE (RS_MGT_BASE + 0x0D) +#define RS_GRP_MEMBERS (RS_MGT_BASE + 0x0E) +#define RS_GRP_COMMIT (RS_MGT_BASE + 0x0F) +#define RS_GRP_REBUILD (RS_MGT_BASE + 0x10) +#define RS_GRP_REBUILD_TYPE (RS_MGT_BASE + 0x11) +#define RS_GRP_BLOCK_SIZE (RS_MGT_BASE + 0x12) +#define RS_CFG_SAVE (RS_MGT_BASE + 0x14) +#define RS_PART_LAST (RS_MGT_BASE + 0x18) +#define RS_ELEM_INVALID (RS_MGT_BASE + 0x19) +#define RS_PART_MAPPED (RS_MGT_BASE + 0x1A) +#define RS_PART_TARGET (RS_MGT_BASE + 0x1B) +#define RS_PART_LUN (RS_MGT_BASE + 0x1C) +#define RS_PART_DUP (RS_MGT_BASE + 0x1D) +#define RS_PART_NOMAP (RS_MGT_BASE + 0x1E) +#define RS_PART_MAX (RS_MGT_BASE + 0x1F) +#define RS_PART_CAP (RS_MGT_BASE + 0x20) +#define RS_PART_STATE (RS_MGT_BASE + 0x21) +#define RS_TEST_IN_PROG (RS_MGT_BASE + 0x22) +#define RS_METRICS_ERROR (RS_MGT_BASE + 0x23) +#define RS_HS_ERROR (RS_MGT_BASE + 0x24) +#define RS_NO_METRICS_TEST (RS_MGT_BASE + 0x25) +#define RS_BAD_PARAM (RS_MGT_BASE + 0x26) +#define RS_GRP_MEMBER_SIZE (RS_MGT_BASE + 0x27) +#define RS_FLS_BASE 0xB0 /*! base of VDA errors */ +#define RS_FLS_ERR_AREA (RS_FLS_BASE + 0x00) +#define RS_FLS_ERR_BUSY (RS_FLS_BASE + 0x01) +#define RS_FLS_ERR_RANGE (RS_FLS_BASE + 0x02) +#define RS_FLS_ERR_BEGIN (RS_FLS_BASE + 0x03) +#define RS_FLS_ERR_CHECK (RS_FLS_BASE + 0x04) +#define RS_FLS_ERR_FAIL (RS_FLS_BASE + 0x05) +#define RS_FLS_ERR_RSRC (RS_FLS_BASE + 0x06) +#define RS_FLS_ERR_NOFILE (RS_FLS_BASE + 0x07) +#define RS_FLS_ERR_FSIZE (RS_FLS_BASE + 0x08) +#define RS_CFG_BASE 0xC0 /*! base of VDA configuration errors */ +#define RS_CFG_ERR_BUSY (RS_CFG_BASE + 0) +#define RS_CFG_ERR_SGE (RS_CFG_BASE + 1) +#define RS_CFG_ERR_DATE (RS_CFG_BASE + 2) +#define RS_CFG_ERR_TIME (RS_CFG_BASE + 3) +#define RS_DEGRADED 0xFB /*! degraded mode */ +#define RS_CLI_INTERNAL 0xFC /*! VDA CLI internal error */ +#define RS_VDA_INTERNAL 0xFD /*! catch-all */ +#define RS_PENDING 0xFE /*! pending, not started */ +#define RS_STARTED 0xFF /*! started */ + + +/* flash request subfunctions. these are used in both the IOCTL and the + * driver-firmware interface (VDA_FUNC_FLASH). */ + +#define VDA_FLASH_BEGINW 0x00 +#define VDA_FLASH_READ 0x01 +#define VDA_FLASH_WRITE 0x02 +#define VDA_FLASH_COMMIT 0x03 +#define VDA_FLASH_CANCEL 0x04 +#define VDA_FLASH_INFO 0x05 +#define VDA_FLASH_FREAD 0x06 +#define VDA_FLASH_FWRITE 0x07 +#define VDA_FLASH_FINFO 0x08 + + +/* IOCTL request subfunctions. these identify the payload type for + * VDA_FUNC_IOCTL. 
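+ *
+ * As an editor's illustrative sketch (hypothetical helper code, not part
+ * of this patch), a consumer might size the tunneled payload from the
+ * sub-function, assuming the atto_ioctl/atto_csmi/atto_ioctl_smp payload
+ * structures from atioctl.h:
+ *
+ *	switch (sub_func) {
+ *	case VDA_IOCTL_HBA:  len = sizeof(struct atto_ioctl);     break;
+ *	case VDA_IOCTL_CSMI: len = sizeof(struct atto_csmi);      break;
+ *	case VDA_IOCTL_SMP:  len = sizeof(struct atto_ioctl_smp); break;
+ *	}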
+ */ + +#define VDA_IOCTL_HBA 0x00 +#define VDA_IOCTL_CSMI 0x01 +#define VDA_IOCTL_SMP 0x02 + +struct __packed atto_vda_devinfo { + struct atto_dev_addr dev_addr; + u8 vendor_id[8]; + u8 product_id[16]; + u8 revision[4]; + u64 capacity; + u32 block_size; + u8 dev_type; + + union { + u8 dev_status; + #define VDADEVSTAT_INVALID 0x00 + #define VDADEVSTAT_CORRUPT VDADEVSTAT_INVALID + #define VDADEVSTAT_ASSIGNED 0x01 + #define VDADEVSTAT_SPARE 0x02 + #define VDADEVSTAT_UNAVAIL 0x03 + #define VDADEVSTAT_PT_MAINT 0x04 + #define VDADEVSTAT_LCLSPARE 0x05 + #define VDADEVSTAT_UNUSEABLE 0x06 + #define VDADEVSTAT_AVAIL 0xFF + + u8 op_ctrl; + #define VDA_DEV_OP_CTRL_START 0x01 + #define VDA_DEV_OP_CTRL_HALT 0x02 + #define VDA_DEV_OP_CTRL_RESUME 0x03 + #define VDA_DEV_OP_CTRL_CANCEL 0x04 + }; + + u8 member_state; + #define VDAMBRSTATE_ONLINE 0x00 + #define VDAMBRSTATE_DEGRADED 0x01 + #define VDAMBRSTATE_UNAVAIL 0x02 + #define VDAMBRSTATE_FAULTED 0x03 + #define VDAMBRSTATE_MISREAD 0x04 + #define VDAMBRSTATE_INCOMPAT 0x05 + + u8 operation; + #define VDAOP_NONE 0x00 + #define VDAOP_REBUILD 0x01 + #define VDAOP_ERASE 0x02 + #define VDAOP_PATTERN 0x03 + #define VDAOP_CONVERSION 0x04 + #define VDAOP_FULL_INIT 0x05 + #define VDAOP_QUICK_INIT 0x06 + #define VDAOP_SECT_SCAN 0x07 + #define VDAOP_SECT_SCAN_PARITY 0x08 + #define VDAOP_SECT_SCAN_PARITY_FIX 0x09 + #define VDAOP_RECOV_REBUILD 0x0A + + u8 op_status; + #define VDAOPSTAT_OK 0x00 + #define VDAOPSTAT_FAULTED 0x01 + #define VDAOPSTAT_HALTED 0x02 + #define VDAOPSTAT_INT 0x03 + + u8 progress; /* 0 - 100% */ + u16 ses_dev_index; + #define VDASESDI_INVALID 0xFFFF + + u8 serial_no[32]; + + union { + u16 target_id; + #define VDATGTID_INVALID 0xFFFF + + u16 features_mask; + }; + + u16 lun; + u16 features; + #define VDADEVFEAT_ENC_SERV 0x0001 + #define VDADEVFEAT_IDENT 0x0002 + #define VDADEVFEAT_DH_SUPP 0x0004 + #define VDADEVFEAT_PHYS_ID 0x0008 + + u8 ses_element_id; + u8 link_speed; + #define VDALINKSPEED_UNKNOWN 0x00 + #define VDALINKSPEED_1GB 0x01 + #define VDALINKSPEED_1_5GB 0x02 + #define VDALINKSPEED_2GB 0x03 + #define VDALINKSPEED_3GB 0x04 + #define VDALINKSPEED_4GB 0x05 + #define VDALINKSPEED_6GB 0x06 + #define VDALINKSPEED_8GB 0x07 + + u16 phys_target_id; + u8 reserved[2]; +}; + + +/*! struct atto_vda_devinfo2 is a replacement for atto_vda_devinfo. it + * extends beyond the 0x70 bytes allowed in atto_vda_mgmt_req; therefore, + * the entire structure is DMAed between the firmware and host buffer and + * the data will always be in little endian format. + */ + +struct __packed atto_vda_devinfo2 { + struct atto_dev_addr dev_addr; + u8 vendor_id[8]; + u8 product_id[16]; + u8 revision[4]; + u64 capacity; + u32 block_size; + u8 dev_type; + u8 dev_status; + u8 member_state; + u8 operation; + u8 op_status; + u8 progress; + u16 ses_dev_index; + u8 serial_no[32]; + union { + u16 target_id; + u16 features_mask; + }; + + u16 lun; + u16 features; + u8 ses_element_id; + u8 link_speed; + u16 phys_target_id; + u8 reserved[2]; + +/* This is where fields specific to struct atto_vda_devinfo2 begin. Note + * that the structure version started at one so applications that unionize this + * structure with atto_vda_devinfo can differentiate them if desired. 
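+ *
+ * An illustrative, non-normative sketch of that version check, gating
+ * access to fields added in later structure versions (see the notes on
+ * grp_name and dev_addr_list below):
+ *
+ *	const struct atto_vda_devinfo2 *info2 = buf;
+ *
+ *	if (info2->version >= VDADEVINFO_VERSION2)
+ *		... grp_name is valid ...
+ *	if (info2->version >= VDADEVINFO_VERSION3)
+ *		... dev_addr_list[0 .. num_dev_addr - 1] is valid ...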
+ */ + + u8 version; + #define VDADEVINFO_VERSION0 0x00 + #define VDADEVINFO_VERSION1 0x01 + #define VDADEVINFO_VERSION2 0x02 + #define VDADEVINFO_VERSION3 0x03 + #define VDADEVINFO_VERSION VDADEVINFO_VERSION3 + + u8 reserved2[3]; + + /* sector scanning fields */ + + u32 ss_curr_errors; + u64 ss_curr_scanned; + u32 ss_curr_recvrd; + u32 ss_scan_length; + u32 ss_total_errors; + u32 ss_total_recvrd; + u32 ss_num_scans; + + /* grp_name was added in version 2 of this structure. */ + + char grp_name[15]; + u8 reserved3[4]; + + /* dev_addr_list was added in version 3 of this structure. */ + + u8 num_dev_addr; + struct atto_dev_addr2 dev_addr_list[8]; +}; + + +struct __packed atto_vda_grp_info { + u8 grp_index; + #define VDA_MAX_RAID_GROUPS 32 + + char grp_name[15]; + u64 capacity; + u32 block_size; + u32 interleave; + u8 type; + #define VDA_GRP_TYPE_RAID0 0 + #define VDA_GRP_TYPE_RAID1 1 + #define VDA_GRP_TYPE_RAID4 4 + #define VDA_GRP_TYPE_RAID5 5 + #define VDA_GRP_TYPE_RAID6 6 + #define VDA_GRP_TYPE_RAID10 10 + #define VDA_GRP_TYPE_RAID40 40 + #define VDA_GRP_TYPE_RAID50 50 + #define VDA_GRP_TYPE_RAID60 60 + #define VDA_GRP_TYPE_DVRAID_HS 252 + #define VDA_GRP_TYPE_DVRAID_NOHS 253 + #define VDA_GRP_TYPE_JBOD 254 + #define VDA_GRP_TYPE_SPARE 255 + + union { + u8 status; + #define VDA_GRP_STAT_INVALID 0x00 + #define VDA_GRP_STAT_NEW 0x01 + #define VDA_GRP_STAT_WAITING 0x02 + #define VDA_GRP_STAT_ONLINE 0x03 + #define VDA_GRP_STAT_DEGRADED 0x04 + #define VDA_GRP_STAT_OFFLINE 0x05 + #define VDA_GRP_STAT_DELETED 0x06 + #define VDA_GRP_STAT_RECOV_BASIC 0x07 + #define VDA_GRP_STAT_RECOV_EXTREME 0x08 + + u8 op_ctrl; + #define VDA_GRP_OP_CTRL_START 0x01 + #define VDA_GRP_OP_CTRL_HALT 0x02 + #define VDA_GRP_OP_CTRL_RESUME 0x03 + #define VDA_GRP_OP_CTRL_CANCEL 0x04 + }; + + u8 rebuild_state; + #define VDA_RBLD_NONE 0x00 + #define VDA_RBLD_REBUILD 0x01 + #define VDA_RBLD_ERASE 0x02 + #define VDA_RBLD_PATTERN 0x03 + #define VDA_RBLD_CONV 0x04 + #define VDA_RBLD_FULL_INIT 0x05 + #define VDA_RBLD_QUICK_INIT 0x06 + #define VDA_RBLD_SECT_SCAN 0x07 + #define VDA_RBLD_SECT_SCAN_PARITY 0x08 + #define VDA_RBLD_SECT_SCAN_PARITY_FIX 0x09 + #define VDA_RBLD_RECOV_REBUILD 0x0A + #define VDA_RBLD_RECOV_BASIC 0x0B + #define VDA_RBLD_RECOV_EXTREME 0x0C + + u8 span_depth; + u8 progress; + u8 mirror_width; + u8 stripe_width; + u8 member_cnt; + + union { + u16 members[32]; + #define VDA_MEMBER_MISSING 0xFFFF + #define VDA_MEMBER_NEW 0xFFFE + u16 features_mask; + }; + + u16 features; + #define VDA_GRP_FEAT_HOTSWAP 0x0001 + #define VDA_GRP_FEAT_SPDRD_MASK 0x0006 + #define VDA_GRP_FEAT_SPDRD_DIS 0x0000 + #define VDA_GRP_FEAT_SPDRD_ENB 0x0002 + #define VDA_GRP_FEAT_SPDRD_AUTO 0x0004 + #define VDA_GRP_FEAT_IDENT 0x0008 + #define VDA_GRP_FEAT_RBLDPRI_MASK 0x0030 + #define VDA_GRP_FEAT_RBLDPRI_LOW 0x0010 + #define VDA_GRP_FEAT_RBLDPRI_SAME 0x0020 + #define VDA_GRP_FEAT_RBLDPRI_HIGH 0x0030 + #define VDA_GRP_FEAT_WRITE_CACHE 0x0040 + #define VDA_GRP_FEAT_RBLD_RESUME 0x0080 + #define VDA_GRP_FEAT_SECT_RESUME 0x0100 + #define VDA_GRP_FEAT_INIT_RESUME 0x0200 + #define VDA_GRP_FEAT_SSD 0x0400 + #define VDA_GRP_FEAT_BOOT_DEV 0x0800 + + /* + * for backward compatibility, a prefetch value of zero means the + * setting is ignored/unsupported. therefore, the firmware supported + * 0-6 values are incremented to 1-7. 
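+ *
+ * A minimal worked example of the encoding (editor's sketch; fw_value is
+ * a hypothetical variable holding the firmware's native 0-6 setting):
+ *
+ *	prefetch = supported ? fw_value + 1 : 0;	encode: 0-6 -> 1-7
+ *	if (prefetch)
+ *		fw_value = prefetch - 1;		decode: 0 = unsupported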
+ */ + + u8 prefetch; + u8 op_status; + #define VDAGRPOPSTAT_MASK 0x0F + #define VDAGRPOPSTAT_INVALID 0x00 + #define VDAGRPOPSTAT_OK 0x01 + #define VDAGRPOPSTAT_FAULTED 0x02 + #define VDAGRPOPSTAT_HALTED 0x03 + #define VDAGRPOPSTAT_INT 0x04 + #define VDAGRPOPPROC_MASK 0xF0 + #define VDAGRPOPPROC_STARTABLE 0x10 + #define VDAGRPOPPROC_CANCELABLE 0x20 + #define VDAGRPOPPROC_RESUMABLE 0x40 + #define VDAGRPOPPROC_HALTABLE 0x80 + u8 over_provision; + u8 reserved[3]; + +}; + + +struct __packed atto_vdapart_info { + u8 part_no; + #define VDA_MAX_PARTITIONS 128 + + char grp_name[15]; + u64 part_size; + u64 start_lba; + u32 block_size; + u16 target_id; + u8 LUN; + char serial_no[41]; + u8 features; + #define VDAPI_FEAT_WRITE_CACHE 0x01 + + u8 reserved[7]; +}; + + +struct __packed atto_vda_dh_info { + u8 req_type; + #define VDADH_RQTYPE_CACHE 0x01 + #define VDADH_RQTYPE_FETCH 0x02 + #define VDADH_RQTYPE_SET_STAT 0x03 + #define VDADH_RQTYPE_GET_STAT 0x04 + + u8 req_qual; + #define VDADH_RQQUAL_SMART 0x01 + #define VDADH_RQQUAL_MEDDEF 0x02 + #define VDADH_RQQUAL_INFOEXC 0x04 + + u8 num_smart_attribs; + u8 status; + #define VDADH_STAT_DISABLE 0x00 + #define VDADH_STAT_ENABLE 0x01 + + u32 med_defect_cnt; + u32 info_exc_cnt; + u8 smart_status; + #define VDADH_SMARTSTAT_OK 0x00 + #define VDADH_SMARTSTAT_ERR 0x01 + + u8 reserved[35]; + struct atto_vda_sge sge[1]; +}; + + +struct __packed atto_vda_dh_smart { + u8 attrib_id; + u8 current_val; + u8 worst; + u8 threshold; + u8 raw_data[6]; + u8 raw_attrib_status; + #define VDADHSM_RAWSTAT_PREFAIL_WARRANTY 0x01 + #define VDADHSM_RAWSTAT_ONLINE_COLLECTION 0x02 + #define VDADHSM_RAWSTAT_PERFORMANCE_ATTR 0x04 + #define VDADHSM_RAWSTAT_ERROR_RATE_ATTR 0x08 + #define VDADHSM_RAWSTAT_EVENT_COUNT_ATTR 0x10 + #define VDADHSM_RAWSTAT_SELF_PRESERVING_ATTR 0x20 + + u8 calc_attrib_status; + #define VDADHSM_CALCSTAT_UNKNOWN 0x00 + #define VDADHSM_CALCSTAT_GOOD 0x01 + #define VDADHSM_CALCSTAT_PREFAIL 0x02 + #define VDADHSM_CALCSTAT_OLDAGE 0x03 + + u8 reserved[4]; +}; + + +struct __packed atto_vda_metrics_info { + u8 data_version; + #define VDAMET_VERSION0 0x00 + #define VDAMET_VERSION VDAMET_VERSION0 + + u8 metrics_action; + #define VDAMET_METACT_NONE 0x00 + #define VDAMET_METACT_START 0x01 + #define VDAMET_METACT_STOP 0x02 + #define VDAMET_METACT_RETRIEVE 0x03 + #define VDAMET_METACT_CLEAR 0x04 + + u8 test_action; + #define VDAMET_TSTACT_NONE 0x00 + #define VDAMET_TSTACT_STRT_INIT 0x01 + #define VDAMET_TSTACT_STRT_READ 0x02 + #define VDAMET_TSTACT_STRT_VERIFY 0x03 + #define VDAMET_TSTACT_STRT_INIT_VERIFY 0x04 + #define VDAMET_TSTACT_STOP 0x05 + + u8 num_dev_indexes; + #define VDAMET_ALL_DEVICES 0xFF + + u16 dev_indexes[32]; + u8 reserved[12]; + struct atto_vda_sge sge[1]; +}; + + +struct __packed atto_vda_metrics_data { + u16 dev_index; + u16 length; + #define VDAMD_LEN_LAST 0x8000 + #define VDAMD_LEN_MASK 0x0FFF + + u32 flags; + #define VDAMDF_RUN 0x00000007 + #define VDAMDF_RUN_READ 0x00000001 + #define VDAMDF_RUN_WRITE 0x00000002 + #define VDAMDF_RUN_ALL 0x00000004 + #define VDAMDF_READ 0x00000010 + #define VDAMDF_WRITE 0x00000020 + #define VDAMDF_ALL 0x00000040 + #define VDAMDF_DRIVETEST 0x40000000 + #define VDAMDF_NEW 0x80000000 + + u64 total_read_data; + u64 total_write_data; + u64 total_read_io; + u64 total_write_io; + u64 read_start_time; + u64 read_stop_time; + u64 write_start_time; + u64 write_stop_time; + u64 read_maxio_time; + u64 write_maxio_time; + u64 read_totalio_time; + u64 write_totalio_time; + u64 read_total_errs; + u64 write_total_errs; + 
u64 read_recvrd_errs; + u64 write_recvrd_errs; + u64 miscompares; +}; + + +struct __packed atto_vda_schedule_info { + u8 schedule_type; + #define VDASI_SCHTYPE_ONETIME 0x01 + #define VDASI_SCHTYPE_DAILY 0x02 + #define VDASI_SCHTYPE_WEEKLY 0x03 + + u8 operation; + #define VDASI_OP_NONE 0x00 + #define VDASI_OP_CREATE 0x01 + #define VDASI_OP_CANCEL 0x02 + + u8 hour; + u8 minute; + u8 day; + #define VDASI_DAY_NONE 0x00 + + u8 progress; + #define VDASI_PROG_NONE 0xFF + + u8 event_type; + #define VDASI_EVTTYPE_SECT_SCAN 0x01 + #define VDASI_EVTTYPE_SECT_SCAN_PARITY 0x02 + #define VDASI_EVTTYPE_SECT_SCAN_PARITY_FIX 0x03 + + u8 recurrences; + #define VDASI_RECUR_FOREVER 0x00 + + u32 id; + #define VDASI_ID_NONE 0x00 + + char grp_name[15]; + u8 reserved[85]; +}; + + +struct __packed atto_vda_n_vcache_info { + u8 super_cap_status; + #define VDANVCI_SUPERCAP_NOT_PRESENT 0x00 + #define VDANVCI_SUPERCAP_FULLY_CHARGED 0x01 + #define VDANVCI_SUPERCAP_NOT_CHARGED 0x02 + + u8 nvcache_module_status; + #define VDANVCI_NVCACHEMODULE_NOT_PRESENT 0x00 + #define VDANVCI_NVCACHEMODULE_PRESENT 0x01 + + u8 protection_mode; + #define VDANVCI_PROTMODE_HI_PROTECT 0x00 + #define VDANVCI_PROTMODE_HI_PERFORM 0x01 + + u8 reserved[109]; +}; + + +struct __packed atto_vda_buzzer_info { + u8 status; + #define VDABUZZI_BUZZER_OFF 0x00 + #define VDABUZZI_BUZZER_ON 0x01 + #define VDABUZZI_BUZZER_LAST 0x02 + + u8 reserved[3]; + u32 duration; + #define VDABUZZI_DURATION_INDEFINITE 0xffffffff + + u8 reserved2[104]; +}; + + +struct __packed atto_vda_adapter_info { + u8 version; + #define VDAADAPINFO_VERSION0 0x00 + #define VDAADAPINFO_VERSION VDAADAPINFO_VERSION0 + + u8 reserved; + signed short utc_offset; + u32 utc_time; + u32 features; + #define VDA_ADAP_FEAT_IDENT 0x0001 + #define VDA_ADAP_FEAT_BUZZ_ERR 0x0002 + #define VDA_ADAP_FEAT_UTC_TIME 0x0004 + + u32 valid_features; + char active_config[33]; + u8 temp_count; + u8 fan_count; + u8 reserved3[61]; +}; + + +struct __packed atto_vda_temp_info { + u8 temp_index; + u8 max_op_temp; + u8 min_op_temp; + u8 op_temp_warn; + u8 temperature; + u8 type; + #define VDA_TEMP_TYPE_CPU 1 + + u8 reserved[106]; +}; + + +struct __packed atto_vda_fan_info { + u8 fan_index; + u8 status; + #define VDA_FAN_STAT_UNKNOWN 0 + #define VDA_FAN_STAT_NORMAL 1 + #define VDA_FAN_STAT_FAIL 2 + + u16 crit_threshold; + u16 warn_threshold; + u16 speed; + u8 reserved[104]; +}; + + +/* VDA management commands */ + +#define VDAMGT_DEV_SCAN 0x00 +#define VDAMGT_DEV_INFO 0x01 +#define VDAMGT_DEV_CLEAN 0x02 +#define VDAMGT_DEV_IDENTIFY 0x03 +#define VDAMGT_DEV_IDENTSTOP 0x04 +#define VDAMGT_DEV_PT_INFO 0x05 +#define VDAMGT_DEV_FEATURES 0x06 +#define VDAMGT_DEV_PT_FEATURES 0x07 +#define VDAMGT_DEV_HEALTH_REQ 0x08 +#define VDAMGT_DEV_METRICS 0x09 +#define VDAMGT_DEV_INFO2 0x0A +#define VDAMGT_DEV_OPERATION 0x0B +#define VDAMGT_DEV_INFO2_BYADDR 0x0C +#define VDAMGT_GRP_INFO 0x10 +#define VDAMGT_GRP_CREATE 0x11 +#define VDAMGT_GRP_DELETE 0x12 +#define VDAMGT_ADD_STORAGE 0x13 +#define VDAMGT_MEMBER_ADD 0x14 +#define VDAMGT_GRP_COMMIT 0x15 +#define VDAMGT_GRP_REBUILD 0x16 +#define VDAMGT_GRP_COMMIT_INIT 0x17 +#define VDAMGT_QUICK_RAID 0x18 +#define VDAMGT_GRP_FEATURES 0x19 +#define VDAMGT_GRP_COMMIT_INIT_AUTOMAP 0x1A +#define VDAMGT_QUICK_RAID_INIT_AUTOMAP 0x1B +#define VDAMGT_GRP_OPERATION 0x1C +#define VDAMGT_CFG_SAVE 0x20 +#define VDAMGT_LAST_ERROR 0x21 +#define VDAMGT_ADAP_INFO 0x22 +#define VDAMGT_ADAP_FEATURES 0x23 +#define VDAMGT_TEMP_INFO 0x24 +#define VDAMGT_FAN_INFO 0x25 +#define VDAMGT_PART_INFO 0x30 
+#define VDAMGT_PART_MAP 0x31 +#define VDAMGT_PART_UNMAP 0x32 +#define VDAMGT_PART_AUTOMAP 0x33 +#define VDAMGT_PART_SPLIT 0x34 +#define VDAMGT_PART_MERGE 0x35 +#define VDAMGT_SPARE_LIST 0x40 +#define VDAMGT_SPARE_ADD 0x41 +#define VDAMGT_SPARE_REMOVE 0x42 +#define VDAMGT_LOCAL_SPARE_ADD 0x43 +#define VDAMGT_SCHEDULE_EVENT 0x50 +#define VDAMGT_SCHEDULE_INFO 0x51 +#define VDAMGT_NVCACHE_INFO 0x60 +#define VDAMGT_NVCACHE_SET 0x61 +#define VDAMGT_BUZZER_INFO 0x70 +#define VDAMGT_BUZZER_SET 0x71 + + +struct __packed atto_vda_ae_hdr { + u8 bylength; + u8 byflags; + #define VDAAE_HDRF_EVENT_ACK 0x01 + + u8 byversion; + #define VDAAE_HDR_VER_0 0 + + u8 bytype; + #define VDAAE_HDR_TYPE_RAID 1 + #define VDAAE_HDR_TYPE_LU 2 + #define VDAAE_HDR_TYPE_DISK 3 + #define VDAAE_HDR_TYPE_RESET 4 + #define VDAAE_HDR_TYPE_LOG_INFO 5 + #define VDAAE_HDR_TYPE_LOG_WARN 6 + #define VDAAE_HDR_TYPE_LOG_CRIT 7 + #define VDAAE_HDR_TYPE_LOG_FAIL 8 + #define VDAAE_HDR_TYPE_NVC 9 + #define VDAAE_HDR_TYPE_TLG_INFO 10 + #define VDAAE_HDR_TYPE_TLG_WARN 11 + #define VDAAE_HDR_TYPE_TLG_CRIT 12 + #define VDAAE_HDR_TYPE_PWRMGT 13 + #define VDAAE_HDR_TYPE_MUTE 14 + #define VDAAE_HDR_TYPE_DEV 15 +}; + + +struct __packed atto_vda_ae_raid { + struct atto_vda_ae_hdr hdr; + u32 dwflags; + #define VDAAE_GROUP_STATE 0x00000001 + #define VDAAE_RBLD_STATE 0x00000002 + #define VDAAE_RBLD_PROG 0x00000004 + #define VDAAE_MEMBER_CHG 0x00000008 + #define VDAAE_PART_CHG 0x00000010 + #define VDAAE_MEM_STATE_CHG 0x00000020 + + u8 bygroup_state; + #define VDAAE_RAID_INVALID 0 + #define VDAAE_RAID_NEW 1 + #define VDAAE_RAID_WAITING 2 + #define VDAAE_RAID_ONLINE 3 + #define VDAAE_RAID_DEGRADED 4 + #define VDAAE_RAID_OFFLINE 5 + #define VDAAE_RAID_DELETED 6 + #define VDAAE_RAID_BASIC 7 + #define VDAAE_RAID_EXTREME 8 + #define VDAAE_RAID_UNKNOWN 9 + + u8 byrebuild_state; + #define VDAAE_RBLD_NONE 0 + #define VDAAE_RBLD_REBUILD 1 + #define VDAAE_RBLD_ERASE 2 + #define VDAAE_RBLD_PATTERN 3 + #define VDAAE_RBLD_CONV 4 + #define VDAAE_RBLD_FULL_INIT 5 + #define VDAAE_RBLD_QUICK_INIT 6 + #define VDAAE_RBLD_SECT_SCAN 7 + #define VDAAE_RBLD_SECT_SCAN_PARITY 8 + #define VDAAE_RBLD_SECT_SCAN_PARITY_FIX 9 + #define VDAAE_RBLD_RECOV_REBUILD 10 + #define VDAAE_RBLD_UNKNOWN 11 + + u8 byrebuild_progress; + u8 op_status; + #define VDAAE_GRPOPSTAT_MASK 0x0F + #define VDAAE_GRPOPSTAT_INVALID 0x00 + #define VDAAE_GRPOPSTAT_OK 0x01 + #define VDAAE_GRPOPSTAT_FAULTED 0x02 + #define VDAAE_GRPOPSTAT_HALTED 0x03 + #define VDAAE_GRPOPSTAT_INT 0x04 + #define VDAAE_GRPOPPROC_MASK 0xF0 + #define VDAAE_GRPOPPROC_STARTABLE 0x10 + #define VDAAE_GRPOPPROC_CANCELABLE 0x20 + #define VDAAE_GRPOPPROC_RESUMABLE 0x40 + #define VDAAE_GRPOPPROC_HALTABLE 0x80 + char acname[15]; + u8 byreserved; + u8 byreserved2[0x80 - 0x1C]; +}; + + +struct __packed atto_vda_ae_lu_tgt_lun { + u16 wtarget_id; + u8 bylun; + u8 byreserved; +}; + + +struct __packed atto_vda_ae_lu_tgt_lun_raid { + u16 wtarget_id; + u8 bylun; + u8 byreserved; + u32 dwinterleave; + u32 dwblock_size; +}; + + +struct __packed atto_vda_ae_lu { + struct atto_vda_ae_hdr hdr; + u32 dwevent; + #define VDAAE_LU_DISC 0x00000001 + #define VDAAE_LU_LOST 0x00000002 + #define VDAAE_LU_STATE 0x00000004 + #define VDAAE_LU_PASSTHROUGH 0x10000000 + #define VDAAE_LU_PHYS_ID 0x20000000 + + u8 bystate; + #define VDAAE_LU_UNDEFINED 0 + #define VDAAE_LU_NOT_PRESENT 1 + #define VDAAE_LU_OFFLINE 2 + #define VDAAE_LU_ONLINE 3 + #define VDAAE_LU_DEGRADED 4 + #define VDAAE_LU_FACTORY_DISABLED 5 + #define VDAAE_LU_DELETED 6 + #define VDAAE_LU_BUSSCAN 7 + 
#define VDAAE_LU_UNKNOWN 8 + + u8 byreserved; + u16 wphys_target_id; + + union { + struct atto_vda_ae_lu_tgt_lun tgtlun; + struct atto_vda_ae_lu_tgt_lun_raid tgtlun_raid; + } id; +}; + + +struct __packed atto_vda_ae_disk { + struct atto_vda_ae_hdr hdr; +}; + + +#define VDAAE_LOG_STRSZ 64 + +struct __packed atto_vda_ae_log { + struct atto_vda_ae_hdr hdr; + char aclog_ascii[VDAAE_LOG_STRSZ]; +}; + + +#define VDAAE_TLG_STRSZ 56 + +struct __packed atto_vda_ae_timestamp_log { + struct atto_vda_ae_hdr hdr; + u32 dwtimestamp; + char aclog_ascii[VDAAE_TLG_STRSZ]; +}; + + +struct __packed atto_vda_ae_nvc { + struct atto_vda_ae_hdr hdr; +}; + + +struct __packed atto_vda_ae_dev { + struct atto_vda_ae_hdr hdr; + struct atto_dev_addr devaddr; +}; + + +union atto_vda_ae { + struct atto_vda_ae_hdr hdr; + struct atto_vda_ae_disk disk; + struct atto_vda_ae_lu lu; + struct atto_vda_ae_raid raid; + struct atto_vda_ae_log log; + struct atto_vda_ae_timestamp_log tslog; + struct atto_vda_ae_nvc nvcache; + struct atto_vda_ae_dev dev; +}; + + +struct __packed atto_vda_date_and_time { + u8 flags; + #define VDA_DT_DAY_MASK 0x07 + #define VDA_DT_DAY_NONE 0x00 + #define VDA_DT_DAY_SUN 0x01 + #define VDA_DT_DAY_MON 0x02 + #define VDA_DT_DAY_TUE 0x03 + #define VDA_DT_DAY_WED 0x04 + #define VDA_DT_DAY_THU 0x05 + #define VDA_DT_DAY_FRI 0x06 + #define VDA_DT_DAY_SAT 0x07 + #define VDA_DT_PM 0x40 + #define VDA_DT_MILITARY 0x80 + + u8 seconds; + u8 minutes; + u8 hours; + u8 day; + u8 month; + u16 year; +}; + +#define SGE_LEN_LIMIT 0x003FFFFF /*! mask of segment length */ +#define SGE_LEN_MAX 0x003FF000 /*! maximum segment length */ +#define SGE_LAST 0x01000000 /*! last entry */ +#define SGE_ADDR_64 0x04000000 /*! 64-bit addressing flag */ +#define SGE_CHAIN 0x80000000 /*! chain descriptor flag */ +#define SGE_CHAIN_LEN 0x0000FFFF /*! mask of length in chain entries */ +#define SGE_CHAIN_SZ 0x00FF0000 /*! mask of size of chained buffer */ + + +struct __packed atto_vda_cfg_init { + struct atto_vda_date_and_time date_time; + u32 sgl_page_size; + u32 vda_version; + u32 fw_version; + u32 fw_build; + u32 fw_release; + u32 epoch_time; + u32 ioctl_tunnel; + #define VDA_ITF_MEM_RW 0x00000001 + #define VDA_ITF_TRACE 0x00000002 + #define VDA_ITF_SCSI_PASS_THRU 0x00000004 + #define VDA_ITF_GET_DEV_ADDR 0x00000008 + #define VDA_ITF_PHY_CTRL 0x00000010 + #define VDA_ITF_CONN_CTRL 0x00000020 + #define VDA_ITF_GET_DEV_INFO 0x00000040 + + u32 num_targets_backend; + u8 reserved[0x48]; +}; + + +/* configuration commands */ + +#define VDA_CFG_INIT 0x00 +#define VDA_CFG_GET_INIT 0x01 +#define VDA_CFG_GET_INIT2 0x02 + + +/*! physical region descriptor (PRD) aka scatter/gather entry */ + +struct __packed atto_physical_region_description { + u64 address; + u32 ctl_len; + #define PRD_LEN_LIMIT 0x003FFFFF + #define PRD_LEN_MAX 0x003FF000 + #define PRD_NXT_PRD_CNT 0x0000007F + #define PRD_CHAIN 0x01000000 + #define PRD_DATA 0x00000000 + #define PRD_INT_SEL 0xF0000000 + #define PRD_INT_SEL_F0 0x00000000 + #define PRD_INT_SEL_F1 0x40000000 + #define PRD_INT_SEL_F2 0x80000000 + #define PRD_INT_SEL_F3 0xc0000000 + #define PRD_INT_SEL_SRAM 0x10000000 + #define PRD_INT_SEL_PBSR 0x20000000 + +}; + +/* Request types. NOTE that ALL requests have the same layout for the first + * few bytes. 
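+ *
+ * An illustrative consequence (editor's sketch; handle_scsi is a
+ * hypothetical name): a completion path may inspect the common prefix
+ * before dispatching on the function code:
+ *
+ *	struct atto_vda_req_header *hdr = (struct atto_vda_req_header *)vrq;
+ *
+ *	if (hdr->function == VDA_FUNC_SCSI)
+ *		handle_scsi((struct atto_vda_scsi_req *)vrq);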
+ */ +struct __packed atto_vda_req_header { + u32 length; + u8 function; + u8 variable1; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; +}; + + +#define FCP_CDB_SIZE 16 + +struct __packed atto_vda_scsi_req { + u32 length; + u8 function; /* VDA_FUNC_SCSI */ + u8 sense_len; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u32 flags; + #define FCP_CMND_LUN_MASK 0x000000FF + #define FCP_CMND_TA_MASK 0x00000700 + #define FCP_CMND_TA_SIMPL_Q 0x00000000 + #define FCP_CMND_TA_HEAD_Q 0x00000100 + #define FCP_CMND_TA_ORDRD_Q 0x00000200 + #define FCP_CMND_TA_ACA 0x00000400 + #define FCP_CMND_PRI_MASK 0x00007800 + #define FCP_CMND_TM_MASK 0x00FF0000 + #define FCP_CMND_ATS 0x00020000 + #define FCP_CMND_CTS 0x00040000 + #define FCP_CMND_LRS 0x00100000 + #define FCP_CMND_TRS 0x00200000 + #define FCP_CMND_CLA 0x00400000 + #define FCP_CMND_TRM 0x00800000 + #define FCP_CMND_DATA_DIR 0x03000000 + #define FCP_CMND_WRD 0x01000000 + #define FCP_CMND_RDD 0x02000000 + + u8 cdb[FCP_CDB_SIZE]; + union { + struct __packed { + u64 ppsense_buf; + u16 target_id; + u8 iblk_cnt_prd; + u8 reserved; + }; + + struct atto_physical_region_description sense_buff_prd; + }; + + union { + struct atto_vda_sge sge[1]; + + u32 abort_handle; + u32 dwords[245]; + struct atto_physical_region_description prd[1]; + } u; +}; + + +struct __packed atto_vda_flash_req { + u32 length; + u8 function; /* VDA_FUNC_FLASH */ + u8 sub_func; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u32 flash_addr; + u8 checksum; + u8 rsvd[3]; + + union { + struct { + char file_name[16]; /* 8.3 fname, NULL term, wc=* */ + struct atto_vda_sge sge[1]; + } file; + + struct atto_vda_sge sge[1]; + struct atto_physical_region_description prde[2]; + } data; +}; + + +struct __packed atto_vda_diag_req { + u32 length; + u8 function; /* VDA_FUNC_DIAG */ + u8 sub_func; + #define VDA_DIAG_STATUS 0x00 + #define VDA_DIAG_RESET 0x01 + #define VDA_DIAG_PAUSE 0x02 + #define VDA_DIAG_RESUME 0x03 + #define VDA_DIAG_READ 0x04 + #define VDA_DIAG_WRITE 0x05 + + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u32 rsvd; + u64 local_addr; + struct atto_vda_sge sge[1]; +}; + + +struct __packed atto_vda_ae_req { + u32 length; + u8 function; /* VDA_FUNC_AE */ + u8 reserved1; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + + union { + struct atto_vda_sge sge[1]; + struct atto_physical_region_description prde[1]; + }; +}; + + +struct __packed atto_vda_cli_req { + u32 length; + u8 function; /* VDA_FUNC_CLI */ + u8 reserved1; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u32 cmd_rsp_len; + struct atto_vda_sge sge[1]; +}; + + +struct __packed atto_vda_ioctl_req { + u32 length; + u8 function; /* VDA_FUNC_IOCTL */ + u8 sub_func; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + + union { + struct atto_vda_sge reserved_sge; + struct atto_physical_region_description reserved_prde; + }; + + union { + struct { + u32 ctrl_code; + u16 target_id; + u8 lun; + u8 reserved; + } csmi; + }; + + union { + struct atto_vda_sge sge[1]; + struct atto_physical_region_description prde[1]; + }; +}; + + +struct __packed atto_vda_cfg_req { + u32 length; + u8 function; /* VDA_FUNC_CFG */ + u8 sub_func; + u8 rsvd1; + u8 sg_list_offset; + u32 handle; + + union { + u8 bytes[116]; + struct atto_vda_cfg_init init; + struct atto_vda_sge sge; + struct atto_physical_region_description prde; + } data; +}; + + +struct __packed atto_vda_mgmt_req { + u32 length; + u8 function; /* VDA_FUNC_MGT */ + u8 mgt_func; + u8 chain_offset; + u8 sg_list_offset; + u32 handle; + u8 
scan_generation; + u8 payld_sglst_offset; + u16 dev_index; + u32 payld_length; + u32 pad; + union { + struct atto_vda_sge sge[2]; + struct atto_physical_region_description prde[2]; + }; + struct atto_vda_sge payld_sge[1]; +}; + + +union atto_vda_req { + struct atto_vda_scsi_req scsi; + struct atto_vda_flash_req flash; + struct atto_vda_diag_req diag; + struct atto_vda_ae_req ae; + struct atto_vda_cli_req cli; + struct atto_vda_ioctl_req ioctl; + struct atto_vda_cfg_req cfg; + struct atto_vda_mgmt_req mgt; + u8 bytes[1024]; +}; + +/* Outbound response structures */ + +struct __packed atto_vda_scsi_rsp { + u8 scsi_stat; + u8 sense_len; + u8 rsvd[2]; + u32 residual_length; +}; + +struct __packed atto_vda_flash_rsp { + u32 file_size; +}; + +struct __packed atto_vda_ae_rsp { + u32 length; +}; + +struct __packed atto_vda_cli_rsp { + u32 cmd_rsp_len; +}; + +struct __packed atto_vda_ioctl_rsp { + union { + struct { + u32 csmi_status; + u16 target_id; + u8 lun; + u8 reserved; + } csmi; + }; +}; + +struct __packed atto_vda_cfg_rsp { + u16 vda_version; + u16 fw_release; + u32 fw_build; +}; + +struct __packed atto_vda_mgmt_rsp { + u32 length; + u16 dev_index; + u8 scan_generation; +}; + +union atto_vda_func_rsp { + struct atto_vda_scsi_rsp scsi_rsp; + struct atto_vda_flash_rsp flash_rsp; + struct atto_vda_ae_rsp ae_rsp; + struct atto_vda_cli_rsp cli_rsp; + struct atto_vda_ioctl_rsp ioctl_rsp; + struct atto_vda_cfg_rsp cfg_rsp; + struct atto_vda_mgmt_rsp mgt_rsp; + u32 dwords[2]; +}; + +struct __packed atto_vda_ob_rsp { + u32 handle; + u8 req_stat; + u8 rsvd[3]; + + union atto_vda_func_rsp + func_rsp; +}; + +struct __packed atto_vda_ae_data { + u8 event_data[256]; +}; + +struct __packed atto_vda_mgmt_data { + union { + u8 bytes[112]; + struct atto_vda_devinfo dev_info; + struct atto_vda_grp_info grp_info; + struct atto_vdapart_info part_info; + struct atto_vda_dh_info dev_health_info; + struct atto_vda_metrics_info metrics_info; + struct atto_vda_schedule_info sched_info; + struct atto_vda_n_vcache_info nvcache_info; + struct atto_vda_buzzer_info buzzer_info; + } data; +}; + +union atto_vda_rsp_data { + struct atto_vda_ae_data ae_data; + struct atto_vda_mgmt_data mgt_data; + u8 sense_data[252]; + #define SENSE_DATA_SZ 252 + u8 bytes[256]; +}; + +#endif diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h new file mode 100644 index 00000000000..3fd305d6b67 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r.h @@ -0,0 +1,1431 @@ +/* + * linux/drivers/scsi/esas2r/esas2r.h + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/proc_fs.h> +#include <linux/workqueue.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_tcq.h> + +#include "esas2r_log.h" +#include "atioctl.h" +#include "atvda.h" + +#ifndef ESAS2R_H +#define ESAS2R_H + +/* Global Variables */ +extern struct esas2r_adapter *esas2r_adapters[]; +extern u8 *esas2r_buffered_ioctl; +extern dma_addr_t esas2r_buffered_ioctl_addr; +extern u32 esas2r_buffered_ioctl_size; +extern struct pci_dev *esas2r_buffered_ioctl_pcid; +#define SGL_PG_SZ_MIN 64 +#define SGL_PG_SZ_MAX 1024 +extern int sgl_page_size; +#define NUM_SGL_MIN 8 +#define NUM_SGL_MAX 2048 +extern int num_sg_lists; +#define NUM_REQ_MIN 4 +#define NUM_REQ_MAX 256 +extern int num_requests; +#define NUM_AE_MIN 2 +#define NUM_AE_MAX 8 +extern int num_ae_requests; +extern int cmd_per_lun; +extern int can_queue; +extern int esas2r_max_sectors; +extern int sg_tablesize; +extern int interrupt_mode; +extern int num_io_requests; + +/* Macro definitions */ +#define ESAS2R_MAX_ID 255 +#define MAX_ADAPTERS 32 +#define ESAS2R_DRVR_NAME "esas2r" +#define ESAS2R_LONGNAME "ATTO ExpressSAS 6GB RAID Adapter" +#define ESAS2R_MAX_DEVICES 32 +#define ATTONODE_NAME "ATTONode" +#define ESAS2R_MAJOR_REV 1 +#define ESAS2R_MINOR_REV 00 +#define ESAS2R_VERSION_STR DEFINED_NUM_TO_STR(ESAS2R_MAJOR_REV) "." 
\ + DEFINED_NUM_TO_STR(ESAS2R_MINOR_REV) +#define ESAS2R_COPYRIGHT_YEARS "2001-2013" +#define ESAS2R_DEFAULT_SGL_PAGE_SIZE 384 +#define ESAS2R_DEFAULT_CMD_PER_LUN 64 +#define ESAS2R_DEFAULT_NUM_SG_LISTS 1024 +#define DEFINED_NUM_TO_STR(num) NUM_TO_STR(num) +#define NUM_TO_STR(num) #num + +#define ESAS2R_SGL_ALIGN 16 +#define ESAS2R_LIST_ALIGN 16 +#define ESAS2R_LIST_EXTRA ESAS2R_NUM_EXTRA +#define ESAS2R_DATA_BUF_LEN 256 +#define ESAS2R_DEFAULT_TMO 5000 +#define ESAS2R_DISC_BUF_LEN 512 +#define ESAS2R_FWCOREDUMP_SZ 0x80000 +#define ESAS2R_NUM_PHYS 8 +#define ESAS2R_TARG_ID_INV 0xFFFF +#define ESAS2R_INT_STS_MASK MU_INTSTAT_MASK +#define ESAS2R_INT_ENB_MASK MU_INTSTAT_MASK +#define ESAS2R_INT_DIS_MASK 0 +#define ESAS2R_MAX_TARGETS 256 +#define ESAS2R_KOBJ_NAME_LEN 20 + +/* u16 (WORD) component macros */ +#define LOBYTE(w) ((u8)(u16)(w)) +#define HIBYTE(w) ((u8)(((u16)(w)) >> 8)) +#define MAKEWORD(lo, hi) ((u16)((u8)(lo) | ((u16)(u8)(hi) << 8))) + +/* u32 (DWORD) component macros */ +#define LOWORD(d) ((u16)(u32)(d)) +#define HIWORD(d) ((u16)(((u32)(d)) >> 16)) +#define MAKEDWORD(lo, hi) ((u32)((u16)(lo) | ((u32)(u16)(hi) << 16))) + +/* macro to get the lowest nonzero bit of a value */ +#define LOBIT(x) ((x) & (0 - (x))) + +/* These functions are provided to access the chip's control registers. + * The register is specified by its byte offset from the register base + * for the adapter. + */ +#define esas2r_read_register_dword(a, reg) \ + readl((void __iomem *)a->regs + (reg) + MW_REG_OFFSET_HWREG) + +#define esas2r_write_register_dword(a, reg, data) \ + writel(data, (void __iomem *)(a->regs + (reg) + MW_REG_OFFSET_HWREG)) + +#define esas2r_flush_register_dword(a, r) esas2r_read_register_dword(a, r) + +/* This function is provided to access the chip's data window. The + * register is specified by its byte offset from the window base + * for the adapter. + */ +#define esas2r_read_data_byte(a, reg) \ + readb((void __iomem *)a->data_window + (reg)) + +/* ATTO vendor and device Ids */ +#define ATTO_VENDOR_ID 0x117C +#define ATTO_DID_INTEL_IOP348 0x002C +#define ATTO_DID_MV_88RC9580 0x0049 +#define ATTO_DID_MV_88RC9580TS 0x0066 +#define ATTO_DID_MV_88RC9580TSE 0x0067 +#define ATTO_DID_MV_88RC9580TL 0x0068 + +/* ATTO subsystem device Ids */ +#define ATTO_SSDID_TBT 0x4000 +#define ATTO_TSSC_3808 0x4066 +#define ATTO_TSSC_3808E 0x4067 +#define ATTO_TLSH_1068 0x4068 +#define ATTO_ESAS_R680 0x0049 +#define ATTO_ESAS_R608 0x004A +#define ATTO_ESAS_R60F 0x004B +#define ATTO_ESAS_R6F0 0x004C +#define ATTO_ESAS_R644 0x004D +#define ATTO_ESAS_R648 0x004E + +/* + * flash definitions & structures + * define the code types + */ +#define FBT_CPYR 0xAA00 +#define FBT_SETUP 0xAA02 +#define FBT_FLASH_VER 0xAA04 + +/* offsets to various locations in flash */ +#define FLS_OFFSET_BOOT (u32)(0x00700000) +#define FLS_OFFSET_NVR (u32)(0x007C0000) +#define FLS_OFFSET_CPYR FLS_OFFSET_NVR +#define FLS_LENGTH_BOOT (FLS_OFFSET_CPYR - FLS_OFFSET_BOOT) +#define FLS_BLOCK_SIZE (u32)(0x00020000) +#define FI_NVR_2KB 0x0800 +#define FI_NVR_8KB 0x2000 +#define FM_BUF_SZ 0x800 + +/* + * marvell frey (88R9580) register definitions + * chip revision identifiers + */ +#define MVR_FREY_B2 0xB2 + +/* + * memory window definitions. window 0 is the data window with definitions + * of MW_DATA_XXX. window 1 is the register window with definitions of + * MW_REG_XXX. 
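+ *
+ * For example (editor's sketch using the accessor macros defined above;
+ * MU_DOORBELL_OUT is one of the register offsets defined below):
+ *
+ *	u32 db = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ *	u8 fb = esas2r_read_data_byte(a, 0);	first byte of window 0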
+ */ +#define MW_REG_WINDOW_SIZE (u32)(0x00040000) +#define MW_REG_OFFSET_HWREG (u32)(0x00000000) +#define MW_REG_OFFSET_PCI (u32)(0x00008000) +#define MW_REG_PCI_HWREG_DELTA (MW_REG_OFFSET_PCI - MW_REG_OFFSET_HWREG) +#define MW_DATA_WINDOW_SIZE (u32)(0x00020000) +#define MW_DATA_ADDR_SER_FLASH (u32)(0xEC000000) +#define MW_DATA_ADDR_SRAM (u32)(0xF4000000) +#define MW_DATA_ADDR_PAR_FLASH (u32)(0xFC000000) + +/* + * the following registers are for the communication + * list interface (AKA message unit (MU)) + */ +#define MU_IN_LIST_ADDR_LO (u32)(0x00004000) +#define MU_IN_LIST_ADDR_HI (u32)(0x00004004) + +#define MU_IN_LIST_WRITE (u32)(0x00004018) + #define MU_ILW_TOGGLE (u32)(0x00004000) + +#define MU_IN_LIST_READ (u32)(0x0000401C) + #define MU_ILR_TOGGLE (u32)(0x00004000) + #define MU_ILIC_LIST (u32)(0x0000000F) + #define MU_ILIC_LIST_F0 (u32)(0x00000000) + #define MU_ILIC_DEST (u32)(0x00000F00) + #define MU_ILIC_DEST_DDR (u32)(0x00000200) +#define MU_IN_LIST_IFC_CONFIG (u32)(0x00004028) + +#define MU_IN_LIST_CONFIG (u32)(0x0000402C) + #define MU_ILC_ENABLE (u32)(0x00000001) + #define MU_ILC_ENTRY_MASK (u32)(0x000000F0) + #define MU_ILC_ENTRY_4_DW (u32)(0x00000020) + #define MU_ILC_DYNAMIC_SRC (u32)(0x00008000) + #define MU_ILC_NUMBER_MASK (u32)(0x7FFF0000) + #define MU_ILC_NUMBER_SHIFT 16 + +#define MU_OUT_LIST_ADDR_LO (u32)(0x00004050) +#define MU_OUT_LIST_ADDR_HI (u32)(0x00004054) + +#define MU_OUT_LIST_COPY_PTR_LO (u32)(0x00004058) +#define MU_OUT_LIST_COPY_PTR_HI (u32)(0x0000405C) + +#define MU_OUT_LIST_WRITE (u32)(0x00004068) + #define MU_OLW_TOGGLE (u32)(0x00004000) + +#define MU_OUT_LIST_COPY (u32)(0x0000406C) + #define MU_OLC_TOGGLE (u32)(0x00004000) + #define MU_OLC_WRT_PTR (u32)(0x00003FFF) + +#define MU_OUT_LIST_IFC_CONFIG (u32)(0x00004078) + #define MU_OLIC_LIST (u32)(0x0000000F) + #define MU_OLIC_LIST_F0 (u32)(0x00000000) + #define MU_OLIC_SOURCE (u32)(0x00000F00) + #define MU_OLIC_SOURCE_DDR (u32)(0x00000200) + +#define MU_OUT_LIST_CONFIG (u32)(0x0000407C) + #define MU_OLC_ENABLE (u32)(0x00000001) + #define MU_OLC_ENTRY_MASK (u32)(0x000000F0) + #define MU_OLC_ENTRY_4_DW (u32)(0x00000020) + #define MU_OLC_NUMBER_MASK (u32)(0x7FFF0000) + #define MU_OLC_NUMBER_SHIFT 16 + +#define MU_OUT_LIST_INT_STAT (u32)(0x00004088) + #define MU_OLIS_INT (u32)(0x00000001) + +#define MU_OUT_LIST_INT_MASK (u32)(0x0000408C) + #define MU_OLIS_MASK (u32)(0x00000001) + +/* + * the maximum size of the communication lists is two greater than the + * maximum number of VDA requests. the extra entries are to prevent queue overflow. 
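+ *
+ * Worked out with the defines below, that is ESAS2R_MAX_COMM_LIST_SIZE =
+ * ESAS2R_MAX_NUM_REQS + ESAS2R_NUM_EXTRA = 256 + 2 = 258 entries.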
+ */ +#define ESAS2R_MAX_NUM_REQS 256 +#define ESAS2R_NUM_EXTRA 2 +#define ESAS2R_MAX_COMM_LIST_SIZE (ESAS2R_MAX_NUM_REQS + ESAS2R_NUM_EXTRA) + +/* + * the following registers are for the CPU interface + */ +#define MU_CTL_STATUS_IN (u32)(0x00010108) + #define MU_CTL_IN_FULL_RST (u32)(0x00000020) +#define MU_CTL_STATUS_IN_B2 (u32)(0x00010130) + #define MU_CTL_IN_FULL_RST2 (u32)(0x80000000) +#define MU_DOORBELL_IN (u32)(0x00010460) + #define DRBL_RESET_BUS (u32)(0x00000002) + #define DRBL_PAUSE_AE (u32)(0x00000004) + #define DRBL_RESUME_AE (u32)(0x00000008) + #define DRBL_MSG_IFC_DOWN (u32)(0x00000010) + #define DRBL_FLASH_REQ (u32)(0x00000020) + #define DRBL_FLASH_DONE (u32)(0x00000040) + #define DRBL_FORCE_INT (u32)(0x00000080) + #define DRBL_MSG_IFC_INIT (u32)(0x00000100) + #define DRBL_POWER_DOWN (u32)(0x00000200) + #define DRBL_DRV_VER_1 (u32)(0x00010000) + #define DRBL_DRV_VER DRBL_DRV_VER_1 +#define MU_DOORBELL_IN_ENB (u32)(0x00010464) +#define MU_DOORBELL_OUT (u32)(0x00010480) + #define DRBL_PANIC_REASON_MASK (u32)(0x00F00000) + #define DRBL_UNUSED_HANDLER (u32)(0x00100000) + #define DRBL_UNDEF_INSTR (u32)(0x00200000) + #define DRBL_PREFETCH_ABORT (u32)(0x00300000) + #define DRBL_DATA_ABORT (u32)(0x00400000) + #define DRBL_JUMP_TO_ZERO (u32)(0x00500000) + #define DRBL_FW_RESET (u32)(0x00080000) + #define DRBL_FW_VER_MSK (u32)(0x00070000) + #define DRBL_FW_VER_0 (u32)(0x00000000) + #define DRBL_FW_VER_1 (u32)(0x00010000) + #define DRBL_FW_VER DRBL_FW_VER_1 +#define MU_DOORBELL_OUT_ENB (u32)(0x00010484) + #define DRBL_ENB_MASK (u32)(0x00F803FF) +#define MU_INT_STATUS_OUT (u32)(0x00010200) + #define MU_INTSTAT_POST_OUT (u32)(0x00000010) + #define MU_INTSTAT_DRBL_IN (u32)(0x00000100) + #define MU_INTSTAT_DRBL (u32)(0x00001000) + #define MU_INTSTAT_MASK (u32)(0x00001010) +#define MU_INT_MASK_OUT (u32)(0x0001020C) + +/* PCI express registers accessed via window 1 */ +#define MVR_PCI_WIN1_REMAP (u32)(0x00008438) + #define MVRPW1R_ENABLE (u32)(0x00000001) + + +/* structures */ + +/* inbound list dynamic source entry */ +struct esas2r_inbound_list_source_entry { + u64 address; + u32 length; + #define HWILSE_INTERFACE_F0 0x00000000 + u32 reserved; +}; + +/* PCI data structure in expansion ROM images */ +struct __packed esas2r_boot_header { + char signature[4]; + u16 vendor_id; + u16 device_id; + u16 VPD; + u16 struct_length; + u8 struct_revision; + u8 class_code[3]; + u16 image_length; + u16 code_revision; + u8 code_type; + #define CODE_TYPE_PC 0 + #define CODE_TYPE_OPEN 1 + #define CODE_TYPE_EFI 3 + u8 indicator; + #define INDICATOR_LAST 0x80 + u8 reserved[2]; +}; + +struct __packed esas2r_boot_image { + u16 signature; + u8 reserved[22]; + u16 header_offset; + u16 pnp_offset; +}; + +struct __packed esas2r_pc_image { + u16 signature; + u8 length; + u8 entry_point[3]; + u8 checksum; + u16 image_end; + u16 min_size; + u8 rom_flags; + u8 reserved[12]; + u16 header_offset; + u16 pnp_offset; + struct esas2r_boot_header boot_image; +}; + +struct __packed esas2r_efi_image { + u16 signature; + u16 length; + u32 efi_signature; + #define EFI_ROM_SIG 0x00000EF1 + u16 image_type; + #define EFI_IMAGE_APP 10 + #define EFI_IMAGE_BSD 11 + #define EFI_IMAGE_RTD 12 + u16 machine_type; + #define EFI_MACHINE_IA32 0x014c + #define EFI_MACHINE_IA64 0x0200 + #define EFI_MACHINE_X64 0x8664 + #define EFI_MACHINE_EBC 0x0EBC + u16 compression; + #define EFI_UNCOMPRESSED 0x0000 + #define EFI_COMPRESSED 0x0001 + u8 reserved[8]; + u16 efi_offset; + u16 header_offset; + u16 reserved2; + struct esas2r_boot_header 
boot_image; +}; + +struct esas2r_adapter; +struct esas2r_sg_context; +struct esas2r_request; + +typedef void (*RQCALLBK) (struct esas2r_adapter *a, + struct esas2r_request *rq); +typedef bool (*RQBUILDSGL) (struct esas2r_adapter *a, + struct esas2r_sg_context *sgc); + +struct esas2r_component_header { + u8 img_type; + #define CH_IT_FW 0x00 + #define CH_IT_NVR 0x01 + #define CH_IT_BIOS 0x02 + #define CH_IT_MAC 0x03 + #define CH_IT_CFG 0x04 + #define CH_IT_EFI 0x05 + u8 status; + #define CH_STAT_PENDING 0xff + #define CH_STAT_FAILED 0x00 + #define CH_STAT_SUCCESS 0x01 + #define CH_STAT_RETRY 0x02 + #define CH_STAT_INVALID 0x03 + u8 pad[2]; + u32 version; + u32 length; + u32 image_offset; +}; + +#define FI_REL_VER_SZ 16 + +struct esas2r_flash_img_v0 { + u8 fi_version; + #define FI_VERSION_0 00 + u8 status; + u8 adap_typ; + u8 action; + u32 length; + u16 checksum; + u16 driver_error; + u16 flags; + u16 num_comps; + #define FI_NUM_COMPS_V0 5 + u8 rel_version[FI_REL_VER_SZ]; + struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V0]; + u8 scratch_buf[FM_BUF_SZ]; +}; + +struct esas2r_flash_img { + u8 fi_version; + #define FI_VERSION_1 01 + u8 status; + #define FI_STAT_SUCCESS 0x00 + #define FI_STAT_FAILED 0x01 + #define FI_STAT_REBOOT 0x02 + #define FI_STAT_ADAPTYP 0x03 + #define FI_STAT_INVALID 0x04 + #define FI_STAT_CHKSUM 0x05 + #define FI_STAT_LENGTH 0x06 + #define FI_STAT_UNKNOWN 0x07 + #define FI_STAT_IMG_VER 0x08 + #define FI_STAT_BUSY 0x09 + #define FI_STAT_DUAL 0x0A + #define FI_STAT_MISSING 0x0B + #define FI_STAT_UNSUPP 0x0C + #define FI_STAT_ERASE 0x0D + #define FI_STAT_FLASH 0x0E + #define FI_STAT_DEGRADED 0x0F + u8 adap_typ; + #define FI_AT_UNKNWN 0xFF + #define FI_AT_SUN_LAKE 0x0B + #define FI_AT_MV_9580 0x0F + u8 action; + #define FI_ACT_DOWN 0x00 + #define FI_ACT_UP 0x01 + #define FI_ACT_UPSZ 0x02 + #define FI_ACT_MAX 0x02 + #define FI_ACT_DOWN1 0x80 + u32 length; + u16 checksum; + u16 driver_error; + u16 flags; + #define FI_FLG_NVR_DEF 0x0001 + u16 num_comps; + #define FI_NUM_COMPS_V1 6 + u8 rel_version[FI_REL_VER_SZ]; + struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V1]; + u8 scratch_buf[FM_BUF_SZ]; +}; + +/* definitions for flash script (FS) commands */ +struct esas2r_ioctlfs_command { + u8 command; + #define ESAS2R_FS_CMD_ERASE 0 + #define ESAS2R_FS_CMD_READ 1 + #define ESAS2R_FS_CMD_BEGINW 2 + #define ESAS2R_FS_CMD_WRITE 3 + #define ESAS2R_FS_CMD_COMMIT 4 + #define ESAS2R_FS_CMD_CANCEL 5 + u8 checksum; + u8 reserved[2]; + u32 flash_addr; + u32 length; + u32 image_offset; +}; + +struct esas2r_ioctl_fs { + u8 version; + #define ESAS2R_FS_VER 0 + u8 status; + u8 driver_error; + u8 adap_type; + #define ESAS2R_FS_AT_ESASRAID2 3 + #define ESAS2R_FS_AT_TSSASRAID2 4 + #define ESAS2R_FS_AT_TSSASRAID2E 5 + #define ESAS2R_FS_AT_TLSASHBA 6 + u8 driver_ver; + u8 reserved[11]; + struct esas2r_ioctlfs_command command; + u8 data[1]; +}; + +struct esas2r_sas_nvram { + u8 signature[4]; + u8 version; + #define SASNVR_VERSION_0 0x00 + #define SASNVR_VERSION SASNVR_VERSION_0 + u8 checksum; + #define SASNVR_CKSUM_SEED 0x5A + u8 max_lun_for_target; + u8 pci_latency; + #define SASNVR_PCILAT_DIS 0x00 + #define SASNVR_PCILAT_MIN 0x10 + #define SASNVR_PCILAT_MAX 0xF8 + u8 options1; + #define SASNVR1_BOOT_DRVR 0x01 + #define SASNVR1_BOOT_SCAN 0x02 + #define SASNVR1_DIS_PCI_MWI 0x04 + #define SASNVR1_FORCE_ORD_Q 0x08 + #define SASNVR1_CACHELINE_0 0x10 + #define SASNVR1_DIS_DEVSORT 0x20 + #define SASNVR1_PWR_MGT_EN 0x40 + #define SASNVR1_WIDEPORT 0x80 + u8 options2; + #define SASNVR2_SINGLE_BUS 0x01 + 
#define SASNVR2_SLOT_BIND 0x02 + #define SASNVR2_EXP_PROG 0x04 + #define SASNVR2_CMDTHR_LUN 0x08 + #define SASNVR2_HEARTBEAT 0x10 + #define SASNVR2_INT_CONNECT 0x20 + #define SASNVR2_SW_MUX_CTRL 0x40 + #define SASNVR2_DISABLE_NCQ 0x80 + u8 int_coalescing; + #define SASNVR_COAL_DIS 0x00 + #define SASNVR_COAL_LOW 0x01 + #define SASNVR_COAL_MED 0x02 + #define SASNVR_COAL_HI 0x03 + u8 cmd_throttle; + #define SASNVR_CMDTHR_NONE 0x00 + u8 dev_wait_time; + u8 dev_wait_count; + u8 spin_up_delay; + #define SASNVR_SPINUP_MAX 0x14 + u8 ssp_align_rate; + u8 sas_addr[8]; + u8 phy_speed[16]; + #define SASNVR_SPEED_AUTO 0x00 + #define SASNVR_SPEED_1_5GB 0x01 + #define SASNVR_SPEED_3GB 0x02 + #define SASNVR_SPEED_6GB 0x03 + #define SASNVR_SPEED_12GB 0x04 + u8 phy_mux[16]; + #define SASNVR_MUX_DISABLED 0x00 + #define SASNVR_MUX_1_5GB 0x01 + #define SASNVR_MUX_3GB 0x02 + #define SASNVR_MUX_6GB 0x03 + u8 phy_flags[16]; + #define SASNVR_PHF_DISABLED 0x01 + #define SASNVR_PHF_RD_ONLY 0x02 + u8 sort_type; + #define SASNVR_SORT_SAS_ADDR 0x00 + #define SASNVR_SORT_H308_CONN 0x01 + #define SASNVR_SORT_PHY_ID 0x02 + #define SASNVR_SORT_SLOT_ID 0x03 + u8 dpm_reqcmd_lmt; + u8 dpm_stndby_time; + u8 dpm_active_time; + u8 phy_target_id[16]; + #define SASNVR_PTI_DISABLED 0xFF + u8 virt_ses_mode; + #define SASNVR_VSMH_DISABLED 0x00 + u8 read_write_mode; + #define SASNVR_RWM_DEFAULT 0x00 + u8 link_down_to; + u8 reserved[0xA1]; +}; + +typedef u32 (*PGETPHYSADDR) (struct esas2r_sg_context *sgc, u64 *addr); + +struct esas2r_sg_context { + struct esas2r_adapter *adapter; + struct esas2r_request *first_req; + u32 length; + u8 *cur_offset; + PGETPHYSADDR get_phys_addr; + union { + struct { + struct atto_vda_sge *curr; + struct atto_vda_sge *last; + struct atto_vda_sge *limit; + struct atto_vda_sge *chain; + } a64; + struct { + struct atto_physical_region_description *curr; + struct atto_physical_region_description *chain; + u32 sgl_max_cnt; + u32 sge_cnt; + } prd; + } sge; + struct scatterlist *cur_sgel; + u8 *exp_offset; + int num_sgel; + int sgel_count; +}; + +struct esas2r_target { + u8 flags; + #define TF_PASS_THRU 0x01 + #define TF_USED 0x02 + u8 new_target_state; + u8 target_state; + u8 buffered_target_state; +#define TS_NOT_PRESENT 0x00 +#define TS_PRESENT 0x05 +#define TS_LUN_CHANGE 0x06 +#define TS_INVALID 0xFF + u32 block_size; + u32 inter_block; + u32 inter_byte; + u16 virt_targ_id; + u16 phys_targ_id; + u8 identifier_len; + u64 sas_addr; + u8 identifier[60]; + struct atto_vda_ae_lu lu_event; +}; + +struct esas2r_request { + struct list_head comp_list; + struct list_head req_list; + union atto_vda_req *vrq; + struct esas2r_mem_desc *vrq_md; + union { + void *data_buf; + union atto_vda_rsp_data *vda_rsp_data; + }; + u8 *sense_buf; + struct list_head sg_table_head; + struct esas2r_mem_desc *sg_table; + u32 timeout; + #define RQ_TIMEOUT_S1 0xFFFFFFFF + #define RQ_TIMEOUT_S2 0xFFFFFFFE + #define RQ_MAX_TIMEOUT 0xFFFFFFFD + u16 target_id; + u8 req_type; + #define RT_INI_REQ 0x01 + #define RT_DISC_REQ 0x02 + u8 sense_len; + union atto_vda_func_rsp func_rsp; + RQCALLBK comp_cb; + RQCALLBK interrupt_cb; + void *interrupt_cx; + u8 flags; + #define RF_1ST_IBLK_BASE 0x04 + #define RF_FAILURE_OK 0x08 + u8 req_stat; + u16 vda_req_sz; + #define RQ_SIZE_DEFAULT 0 + u64 lba; + RQCALLBK aux_req_cb; + void *aux_req_cx; + u32 blk_len; + u32 max_blk_len; + union { + struct scsi_cmnd *cmd; + u8 *task_management_status_ptr; + }; +}; + +struct esas2r_flash_context { + struct esas2r_flash_img *fi; + RQCALLBK interrupt_cb; + u8 *sgc_offset; + 
u8 *scratch; + u32 fi_hdr_len; + u8 task; + #define FMTSK_ERASE_BOOT 0 + #define FMTSK_WRTBIOS 1 + #define FMTSK_READBIOS 2 + #define FMTSK_WRTMAC 3 + #define FMTSK_READMAC 4 + #define FMTSK_WRTEFI 5 + #define FMTSK_READEFI 6 + #define FMTSK_WRTCFG 7 + #define FMTSK_READCFG 8 + u8 func; + u16 num_comps; + u32 cmp_len; + u32 flsh_addr; + u32 curr_len; + u8 comp_typ; + struct esas2r_sg_context sgc; +}; + +struct esas2r_disc_context { + u8 disc_evt; + #define DCDE_DEV_CHANGE 0x01 + #define DCDE_DEV_SCAN 0x02 + u8 state; + #define DCS_DEV_RMV 0x00 + #define DCS_DEV_ADD 0x01 + #define DCS_BLOCK_DEV_SCAN 0x02 + #define DCS_RAID_GRP_INFO 0x03 + #define DCS_PART_INFO 0x04 + #define DCS_PT_DEV_INFO 0x05 + #define DCS_PT_DEV_ADDR 0x06 + #define DCS_DISC_DONE 0xFF + u16 flags; + #define DCF_DEV_CHANGE 0x0001 + #define DCF_DEV_SCAN 0x0002 + #define DCF_POLLED 0x8000 + u32 interleave; + u32 block_size; + u16 dev_ix; + u8 part_num; + u8 raid_grp_ix; + char raid_grp_name[16]; + struct esas2r_target *curr_targ; + u16 curr_virt_id; + u16 curr_phys_id; + u8 scan_gen; + u8 dev_addr_type; + u64 sas_addr; +}; + +struct esas2r_mem_desc { + struct list_head next_desc; + void *virt_addr; + u64 phys_addr; + void *pad; + void *esas2r_data; + u32 esas2r_param; + u32 size; +}; + +enum fw_event_type { + fw_event_null, + fw_event_lun_change, + fw_event_present, + fw_event_not_present, + fw_event_vda_ae +}; + +struct esas2r_vda_ae { + u32 signature; +#define ESAS2R_VDA_EVENT_SIG 0x4154544F + u8 bus_number; + u8 devfn; + u8 pad[2]; + union atto_vda_ae vda_ae; +}; + +struct esas2r_fw_event_work { + struct list_head list; + struct delayed_work work; + struct esas2r_adapter *a; + enum fw_event_type type; + u8 data[sizeof(struct esas2r_vda_ae)]; +}; + +enum state { + FW_INVALID_ST, + FW_STATUS_ST, + FW_COMMAND_ST +}; + +struct esas2r_firmware { + enum state state; + struct esas2r_flash_img header; + u8 *data; + u64 phys; + int orig_len; + void *header_buff; + u64 header_buff_phys; +}; + +struct esas2r_adapter { + struct esas2r_target targetdb[ESAS2R_MAX_TARGETS]; + struct esas2r_target *targetdb_end; + unsigned char *regs; + unsigned char *data_window; + long flags; + #define AF_PORT_CHANGE 0 + #define AF_CHPRST_NEEDED 1 + #define AF_CHPRST_PENDING 2 + #define AF_CHPRST_DETECTED 3 + #define AF_BUSRST_NEEDED 4 + #define AF_BUSRST_PENDING 5 + #define AF_BUSRST_DETECTED 6 + #define AF_DISABLED 7 + #define AF_FLASH_LOCK 8 + #define AF_OS_RESET 9 + #define AF_FLASHING 10 + #define AF_POWER_MGT 11 + #define AF_NVR_VALID 12 + #define AF_DEGRADED_MODE 13 + #define AF_DISC_PENDING 14 + #define AF_TASKLET_SCHEDULED 15 + #define AF_HEARTBEAT 16 + #define AF_HEARTBEAT_ENB 17 + #define AF_NOT_PRESENT 18 + #define AF_CHPRST_STARTED 19 + #define AF_FIRST_INIT 20 + #define AF_POWER_DOWN 21 + #define AF_DISC_IN_PROG 22 + #define AF_COMM_LIST_TOGGLE 23 + #define AF_LEGACY_SGE_MODE 24 + #define AF_DISC_POLLED 25 + long flags2; + #define AF2_SERIAL_FLASH 0 + #define AF2_DEV_SCAN 1 + #define AF2_DEV_CNT_OK 2 + #define AF2_COREDUMP_AVAIL 3 + #define AF2_COREDUMP_SAVED 4 + #define AF2_VDA_POWER_DOWN 5 + #define AF2_THUNDERLINK 6 + #define AF2_THUNDERBOLT 7 + #define AF2_INIT_DONE 8 + #define AF2_INT_PENDING 9 + #define AF2_TIMER_TICK 10 + #define AF2_IRQ_CLAIMED 11 + #define AF2_MSI_ENABLED 12 + atomic_t disable_cnt; + atomic_t dis_ints_cnt; + u32 int_stat; + u32 int_mask; + u32 volatile *outbound_copy; + struct list_head avail_request; + spinlock_t request_lock; + spinlock_t sg_list_lock; + spinlock_t queue_lock; + spinlock_t mem_lock; + struct 
list_head free_sg_list_head; + struct esas2r_mem_desc *sg_list_mds; + struct list_head active_list; + struct list_head defer_list; + struct esas2r_request **req_table; + union { + u16 prev_dev_cnt; + u32 heartbeat_time; + #define ESAS2R_HEARTBEAT_TIME (3000) + }; + u32 chip_uptime; + #define ESAS2R_CHP_UPTIME_MAX (60000) + #define ESAS2R_CHP_UPTIME_CNT (20000) + u64 uncached_phys; + u8 *uncached; + struct esas2r_sas_nvram *nvram; + struct esas2r_request general_req; + u8 init_msg; + #define ESAS2R_INIT_MSG_START 1 + #define ESAS2R_INIT_MSG_INIT 2 + #define ESAS2R_INIT_MSG_GET_INIT 3 + #define ESAS2R_INIT_MSG_REINIT 4 + u16 cmd_ref_no; + u32 fw_version; + u32 fw_build; + u32 chip_init_time; + #define ESAS2R_CHPRST_TIME (180000) + #define ESAS2R_CHPRST_WAIT_TIME (2000) + u32 last_tick_time; + u32 window_base; + RQBUILDSGL build_sgl; + struct esas2r_request *first_ae_req; + u32 list_size; + u32 last_write; + u32 last_read; + u16 max_vdareq_size; + u16 disc_wait_cnt; + struct esas2r_mem_desc inbound_list_md; + struct esas2r_mem_desc outbound_list_md; + struct esas2r_disc_context disc_ctx; + u8 *disc_buffer; + u32 disc_start_time; + u32 disc_wait_time; + u32 flash_ver; + char flash_rev[16]; + char fw_rev[16]; + char image_type[16]; + struct esas2r_flash_context flash_context; + u32 num_targets_backend; + u32 ioctl_tunnel; + struct tasklet_struct tasklet; + struct pci_dev *pcid; + struct Scsi_Host *host; + unsigned int index; + char name[32]; + struct timer_list timer; + struct esas2r_firmware firmware; + wait_queue_head_t nvram_waiter; + int nvram_command_done; + wait_queue_head_t fm_api_waiter; + int fm_api_command_done; + wait_queue_head_t vda_waiter; + int vda_command_done; + u8 *vda_buffer; + u64 ppvda_buffer; +#define VDA_BUFFER_HEADER_SZ (offsetof(struct atto_ioctl_vda, data)) +#define VDA_MAX_BUFFER_SIZE (0x40000 + VDA_BUFFER_HEADER_SZ) + wait_queue_head_t fs_api_waiter; + int fs_api_command_done; + u64 ppfs_api_buffer; + u8 *fs_api_buffer; + u32 fs_api_buffer_size; + wait_queue_head_t buffered_ioctl_waiter; + int buffered_ioctl_done; + int uncached_size; + struct workqueue_struct *fw_event_q; + struct list_head fw_event_list; + spinlock_t fw_event_lock; + u8 fw_events_off; /* if '1', then ignore events */ + char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN]; + /* + * intr_mode stores the interrupt mode currently being used by this + * adapter. it is based on the interrupt_mode module parameter, but + * can be changed based on the ability (or not) to utilize the + * mode requested by the parameter. 
+ */ + int intr_mode; +#define INTR_MODE_LEGACY 0 +#define INTR_MODE_MSI 1 +#define INTR_MODE_MSIX 2 + struct esas2r_sg_context fm_api_sgc; + u8 *save_offset; + struct list_head vrq_mds_head; + struct esas2r_mem_desc *vrq_mds; + int num_vrqs; + struct semaphore fm_api_semaphore; + struct semaphore fs_api_semaphore; + struct semaphore nvram_semaphore; + struct atto_ioctl *local_atto_ioctl; + u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ]; + unsigned int sysfs_fw_created:1; + unsigned int sysfs_fs_created:1; + unsigned int sysfs_vda_created:1; + unsigned int sysfs_hw_created:1; + unsigned int sysfs_live_nvram_created:1; + unsigned int sysfs_default_nvram_created:1; +}; + +/* + * Function Declarations + * SCSI functions + */ +int esas2r_release(struct Scsi_Host *); +const char *esas2r_info(struct Scsi_Host *); +int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, + struct esas2r_sas_nvram *data); +int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg); +int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg); +u8 handle_hba_ioctl(struct esas2r_adapter *a, + struct atto_ioctl *ioctl_hba); +int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd); +int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh); +int esas2r_slave_alloc(struct scsi_device *dev); +int esas2r_slave_configure(struct scsi_device *dev); +void esas2r_slave_destroy(struct scsi_device *dev); +int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason); +int esas2r_change_queue_type(struct scsi_device *dev, int type); +long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg); + +/* SCSI error handler (eh) functions */ +int esas2r_eh_abort(struct scsi_cmnd *cmd); +int esas2r_device_reset(struct scsi_cmnd *cmd); +int esas2r_host_reset(struct scsi_cmnd *cmd); +int esas2r_bus_reset(struct scsi_cmnd *cmd); +int esas2r_target_reset(struct scsi_cmnd *cmd); + +/* Internal functions */ +int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, + int index); +int esas2r_cleanup(struct Scsi_Host *host); +int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count); +int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off, + int count); +int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count); +int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off, + int count); +int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count); +int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off, + int count); +void esas2r_adapter_tasklet(unsigned long context); +irqreturn_t esas2r_interrupt(int irq, void *dev_id); +irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id); +void esas2r_kickoff_timer(struct esas2r_adapter *a); +int esas2r_suspend(struct pci_dev *pcid, pm_message_t state); +int esas2r_resume(struct pci_dev *pcid); +void esas2r_fw_event_off(struct esas2r_adapter *a); +void esas2r_fw_event_on(struct esas2r_adapter *a); +bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq, + struct esas2r_sas_nvram *nvram); +void esas2r_nvram_get_defaults(struct esas2r_adapter *a, + struct esas2r_sas_nvram *nvram); +void esas2r_complete_request_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_reset_detected(struct esas2r_adapter *a); +void esas2r_target_state_changed(struct esas2r_adapter *ha, u16 targ_id, + u8 state); +int esas2r_req_status_to_error(u8 req_stat); +void esas2r_kill_adapter(int i); 
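A note on the adapter structure above: the AF_* and AF2_* values are bit numbers, not masks, so the driver manipulates the flags/flags2 words with the kernel's atomic bitops (set_bit(), clear_bit(), test_bit(), test_and_set_bit()) rather than under a spinlock. Below is a minimal sketch of that idiom, assuming the esas2r.h definitions in this patch; the helper name example_request_chip_reset() is hypothetical and not part of the driver.

#include <linux/bitops.h>

/*
 * Hypothetical sketch only: arm a chip reset at most once.
 * test_and_set_bit() atomically sets the bit and returns its previous
 * value, so only the first caller sees 0 and performs the follow-up
 * work; concurrent callers become no-ops without taking a lock.
 */
static void example_request_chip_reset(struct esas2r_adapter *a)
{
	if (!test_and_set_bit(AF_CHPRST_NEEDED, &a->flags)) {
		set_bit(AF_CHPRST_PENDING, &a->flags);
		set_bit(AF_DISC_PENDING, &a->flags);
	}
}

esas2r_schedule_tasklet() and esas2r_local_reset_adapter() further down apply the same bitops to these flags.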
+void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq); +struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a); +u32 esas2r_get_uncached_size(struct esas2r_adapter *a); +bool esas2r_init_adapter_struct(struct esas2r_adapter *a, + void **uncached_area); +bool esas2r_check_adapter(struct esas2r_adapter *a); +bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll); +void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq); +bool esas2r_send_task_mgmt(struct esas2r_adapter *a, + struct esas2r_request *rqaux, u8 task_mgt_func); +void esas2r_do_tasklet_tasks(struct esas2r_adapter *a); +void esas2r_adapter_interrupt(struct esas2r_adapter *a); +void esas2r_do_deferred_processes(struct esas2r_adapter *a); +void esas2r_reset_bus(struct esas2r_adapter *a); +void esas2r_reset_adapter(struct esas2r_adapter *a); +void esas2r_timer_tick(struct esas2r_adapter *a); +const char *esas2r_get_model_name(struct esas2r_adapter *a); +const char *esas2r_get_model_name_short(struct esas2r_adapter *a); +u32 esas2r_stall_execution(struct esas2r_adapter *a, u32 start_time, + u32 *delay); +void esas2r_build_flash_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u8 cksum, + u32 addr, + u32 length); +void esas2r_build_mgt_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u8 scan_gen, + u16 dev_index, + u32 length, + void *data); +void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq); +void esas2r_build_cli_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u32 length, + u32 cmd_rsp_len); +void esas2r_build_ioctl_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u32 length, + u8 sub_func); +void esas2r_build_cfg_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u32 length, + void *data); +void esas2r_power_down(struct esas2r_adapter *a); +bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll); +void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq); +u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo); +bool esas2r_process_fs_ioctl(struct esas2r_adapter *a, + struct esas2r_ioctl_fs *fs, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc); +bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from, + u32 size); +bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from, + u32 size); +bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, + struct esas2r_request *rq, struct esas2r_sg_context *sgc); +void esas2r_force_interrupt(struct esas2r_adapter *a); +void esas2r_local_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_process_adapter_reset(struct esas2r_adapter *a); +void esas2r_complete_request(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_dummy_complete(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq); +void esas2r_start_vda_request(struct esas2r_adapter *a, + struct esas2r_request *rq); +bool esas2r_read_flash_rev(struct esas2r_adapter *a); +bool esas2r_read_image_type(struct esas2r_adapter *a); +bool esas2r_nvram_read_direct(struct esas2r_adapter *a); +bool esas2r_nvram_validate(struct esas2r_adapter *a); +void esas2r_nvram_set_defaults(struct esas2r_adapter *a); +bool esas2r_print_flash_rev(struct esas2r_adapter *a); +void esas2r_send_reset_ae(struct esas2r_adapter *a, bool 
pwr_mgt); +bool esas2r_init_msgs(struct esas2r_adapter *a); +bool esas2r_is_adapter_present(struct esas2r_adapter *a); +void esas2r_nuxi_mgt_data(u8 function, void *data); +void esas2r_nuxi_cfg_data(u8 function, void *data); +void esas2r_nuxi_ae_data(union atto_vda_ae *ae); +void esas2r_reset_chip(struct esas2r_adapter *a); +void esas2r_log_request_failure(struct esas2r_adapter *a, + struct esas2r_request *rq); +void esas2r_polled_interrupt(struct esas2r_adapter *a); +bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq, + u8 status); +bool esas2r_build_sg_list_sge(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc); +bool esas2r_build_sg_list_prd(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc); +void esas2r_targ_db_initialize(struct esas2r_adapter *a); +void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify); +void esas2r_targ_db_report_changes(struct esas2r_adapter *a); +struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a, + struct esas2r_disc_context *dc); +struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a, + struct esas2r_disc_context *dc, + u8 *ident, + u8 ident_len); +void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t); +struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a, + u64 *sas_addr); +struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a, + void *identifier, + u8 ident_len); +u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id); +struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a, + u16 virt_id); +u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a); +void esas2r_disc_initialize(struct esas2r_adapter *a); +void esas2r_disc_start_waiting(struct esas2r_adapter *a); +void esas2r_disc_check_for_work(struct esas2r_adapter *a); +void esas2r_disc_check_complete(struct esas2r_adapter *a); +void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt); +bool esas2r_disc_start_port(struct esas2r_adapter *a); +void esas2r_disc_local_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq); +bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str); +bool esas2r_process_vda_ioctl(struct esas2r_adapter *a, + struct atto_ioctl_vda *vi, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc); +void esas2r_queue_fw_event(struct esas2r_adapter *a, + enum fw_event_type type, + void *data, + int data_sz); + +/* Inline functions */ + +/* Allocate a chip scatter/gather list entry */ +static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a) +{ + unsigned long flags; + struct list_head *sgl; + struct esas2r_mem_desc *result = NULL; + + spin_lock_irqsave(&a->sg_list_lock, flags); + if (likely(!list_empty(&a->free_sg_list_head))) { + sgl = a->free_sg_list_head.next; + result = list_entry(sgl, struct esas2r_mem_desc, next_desc); + list_del_init(sgl); + } + spin_unlock_irqrestore(&a->sg_list_lock, flags); + + return result; +} + +/* Initialize a scatter/gather context */ +static inline void esas2r_sgc_init(struct esas2r_sg_context *sgc, + struct esas2r_adapter *a, + struct esas2r_request *rq, + struct atto_vda_sge *first) +{ + sgc->adapter = a; + sgc->first_req = rq; + + /* + * set the limit pointer such that an SGE pointer above this value + * would be the first one to overflow the SGL. 
+ */ + sgc->sge.a64.limit = (struct atto_vda_sge *)((u8 *)rq->vrq + + (sizeof(union + atto_vda_req) / + 8) + - sizeof(struct + atto_vda_sge)); + if (first) { + sgc->sge.a64.last = + sgc->sge.a64.curr = first; + rq->vrq->scsi.sg_list_offset = (u8) + ((u8 *)first - + (u8 *)rq->vrq); + } else { + sgc->sge.a64.last = + sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0]; + rq->vrq->scsi.sg_list_offset = + (u8)offsetof(struct atto_vda_scsi_req, u.sge); + } + sgc->sge.a64.chain = NULL; +} + +static inline void esas2r_rq_init_request(struct esas2r_request *rq, + struct esas2r_adapter *a) +{ + union atto_vda_req *vrq = rq->vrq; + + INIT_LIST_HEAD(&rq->sg_table_head); + rq->data_buf = (void *)(vrq + 1); + rq->interrupt_cb = NULL; + rq->comp_cb = esas2r_complete_request_cb; + rq->flags = 0; + rq->timeout = 0; + rq->req_stat = RS_PENDING; + rq->req_type = RT_INI_REQ; + + /* clear the outbound response */ + rq->func_rsp.dwords[0] = 0; + rq->func_rsp.dwords[1] = 0; + + /* + * clear the size of the VDA request. esas2r_build_sg_list() will + * only allow the size of the request to grow. there are some + * management requests that go through there twice and the second + * time through sets a smaller request size. if this is not modified + * at all we'll set it to the size of the entire VDA request. + */ + rq->vda_req_sz = RQ_SIZE_DEFAULT; + + /* req_table entry should be NULL at this point - if not, halt */ + + if (a->req_table[LOWORD(vrq->scsi.handle)]) + esas2r_bugon(); + + /* fill in the table for this handle so we can get back to the + * request. + */ + a->req_table[LOWORD(vrq->scsi.handle)] = rq; + + /* + * add a reference number to the handle to make it unique (until it + * wraps of course) while preserving the least significant word + */ + vrq->scsi.handle = (a->cmd_ref_no++ << 16) | (u16)vrq->scsi.handle; + + /* + * the following formats a SCSI request. the caller can override as + * necessary. clear_vda_request can be called to clear the VDA + * request for another type of request. + */ + vrq->scsi.function = VDA_FUNC_SCSI; + vrq->scsi.sense_len = SENSE_DATA_SZ; + + /* clear out sg_list_offset and chain_offset */ + vrq->scsi.sg_list_offset = 0; + vrq->scsi.chain_offset = 0; + vrq->scsi.flags = 0; + vrq->scsi.reserved = 0; + + /* set the sense buffer to be the data payload buffer */ + vrq->scsi.ppsense_buf + = cpu_to_le64(rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); +} + +static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq, + struct esas2r_adapter *a) +{ + unsigned long flags; + + if (list_empty(&rq->sg_table_head)) + return; + + spin_lock_irqsave(&a->sg_list_lock, flags); + list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head); + spin_unlock_irqrestore(&a->sg_list_lock, flags); +} + +static inline void esas2r_rq_destroy_request(struct esas2r_request *rq, + struct esas2r_adapter *a) + +{ + esas2r_rq_free_sg_lists(rq, a); + a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL; + rq->data_buf = NULL; +} + +static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a) +{ + + return test_bit(AF_BUSRST_NEEDED, &a->flags) || + test_bit(AF_BUSRST_DETECTED, &a->flags) || + test_bit(AF_CHPRST_NEEDED, &a->flags) || + test_bit(AF_CHPRST_DETECTED, &a->flags) || + test_bit(AF_PORT_CHANGE, &a->flags); + +} + +/* + * Build the scatter/gather list for an I/O request according to the + * specifications placed in the esas2r_sg_context. 
The caller must initialize + * struct esas2r_sg_context prior to the initial call by calling + * esas2r_sgc_init() + */ +static inline bool esas2r_build_sg_list(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc) +{ + if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0)) + return true; + + return (*a->build_sgl)(a, sgc); +} + +static inline void esas2r_disable_chip_interrupts(struct esas2r_adapter *a) +{ + if (atomic_inc_return(&a->dis_ints_cnt) == 1) + esas2r_write_register_dword(a, MU_INT_MASK_OUT, + ESAS2R_INT_DIS_MASK); +} + +static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a) +{ + if (atomic_dec_return(&a->dis_ints_cnt) == 0) + esas2r_write_register_dword(a, MU_INT_MASK_OUT, + ESAS2R_INT_ENB_MASK); +} + +/* Schedule a TASKLET to perform non-interrupt tasks that may require delays + * or long completion times. + */ +static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a) +{ + /* make sure we don't schedule twice */ + if (!test_and_set_bit(AF_TASKLET_SCHEDULED, &a->flags)) + tasklet_hi_schedule(&a->tasklet); +} + +static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a) +{ + if (!test_bit(AF_DEGRADED_MODE, &a->flags) && + !test_bit(AF_CHPRST_PENDING, &a->flags) && + (a->nvram->options2 & SASNVR2_HEARTBEAT)) + set_bit(AF_HEARTBEAT_ENB, &a->flags); + else + clear_bit(AF_HEARTBEAT_ENB, &a->flags); +} + +static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a) +{ + clear_bit(AF_HEARTBEAT_ENB, &a->flags); + clear_bit(AF_HEARTBEAT, &a->flags); +} + +/* Set the initial state for resetting the adapter on the next pass through + * esas2r_do_deferred. + */ +static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a) +{ + esas2r_disable_heartbeat(a); + + set_bit(AF_CHPRST_NEEDED, &a->flags); + set_bit(AF_CHPRST_PENDING, &a->flags); + set_bit(AF_DISC_PENDING, &a->flags); +} + +/* See if an interrupt is pending on the adapter. 
*/ +static inline bool esas2r_adapter_interrupt_pending(struct esas2r_adapter *a) +{ + u32 intstat; + + if (a->int_mask == 0) + return false; + + intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); + + if ((intstat & a->int_mask) == 0) + return false; + + esas2r_disable_chip_interrupts(a); + + a->int_stat = intstat; + a->int_mask = 0; + + return true; +} + +static inline u16 esas2r_targ_get_id(struct esas2r_target *t, + struct esas2r_adapter *a) +{ + return (u16)(uintptr_t)(t - a->targetdb); +} + +/* Build and start an asynchronous event request */ +static inline void esas2r_start_ae_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + unsigned long flags; + + esas2r_build_ae_req(a, rq); + + spin_lock_irqsave(&a->queue_lock, flags); + esas2r_start_vda_request(a, rq); + spin_unlock_irqrestore(&a->queue_lock, flags); +} + +static inline void esas2r_comp_list_drain(struct esas2r_adapter *a, + struct list_head *comp_list) +{ + struct esas2r_request *rq; + struct list_head *element, *next; + + list_for_each_safe(element, next, comp_list) { + rq = list_entry(element, struct esas2r_request, comp_list); + list_del_init(element); + esas2r_complete_request(a, rq); + } +} + +/* sysfs handlers */ +extern struct bin_attribute bin_attr_fw; +extern struct bin_attribute bin_attr_fs; +extern struct bin_attribute bin_attr_vda; +extern struct bin_attribute bin_attr_hw; +extern struct bin_attribute bin_attr_live_nvram; +extern struct bin_attribute bin_attr_default_nvram; + +#endif /* ESAS2R_H */ diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c new file mode 100644 index 00000000000..1c079f4300a --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_disc.c @@ -0,0 +1,1184 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_disc.c + * esas2r device discovery routines + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include "esas2r.h" + +/* Miscellaneous internal discovery routines */ +static void esas2r_disc_abort(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_continue(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a); +static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr); +static bool esas2r_disc_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq); + +/* Internal discovery routines that process the states */ +static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_dev_add(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_dev_remove(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_part_info(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_part_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); +static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a, + struct esas2r_request *rq); +static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq); + +void esas2r_disc_initialize(struct esas2r_adapter *a) +{ + struct esas2r_sas_nvram *nvr = a->nvram; + + esas2r_trace_enter(); + + clear_bit(AF_DISC_IN_PROG, &a->flags); + clear_bit(AF2_DEV_SCAN, &a->flags2); + clear_bit(AF2_DEV_CNT_OK, &a->flags2); + + a->disc_start_time = jiffies_to_msecs(jiffies); + a->disc_wait_time = nvr->dev_wait_time * 1000; + a->disc_wait_cnt = nvr->dev_wait_count; + + if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS) + a->disc_wait_cnt = ESAS2R_MAX_TARGETS; + + /* + * If we are doing chip reset or power management processing, always + * wait for devices. use the NVRAM device count if it is greater than + * previously discovered devices. + */ + + esas2r_hdebug("starting discovery..."); + + a->general_req.interrupt_cx = NULL; + + if (test_bit(AF_CHPRST_DETECTED, &a->flags) || + test_bit(AF_POWER_MGT, &a->flags)) { + if (a->prev_dev_cnt == 0) { + /* Don't bother waiting if there is nothing to wait + * for. 
+ */ + a->disc_wait_time = 0; + } else { + /* + * Set the device wait count to what was previously + * found. We don't care if the user only configured + * a time because we know the exact count to wait for. + * There is no need to honor the user's wishes to + * always wait the full time. + */ + a->disc_wait_cnt = a->prev_dev_cnt; + + /* + * bump the minimum wait time to 15 seconds since the + * default is 3 (system boot or the boot driver usually + * buys us more time). + */ + if (a->disc_wait_time < 15000) + a->disc_wait_time = 15000; + } + } + + esas2r_trace("disc wait count: %d", a->disc_wait_cnt); + esas2r_trace("disc wait time: %d", a->disc_wait_time); + + if (a->disc_wait_time == 0) + esas2r_disc_check_complete(a); + + esas2r_trace_exit(); +} + +void esas2r_disc_start_waiting(struct esas2r_adapter *a) +{ + unsigned long flags; + + spin_lock_irqsave(&a->mem_lock, flags); + + if (a->disc_ctx.disc_evt) + esas2r_disc_start_port(a); + + spin_unlock_irqrestore(&a->mem_lock, flags); +} + +void esas2r_disc_check_for_work(struct esas2r_adapter *a) +{ + struct esas2r_request *rq = &a->general_req; + + /* service any pending interrupts first */ + + esas2r_polled_interrupt(a); + + /* + * now, interrupt processing may have queued up a discovery event. go + * see if we have one to start. we couldn't start it in the ISR since + * polled discovery would cause a deadlock. + */ + + esas2r_disc_start_waiting(a); + + if (rq->interrupt_cx == NULL) + return; + + if (rq->req_stat == RS_STARTED + && rq->timeout <= RQ_MAX_TIMEOUT) { + /* wait for the current discovery request to complete. */ + esas2r_wait_request(a, rq); + + if (rq->req_stat == RS_TIMEOUT) { + esas2r_disc_abort(a, rq); + esas2r_local_reset_adapter(a); + return; + } + } + + if (rq->req_stat == RS_PENDING + || rq->req_stat == RS_STARTED) + return; + + esas2r_disc_continue(a, rq); +} + +void esas2r_disc_check_complete(struct esas2r_adapter *a) +{ + unsigned long flags; + + esas2r_trace_enter(); + + /* check to see if we should be waiting for devices */ + if (a->disc_wait_time) { + u32 currtime = jiffies_to_msecs(jiffies); + u32 time = currtime - a->disc_start_time; + + /* + * Wait until the device wait time is exhausted or the device + * wait count is satisfied. + */ + if (time < a->disc_wait_time + && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt + || a->disc_wait_cnt == 0)) { + /* After three seconds of waiting, schedule a scan. */ + if (time >= 3000 + && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { + spin_lock_irqsave(&a->mem_lock, flags); + esas2r_disc_queue_event(a, DCDE_DEV_SCAN); + spin_unlock_irqrestore(&a->mem_lock, flags); + } + + esas2r_trace_exit(); + return; + } + + /* + * We are done waiting...we think. Adjust the wait time to + * consume events after the count is met. + */ + if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2)) + a->disc_wait_time = time + 3000; + + /* If we haven't done a full scan yet, do it now. */ + if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { + spin_lock_irqsave(&a->mem_lock, flags); + esas2r_disc_queue_event(a, DCDE_DEV_SCAN); + spin_unlock_irqrestore(&a->mem_lock, flags); + esas2r_trace_exit(); + return; + } + + /* + * Now, if there is still time left to consume events, continue + * waiting. 
+ */ + if (time < a->disc_wait_time) { + esas2r_trace_exit(); + return; + } + } else { + if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) { + spin_lock_irqsave(&a->mem_lock, flags); + esas2r_disc_queue_event(a, DCDE_DEV_SCAN); + spin_unlock_irqrestore(&a->mem_lock, flags); + } + } + + /* We want to stop waiting for devices. */ + a->disc_wait_time = 0; + + if (test_bit(AF_DISC_POLLED, &a->flags) && + test_bit(AF_DISC_IN_PROG, &a->flags)) { + /* + * Polled discovery is still pending so continue the active + * discovery until it is done. At that point, we will stop + * polled discovery and transition to interrupt driven + * discovery. + */ + } else { + /* + * Done waiting for devices. Note that we get here immediately + * after deferred waiting completes because that is interrupt + * driven; i.e. There is no transition. + */ + esas2r_disc_fix_curr_requests(a); + clear_bit(AF_DISC_PENDING, &a->flags); + + /* + * We have deferred target state changes until now because we + * don't want to report any removals (due to the first arrival) + * until the device wait time expires. + */ + set_bit(AF_PORT_CHANGE, &a->flags); + } + + esas2r_trace_exit(); +} + +void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt) +{ + struct esas2r_disc_context *dc = &a->disc_ctx; + + esas2r_trace_enter(); + + esas2r_trace("disc_event: %d", disc_evt); + + /* Initialize the discovery context */ + dc->disc_evt |= disc_evt; + + /* + * Don't start discovery before or during polled discovery. if we did, + * we would have a deadlock if we are in the ISR already. + */ + if (!test_bit(AF_CHPRST_PENDING, &a->flags) && + !test_bit(AF_DISC_POLLED, &a->flags)) + esas2r_disc_start_port(a); + + esas2r_trace_exit(); +} + +bool esas2r_disc_start_port(struct esas2r_adapter *a) +{ + struct esas2r_request *rq = &a->general_req; + struct esas2r_disc_context *dc = &a->disc_ctx; + bool ret; + + esas2r_trace_enter(); + + if (test_bit(AF_DISC_IN_PROG, &a->flags)) { + esas2r_trace_exit(); + + return false; + } + + /* If there is a discovery waiting, process it. */ + if (dc->disc_evt) { + if (test_bit(AF_DISC_POLLED, &a->flags) + && a->disc_wait_time == 0) { + /* + * We are doing polled discovery, but we no longer want + * to wait for devices. Stop polled discovery and + * transition to interrupt driven discovery. + */ + + esas2r_trace_exit(); + + return false; + } + } else { + /* Discovery is complete. 
*/ + + esas2r_hdebug("disc done"); + + set_bit(AF_PORT_CHANGE, &a->flags); + + esas2r_trace_exit(); + + return false; + } + + /* Handle the discovery context */ + esas2r_trace("disc_evt: %d", dc->disc_evt); + set_bit(AF_DISC_IN_PROG, &a->flags); + dc->flags = 0; + + if (test_bit(AF_DISC_POLLED, &a->flags)) + dc->flags |= DCF_POLLED; + + rq->interrupt_cx = dc; + rq->req_stat = RS_SUCCESS; + + /* Decode the event code */ + if (dc->disc_evt & DCDE_DEV_SCAN) { + dc->disc_evt &= ~DCDE_DEV_SCAN; + + dc->flags |= DCF_DEV_SCAN; + dc->state = DCS_BLOCK_DEV_SCAN; + } else if (dc->disc_evt & DCDE_DEV_CHANGE) { + dc->disc_evt &= ~DCDE_DEV_CHANGE; + + dc->flags |= DCF_DEV_CHANGE; + dc->state = DCS_DEV_RMV; + } + + /* Continue interrupt driven discovery */ + if (!test_bit(AF_DISC_POLLED, &a->flags)) + ret = esas2r_disc_continue(a, rq); + else + ret = true; + + esas2r_trace_exit(); + + return ret; +} + +static bool esas2r_disc_continue(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + + /* Device discovery/removal */ + while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) { + rslt = false; + + switch (dc->state) { + case DCS_DEV_RMV: + + rslt = esas2r_disc_dev_remove(a, rq); + break; + + case DCS_DEV_ADD: + + rslt = esas2r_disc_dev_add(a, rq); + break; + + case DCS_BLOCK_DEV_SCAN: + + rslt = esas2r_disc_block_dev_scan(a, rq); + break; + + case DCS_RAID_GRP_INFO: + + rslt = esas2r_disc_raid_grp_info(a, rq); + break; + + case DCS_PART_INFO: + + rslt = esas2r_disc_part_info(a, rq); + break; + + case DCS_PT_DEV_INFO: + + rslt = esas2r_disc_passthru_dev_info(a, rq); + break; + case DCS_PT_DEV_ADDR: + + rslt = esas2r_disc_passthru_dev_addr(a, rq); + break; + case DCS_DISC_DONE: + + dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN); + break; + + default: + + esas2r_bugon(); + dc->state = DCS_DISC_DONE; + break; + } + + if (rslt) + return true; + } + + /* Discovery is done...for now. */ + rq->interrupt_cx = NULL; + + if (!test_bit(AF_DISC_PENDING, &a->flags)) + esas2r_disc_fix_curr_requests(a); + + clear_bit(AF_DISC_IN_PROG, &a->flags); + + /* Start the next discovery. */ + return esas2r_disc_start_port(a); +} + +static bool esas2r_disc_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + unsigned long flags; + + /* Set the timeout to a minimum value. */ + if (rq->timeout < ESAS2R_DEFAULT_TMO) + rq->timeout = ESAS2R_DEFAULT_TMO; + + /* + * Override the request type to distinguish discovery requests. If we + * end up deferring the request, esas2r_disc_local_start_request() + * will be called to restart it. 
+ */ + rq->req_type = RT_DISC_REQ; + + spin_lock_irqsave(&a->queue_lock, flags); + + if (!test_bit(AF_CHPRST_PENDING, &a->flags) && + !test_bit(AF_FLASHING, &a->flags)) + esas2r_disc_local_start_request(a, rq); + else + list_add_tail(&rq->req_list, &a->defer_list); + + spin_unlock_irqrestore(&a->queue_lock, flags); + + return true; +} + +void esas2r_disc_local_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + esas2r_trace_enter(); + + list_add_tail(&rq->req_list, &a->active_list); + + esas2r_start_vda_request(a, rq); + + esas2r_trace_exit(); + + return; +} + +static void esas2r_disc_abort(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + + esas2r_trace_enter(); + + /* abort the current discovery */ + + dc->state = DCS_DISC_DONE; + + esas2r_trace_exit(); +} + +static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + + esas2r_trace_enter(); + + esas2r_rq_init_request(rq, a); + + esas2r_build_mgt_req(a, + rq, + VDAMGT_DEV_SCAN, + 0, + 0, + 0, + NULL); + + rq->comp_cb = esas2r_disc_block_dev_scan_cb; + + rq->timeout = 30000; + rq->interrupt_cx = dc; + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + unsigned long flags; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + if (rq->req_stat == RS_SUCCESS) + dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; + + dc->state = DCS_RAID_GRP_INFO; + dc->raid_grp_ix = 0; + + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + struct atto_vda_grp_info *grpinfo; + + esas2r_trace_enter(); + + esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix); + + if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) { + dc->state = DCS_DISC_DONE; + + esas2r_trace_exit(); + + return false; + } + + esas2r_rq_init_request(rq, a); + + grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info; + + memset(grpinfo, 0, sizeof(struct atto_vda_grp_info)); + + esas2r_build_mgt_req(a, + rq, + VDAMGT_GRP_INFO, + dc->scan_gen, + 0, + sizeof(struct atto_vda_grp_info), + NULL); + + grpinfo->grp_index = dc->raid_grp_ix; + + rq->comp_cb = esas2r_disc_raid_grp_info_cb; + + rq->interrupt_cx = dc; + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + unsigned long flags; + struct atto_vda_grp_info *grpinfo; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + if (rq->req_stat == RS_SCAN_GEN) { + dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; + dc->raid_grp_ix = 0; + goto done; + } + + if (rq->req_stat == RS_SUCCESS) { + grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info; + + if 
(grpinfo->status != VDA_GRP_STAT_ONLINE + && grpinfo->status != VDA_GRP_STAT_DEGRADED) { + /* go to the next group. */ + + dc->raid_grp_ix++; + } else { + memcpy(&dc->raid_grp_name[0], + &grpinfo->grp_name[0], + sizeof(grpinfo->grp_name)); + + dc->interleave = le32_to_cpu(grpinfo->interleave); + dc->block_size = le32_to_cpu(grpinfo->block_size); + + dc->state = DCS_PART_INFO; + dc->part_num = 0; + } + } else { + if (!(rq->req_stat == RS_GRP_INVALID)) { + esas2r_log(ESAS2R_LOG_WARN, + "A request for RAID group info failed - " + "returned with %x", + rq->req_stat); + } + + dc->dev_ix = 0; + dc->state = DCS_PT_DEV_INFO; + } + +done: + + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static bool esas2r_disc_part_info(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + struct atto_vdapart_info *partinfo; + + esas2r_trace_enter(); + + esas2r_trace("part_num: %d", dc->part_num); + + if (dc->part_num >= VDA_MAX_PARTITIONS) { + dc->state = DCS_RAID_GRP_INFO; + dc->raid_grp_ix++; + + esas2r_trace_exit(); + + return false; + } + + esas2r_rq_init_request(rq, a); + + partinfo = &rq->vda_rsp_data->mgt_data.data.part_info; + + memset(partinfo, 0, sizeof(struct atto_vdapart_info)); + + esas2r_build_mgt_req(a, + rq, + VDAMGT_PART_INFO, + dc->scan_gen, + 0, + sizeof(struct atto_vdapart_info), + NULL); + + partinfo->part_no = dc->part_num; + + memcpy(&partinfo->grp_name[0], + &dc->raid_grp_name[0], + sizeof(partinfo->grp_name)); + + rq->comp_cb = esas2r_disc_part_info_cb; + + rq->interrupt_cx = dc; + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_part_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + unsigned long flags; + struct atto_vdapart_info *partinfo; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + if (rq->req_stat == RS_SCAN_GEN) { + dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; + dc->raid_grp_ix = 0; + dc->state = DCS_RAID_GRP_INFO; + } else if (rq->req_stat == RS_SUCCESS) { + partinfo = &rq->vda_rsp_data->mgt_data.data.part_info; + + dc->part_num = partinfo->part_no; + + dc->curr_virt_id = le16_to_cpu(partinfo->target_id); + + esas2r_targ_db_add_raid(a, dc); + + dc->part_num++; + } else { + if (!(rq->req_stat == RS_PART_LAST)) { + esas2r_log(ESAS2R_LOG_WARN, + "A request for RAID group partition info " + "failed - status:%d", rq->req_stat); + } + + dc->state = DCS_RAID_GRP_INFO; + dc->raid_grp_ix++; + } + + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + struct atto_vda_devinfo *devinfo; + + esas2r_trace_enter(); + + esas2r_trace("dev_ix: %d", dc->dev_ix); + + esas2r_rq_init_request(rq, a); + + devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info; + + memset(devinfo, 0, sizeof(struct atto_vda_devinfo)); + + esas2r_build_mgt_req(a, + 
rq, + VDAMGT_DEV_PT_INFO, + dc->scan_gen, + dc->dev_ix, + sizeof(struct atto_vda_devinfo), + NULL); + + rq->comp_cb = esas2r_disc_passthru_dev_info_cb; + + rq->interrupt_cx = dc; + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + unsigned long flags; + struct atto_vda_devinfo *devinfo; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + if (rq->req_stat == RS_SCAN_GEN) { + dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation; + dc->dev_ix = 0; + dc->state = DCS_PT_DEV_INFO; + } else if (rq->req_stat == RS_SUCCESS) { + devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info; + + dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index); + + dc->curr_virt_id = le16_to_cpu(devinfo->target_id); + + if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) { + dc->curr_phys_id = + le16_to_cpu(devinfo->phys_target_id); + dc->dev_addr_type = ATTO_GDA_AT_PORT; + dc->state = DCS_PT_DEV_ADDR; + + esas2r_trace("curr_virt_id: %d", dc->curr_virt_id); + esas2r_trace("curr_phys_id: %d", dc->curr_phys_id); + } else { + dc->dev_ix++; + } + } else { + if (!(rq->req_stat == RS_DEV_INVALID)) { + esas2r_log(ESAS2R_LOG_WARN, + "A request for device information failed - " + "status:%d", rq->req_stat); + } + + dc->state = DCS_DISC_DONE; + } + + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + bool rslt; + struct atto_ioctl *hi; + struct esas2r_sg_context sgc; + + esas2r_trace_enter(); + + esas2r_rq_init_request(rq, a); + + /* format the request. */ + + sgc.cur_offset = NULL; + sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr; + sgc.length = offsetof(struct atto_ioctl, data) + + sizeof(struct atto_hba_get_device_address); + + esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge); + + esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA); + + if (!esas2r_build_sg_list(a, rq, &sgc)) { + esas2r_rq_destroy_request(rq, a); + + esas2r_trace_exit(); + + return false; + } + + rq->comp_cb = esas2r_disc_passthru_dev_addr_cb; + + rq->interrupt_cx = dc; + + /* format the IOCTL data. */ + + hi = (struct atto_ioctl *)a->disc_buffer; + + memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN); + + hi->version = ATTO_VER_GET_DEV_ADDR0; + hi->function = ATTO_FUNC_GET_DEV_ADDR; + hi->flags = HBAF_TUNNEL; + + hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id); + hi->data.get_dev_addr.addr_type = dc->dev_addr_type; + + /* start it up. 
*/ + + rslt = esas2r_disc_start_request(a, rq); + + esas2r_trace_exit(); + + return rslt; +} + +static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + struct esas2r_target *t = NULL; + unsigned long flags; + struct atto_ioctl *hi; + u16 addrlen; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->mem_lock, flags); + + hi = (struct atto_ioctl *)a->disc_buffer; + + if (rq->req_stat == RS_SUCCESS + && hi->status == ATTO_STS_SUCCESS) { + addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len); + + if (dc->dev_addr_type == ATTO_GDA_AT_PORT) { + if (addrlen == sizeof(u64)) + memcpy(&dc->sas_addr, + &hi->data.get_dev_addr.address[0], + addrlen); + else + memset(&dc->sas_addr, 0, sizeof(dc->sas_addr)); + + /* Get the unique identifier. */ + dc->dev_addr_type = ATTO_GDA_AT_UNIQUE; + + goto next_dev_addr; + } else { + /* Add the pass through target. */ + if (HIBYTE(addrlen) == 0) { + t = esas2r_targ_db_add_pthru(a, + dc, + &hi->data. + get_dev_addr. + address[0], + (u8)hi->data. + get_dev_addr. + addr_len); + + if (t) + memcpy(&t->sas_addr, &dc->sas_addr, + sizeof(t->sas_addr)); + } else { + /* getting the back end data failed */ + + esas2r_log(ESAS2R_LOG_WARN, + "an error occurred retrieving the " + "back end data (%s:%d)", + __func__, + __LINE__); + } + } + } else { + /* getting the back end data failed */ + + esas2r_log(ESAS2R_LOG_WARN, + "an error occurred retrieving the back end data - " + "rq->req_stat:%d hi->status:%d", + rq->req_stat, hi->status); + } + + /* proceed to the next device. */ + + if (dc->flags & DCF_DEV_SCAN) { + dc->dev_ix++; + dc->state = DCS_PT_DEV_INFO; + } else if (dc->flags & DCF_DEV_CHANGE) { + dc->curr_targ++; + dc->state = DCS_DEV_ADD; + } else { + esas2r_bugon(); + } + +next_dev_addr: + esas2r_rq_destroy_request(rq, a); + + /* continue discovery if it's interrupt driven */ + + if (!(dc->flags & DCF_POLLED)) + esas2r_disc_continue(a, rq); + + spin_unlock_irqrestore(&a->mem_lock, flags); + + esas2r_trace_exit(); +} + +static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr) +{ + struct esas2r_adapter *a = sgc->adapter; + + if (sgc->length > ESAS2R_DISC_BUF_LEN) + esas2r_bugon(); + + *addr = a->uncached_phys + + (u64)((u8 *)a->disc_buffer - a->uncached); + + return sgc->length; +} + +static bool esas2r_disc_dev_remove(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + struct esas2r_target *t; + struct esas2r_target *t2; + + esas2r_trace_enter(); + + /* process removals. */ + + for (t = a->targetdb; t < a->targetdb_end; t++) { + if (t->new_target_state != TS_NOT_PRESENT) + continue; + + t->new_target_state = TS_INVALID; + + /* remove the right target! */ + + t2 = + esas2r_targ_db_find_by_virt_id(a, + esas2r_targ_get_id(t, + a)); + + if (t2) + esas2r_targ_db_remove(a, t2); + } + + /* removals complete. process arrivals. */ + + dc->state = DCS_DEV_ADD; + dc->curr_targ = a->targetdb; + + esas2r_trace_exit(); + + return false; +} + +static bool esas2r_disc_dev_add(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_disc_context *dc = + (struct esas2r_disc_context *)rq->interrupt_cx; + struct esas2r_target *t = dc->curr_targ; + + if (t >= a->targetdb_end) { + /* done processing state changes. 
*/ + + dc->state = DCS_DISC_DONE; + } else if (t->new_target_state == TS_PRESENT) { + struct atto_vda_ae_lu *luevt = &t->lu_event; + + esas2r_trace_enter(); + + /* clear this now in case more events come in. */ + + t->new_target_state = TS_INVALID; + + /* setup the discovery context for adding this device. */ + + dc->curr_virt_id = esas2r_targ_get_id(t, a); + + if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id) + + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) + && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) { + dc->block_size = luevt->id.tgtlun_raid.dwblock_size; + dc->interleave = luevt->id.tgtlun_raid.dwinterleave; + } else { + dc->block_size = 0; + dc->interleave = 0; + } + + /* determine the device type being added. */ + + if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) { + if (luevt->dwevent & VDAAE_LU_PHYS_ID) { + dc->state = DCS_PT_DEV_ADDR; + dc->dev_addr_type = ATTO_GDA_AT_PORT; + dc->curr_phys_id = luevt->wphys_target_id; + } else { + esas2r_log(ESAS2R_LOG_WARN, + "luevt->dwevent does not have the " + "VDAAE_LU_PHYS_ID bit set (%s:%d)", + __func__, __LINE__); + } + } else { + dc->raid_grp_name[0] = 0; + + esas2r_targ_db_add_raid(a, dc); + } + + esas2r_trace("curr_virt_id: %d", dc->curr_virt_id); + esas2r_trace("curr_phys_id: %d", dc->curr_phys_id); + esas2r_trace("dwevent: %d", luevt->dwevent); + + esas2r_trace_exit(); + } + + if (dc->state == DCS_DEV_ADD) { + /* go to the next device. */ + + dc->curr_targ++; + } + + return false; +} + +/* + * When discovery is done, find all requests on defer queue and + * test if they need to be modified. If a target is no longer present + * then complete the request with RS_SEL. Otherwise, update the + * target_id since after a hibernate it can be a different value. + * VDA does not make passthrough target IDs persistent. + */ +static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a) +{ + unsigned long flags; + struct esas2r_target *t; + struct esas2r_request *rq; + struct list_head *element; + + /* update virt_targ_id in any outstanding esas2r_requests */ + + spin_lock_irqsave(&a->queue_lock, flags); + + list_for_each(element, &a->defer_list) { + rq = list_entry(element, struct esas2r_request, req_list); + if (rq->vrq->scsi.function == VDA_FUNC_SCSI) { + t = a->targetdb + rq->target_id; + + if (t->target_state == TS_PRESENT) + rq->vrq->scsi.target_id = le16_to_cpu( + t->virt_targ_id); + else + rq->req_stat = RS_SEL; + } + + } + + spin_unlock_irqrestore(&a->queue_lock, flags); +} diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c new file mode 100644 index 00000000000..b7dc59fca7a --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_flash.c @@ -0,0 +1,1521 @@ + +/* + * linux/drivers/scsi/esas2r/esas2r_flash.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +/* local macro defs */ +#define esas2r_nvramcalc_cksum(n) \ + (esas2r_calc_byte_cksum((u8 *)(n), sizeof(struct esas2r_sas_nvram), \ + SASNVR_CKSUM_SEED)) +#define esas2r_nvramcalc_xor_cksum(n) \ + (esas2r_calc_byte_xor_cksum((u8 *)(n), \ + sizeof(struct esas2r_sas_nvram), 0)) + +#define ESAS2R_FS_DRVR_VER 2 + +static struct esas2r_sas_nvram default_sas_nvram = { + { 'E', 'S', 'A', 'S' }, /* signature */ + SASNVR_VERSION, /* version */ + 0, /* checksum */ + 31, /* max_lun_for_target */ + SASNVR_PCILAT_MAX, /* pci_latency */ + SASNVR1_BOOT_DRVR, /* options1 */ + SASNVR2_HEARTBEAT | SASNVR2_SINGLE_BUS /* options2 */ + | SASNVR2_SW_MUX_CTRL, + SASNVR_COAL_DIS, /* int_coalescing */ + SASNVR_CMDTHR_NONE, /* cmd_throttle */ + 3, /* dev_wait_time */ + 1, /* dev_wait_count */ + 0, /* spin_up_delay */ + 0, /* ssp_align_rate */ + { 0x50, 0x01, 0x08, 0x60, /* sas_addr */ + 0x00, 0x00, 0x00, 0x00 }, + { SASNVR_SPEED_AUTO }, /* phy_speed */ + { SASNVR_MUX_DISABLED }, /* SAS multiplexing */ + { 0 }, /* phy_flags */ + SASNVR_SORT_SAS_ADDR, /* sort_type */ + 3, /* dpm_reqcmd_lmt */ + 3, /* dpm_stndby_time */ + 0, /* dpm_active_time */ + { 0 }, /* phy_target_id */ + SASNVR_VSMH_DISABLED, /* virt_ses_mode */ + SASNVR_RWM_DEFAULT, /* read_write_mode */ + 0, /* link down timeout */ + { 0 } /* reserved */ +}; + +static u8 cmd_to_fls_func[] = { + 0xFF, + VDA_FLASH_READ, + VDA_FLASH_BEGINW, + VDA_FLASH_WRITE, + VDA_FLASH_COMMIT, + VDA_FLASH_CANCEL +}; + +static u8 esas2r_calc_byte_xor_cksum(u8 *addr, u32 len, u8 seed) +{ + u32 cksum = seed; + u8 *p = (u8 *)&cksum; + + while (len) { + if (((uintptr_t)addr & 3) == 0) + break; + + cksum = cksum ^ *addr; + addr++; + len--; + } + while (len >= sizeof(u32)) { + cksum = cksum ^ *(u32 *)addr; + addr += 4; + len -= 4; + } + while (len--) { + cksum = cksum ^ *addr; + addr++; + } + return p[0] ^ p[1] ^ p[2] ^ p[3]; +} + +static u8 esas2r_calc_byte_cksum(void *addr, u32 len, u8 seed) +{ + u8 *p = (u8 *)addr; + u8 cksum = seed; + + while (len--) + cksum = cksum + p[len]; + return cksum; +} + +/* Interrupt callback to process 
FM API write requests. */ +static void esas2r_fmapi_callback(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct atto_vda_flash_req *vrq = &rq->vrq->flash; + struct esas2r_flash_context *fc = + (struct esas2r_flash_context *)rq->interrupt_cx; + + if (rq->req_stat == RS_SUCCESS) { + /* Last request was successful. See what to do now. */ + switch (vrq->sub_func) { + case VDA_FLASH_BEGINW: + if (fc->sgc.cur_offset == NULL) + goto commit; + + vrq->sub_func = VDA_FLASH_WRITE; + rq->req_stat = RS_PENDING; + break; + + case VDA_FLASH_WRITE: +commit: + vrq->sub_func = VDA_FLASH_COMMIT; + rq->req_stat = RS_PENDING; + rq->interrupt_cb = fc->interrupt_cb; + break; + + default: + break; + } + } + + if (rq->req_stat != RS_PENDING) + /* + * All done. call the real callback to complete the FM API + * request. We should only get here if a BEGINW or WRITE + * operation failed. + */ + (*fc->interrupt_cb)(a, rq); +} + +/* + * Build a flash request based on the flash context. The request status + * is filled in on an error. + */ +static void build_flash_msg(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_flash_context *fc = + (struct esas2r_flash_context *)rq->interrupt_cx; + struct esas2r_sg_context *sgc = &fc->sgc; + u8 cksum = 0; + + /* calculate the checksum */ + if (fc->func == VDA_FLASH_BEGINW) { + if (sgc->cur_offset) + cksum = esas2r_calc_byte_xor_cksum(sgc->cur_offset, + sgc->length, + 0); + rq->interrupt_cb = esas2r_fmapi_callback; + } else { + rq->interrupt_cb = fc->interrupt_cb; + } + esas2r_build_flash_req(a, + rq, + fc->func, + cksum, + fc->flsh_addr, + sgc->length); + + esas2r_rq_free_sg_lists(rq, a); + + /* + * remember the length we asked for. we have to keep track of + * the current amount done so we know how much to compare when + * doing the verification phase. + */ + fc->curr_len = fc->sgc.length; + + if (sgc->cur_offset) { + /* setup the S/G context to build the S/G table */ + esas2r_sgc_init(sgc, a, rq, &rq->vrq->flash.data.sge[0]); + + if (!esas2r_build_sg_list(a, rq, sgc)) { + rq->req_stat = RS_BUSY; + return; + } + } else { + fc->sgc.length = 0; + } + + /* update the flsh_addr to the next one to write to */ + fc->flsh_addr += fc->curr_len; +} + +/* determine the method to process the flash request */ +static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq) +{ + /* + * assume we have more to do. if we return with the status set to + * RS_PENDING, FM API tasks will continue. + */ + rq->req_stat = RS_PENDING; + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + /* not supported for now */; + else + build_flash_msg(a, rq); + + return rq->req_stat == RS_PENDING; +} + +/* boot image fixer uppers called before downloading the image. 
*/ +static void fix_bios(struct esas2r_adapter *a, struct esas2r_flash_img *fi) +{ + struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_BIOS]; + struct esas2r_pc_image *pi; + struct esas2r_boot_header *bh; + + pi = (struct esas2r_pc_image *)((u8 *)fi + ch->image_offset); + bh = + (struct esas2r_boot_header *)((u8 *)pi + + le16_to_cpu(pi->header_offset)); + bh->device_id = cpu_to_le16(a->pcid->device); + + /* Recalculate the checksum in the PNP header if there */ + if (pi->pnp_offset) { + u8 *pnp_header_bytes = + ((u8 *)pi + le16_to_cpu(pi->pnp_offset)); + + /* Identifier - dword that starts at byte 10 */ + *((u32 *)&pnp_header_bytes[10]) = + cpu_to_le32(MAKEDWORD(a->pcid->subsystem_vendor, + a->pcid->subsystem_device)); + + /* Checksum - byte 9 */ + pnp_header_bytes[9] -= esas2r_calc_byte_cksum(pnp_header_bytes, + 32, 0); + } + + /* Recalculate the checksum needed by the PC */ + pi->checksum = pi->checksum - + esas2r_calc_byte_cksum((u8 *)pi, ch->length, 0); +} + +static void fix_efi(struct esas2r_adapter *a, struct esas2r_flash_img *fi) +{ + struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_EFI]; + u32 len = ch->length; + u32 offset = ch->image_offset; + struct esas2r_efi_image *ei; + struct esas2r_boot_header *bh; + + while (len) { + u32 thislen; + + ei = (struct esas2r_efi_image *)((u8 *)fi + offset); + bh = (struct esas2r_boot_header *)((u8 *)ei + + le16_to_cpu( + ei->header_offset)); + bh->device_id = cpu_to_le16(a->pcid->device); + thislen = (u32)le16_to_cpu(bh->image_length) * 512; + + if (thislen > len) + break; + + len -= thislen; + offset += thislen; + } +} + +/* Complete a FM API request with the specified status. */ +static bool complete_fmapi_req(struct esas2r_adapter *a, + struct esas2r_request *rq, u8 fi_stat) +{ + struct esas2r_flash_context *fc = + (struct esas2r_flash_context *)rq->interrupt_cx; + struct esas2r_flash_img *fi = fc->fi; + + fi->status = fi_stat; + fi->driver_error = rq->req_stat; + rq->interrupt_cb = NULL; + rq->req_stat = RS_SUCCESS; + + if (fi_stat != FI_STAT_IMG_VER) + memset(fc->scratch, 0, FM_BUF_SZ); + + esas2r_enable_heartbeat(a); + clear_bit(AF_FLASH_LOCK, &a->flags); + return false; +} + +/* Process each phase of the flash download process. */ +static void fw_download_proc(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_flash_context *fc = + (struct esas2r_flash_context *)rq->interrupt_cx; + struct esas2r_flash_img *fi = fc->fi; + struct esas2r_component_header *ch; + u32 len; + u8 *p, *q; + + /* If the previous operation failed, just return. */ + if (rq->req_stat != RS_SUCCESS) + goto error; + + /* + * If an upload just completed and the compare length is non-zero, + * then we just read back part of the image we just wrote. verify the + * section and continue reading until the entire image is verified. 
+ */ + if (fc->func == VDA_FLASH_READ + && fc->cmp_len) { + ch = &fi->cmp_hdr[fc->comp_typ]; + + p = fc->scratch; + q = (u8 *)fi /* start of the whole gob */ + + ch->image_offset /* start of the current image */ + + ch->length /* end of the current image */ + - fc->cmp_len; /* where we are now */ + + /* + * NOTE - curr_len is the exact count of bytes for the read + * even when the end is read and its not a full buffer + */ + for (len = fc->curr_len; len; len--) + if (*p++ != *q++) + goto error; + + fc->cmp_len -= fc->curr_len; /* # left to compare */ + + /* Update fc and determine the length for the next upload */ + if (fc->cmp_len > FM_BUF_SZ) + fc->sgc.length = FM_BUF_SZ; + else + fc->sgc.length = fc->cmp_len; + + fc->sgc.cur_offset = fc->sgc_offset + + ((u8 *)fc->scratch - (u8 *)fi); + } + + /* + * This code uses a 'while' statement since the next component may + * have a length = zero. This can happen since some components are + * not required. At the end of this 'while' we set up the length + * for the next request and therefore sgc.length can be = 0. + */ + while (fc->sgc.length == 0) { + ch = &fi->cmp_hdr[fc->comp_typ]; + + switch (fc->task) { + case FMTSK_ERASE_BOOT: + /* the BIOS image is written next */ + ch = &fi->cmp_hdr[CH_IT_BIOS]; + if (ch->length == 0) + goto no_bios; + + fc->task = FMTSK_WRTBIOS; + fc->func = VDA_FLASH_BEGINW; + fc->comp_typ = CH_IT_BIOS; + fc->flsh_addr = FLS_OFFSET_BOOT; + fc->sgc.length = ch->length; + fc->sgc.cur_offset = fc->sgc_offset + + ch->image_offset; + break; + + case FMTSK_WRTBIOS: + /* + * The BIOS image has been written - read it and + * verify it + */ + fc->task = FMTSK_READBIOS; + fc->func = VDA_FLASH_READ; + fc->flsh_addr = FLS_OFFSET_BOOT; + fc->cmp_len = ch->length; + fc->sgc.length = FM_BUF_SZ; + fc->sgc.cur_offset = fc->sgc_offset + + ((u8 *)fc->scratch - + (u8 *)fi); + break; + + case FMTSK_READBIOS: +no_bios: + /* + * Mark the component header status for the image + * completed + */ + ch->status = CH_STAT_SUCCESS; + + /* The MAC image is written next */ + ch = &fi->cmp_hdr[CH_IT_MAC]; + if (ch->length == 0) + goto no_mac; + + fc->task = FMTSK_WRTMAC; + fc->func = VDA_FLASH_BEGINW; + fc->comp_typ = CH_IT_MAC; + fc->flsh_addr = FLS_OFFSET_BOOT + + fi->cmp_hdr[CH_IT_BIOS].length; + fc->sgc.length = ch->length; + fc->sgc.cur_offset = fc->sgc_offset + + ch->image_offset; + break; + + case FMTSK_WRTMAC: + /* The MAC image has been written - read and verify */ + fc->task = FMTSK_READMAC; + fc->func = VDA_FLASH_READ; + fc->flsh_addr -= ch->length; + fc->cmp_len = ch->length; + fc->sgc.length = FM_BUF_SZ; + fc->sgc.cur_offset = fc->sgc_offset + + ((u8 *)fc->scratch - + (u8 *)fi); + break; + + case FMTSK_READMAC: +no_mac: + /* + * Mark the component header status for the image + * completed + */ + ch->status = CH_STAT_SUCCESS; + + /* The EFI image is written next */ + ch = &fi->cmp_hdr[CH_IT_EFI]; + if (ch->length == 0) + goto no_efi; + + fc->task = FMTSK_WRTEFI; + fc->func = VDA_FLASH_BEGINW; + fc->comp_typ = CH_IT_EFI; + fc->flsh_addr = FLS_OFFSET_BOOT + + fi->cmp_hdr[CH_IT_BIOS].length + + fi->cmp_hdr[CH_IT_MAC].length; + fc->sgc.length = ch->length; + fc->sgc.cur_offset = fc->sgc_offset + + ch->image_offset; + break; + + case FMTSK_WRTEFI: + /* The EFI image has been written - read and verify */ + fc->task = FMTSK_READEFI; + fc->func = VDA_FLASH_READ; + fc->flsh_addr -= ch->length; + fc->cmp_len = ch->length; + fc->sgc.length = FM_BUF_SZ; + fc->sgc.cur_offset = fc->sgc_offset + + ((u8 *)fc->scratch - + (u8 *)fi); + break; + + case 
FMTSK_READEFI: +no_efi: + /* + * Mark the component header status for the image + * completed + */ + ch->status = CH_STAT_SUCCESS; + + /* The CFG image is written next */ + ch = &fi->cmp_hdr[CH_IT_CFG]; + + if (ch->length == 0) + goto no_cfg; + fc->task = FMTSK_WRTCFG; + fc->func = VDA_FLASH_BEGINW; + fc->comp_typ = CH_IT_CFG; + fc->flsh_addr = FLS_OFFSET_CPYR - ch->length; + fc->sgc.length = ch->length; + fc->sgc.cur_offset = fc->sgc_offset + + ch->image_offset; + break; + + case FMTSK_WRTCFG: + /* The CFG image has been written - read and verify */ + fc->task = FMTSK_READCFG; + fc->func = VDA_FLASH_READ; + fc->flsh_addr = FLS_OFFSET_CPYR - ch->length; + fc->cmp_len = ch->length; + fc->sgc.length = FM_BUF_SZ; + fc->sgc.cur_offset = fc->sgc_offset + + ((u8 *)fc->scratch - + (u8 *)fi); + break; + + case FMTSK_READCFG: +no_cfg: + /* + * Mark the component header status for the image + * completed + */ + ch->status = CH_STAT_SUCCESS; + + /* + * The download is complete. If in degraded mode, + * attempt a chip reset. + */ + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + esas2r_local_reset_adapter(a); + + a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version; + esas2r_print_flash_rev(a); + + /* Update the type of boot image on the card */ + memcpy(a->image_type, fi->rel_version, + sizeof(fi->rel_version)); + complete_fmapi_req(a, rq, FI_STAT_SUCCESS); + return; + } + + /* If verifying, don't try reading more than what's there */ + if (fc->func == VDA_FLASH_READ + && fc->sgc.length > fc->cmp_len) + fc->sgc.length = fc->cmp_len; + } + + /* Build the request to perform the next action */ + if (!load_image(a, rq)) { +error: + if (fc->comp_typ < fi->num_comps) { + ch = &fi->cmp_hdr[fc->comp_typ]; + ch->status = CH_STAT_FAILED; + } + + complete_fmapi_req(a, rq, FI_STAT_FAILED); + } +} + +/* Determine the flash image adaptyp for this adapter */ +static u8 get_fi_adap_type(struct esas2r_adapter *a) +{ + u8 type; + + /* use the device ID to get the correct adap_typ for this HBA */ + switch (a->pcid->device) { + case ATTO_DID_INTEL_IOP348: + type = FI_AT_SUN_LAKE; + break; + + case ATTO_DID_MV_88RC9580: + case ATTO_DID_MV_88RC9580TS: + case ATTO_DID_MV_88RC9580TSE: + case ATTO_DID_MV_88RC9580TL: + type = FI_AT_MV_9580; + break; + + default: + type = FI_AT_UNKNWN; + break; + } + + return type; +} + +/* Size of config + copyright + flash_ver images, 0 for failure. 
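+ * chk_cfg() walks backwards from the top of the region: each record is
+ * laid out as [data][size][type], so the type word is read first, then
+ * the size word, and the pointer then steps down over the data.  A
+ * sketch of one hypothetical region, highest address on the right:
+ *
+ *	... data | size | FBT_SETUP | data | size | FBT_CPYR
+ *
+ * The walk stops at the first unrecognized type word or when the
+ * running size would exceed the window.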
 */
+static u32 chk_cfg(u8 *cfg, u32 length, u32 *flash_ver)
+{
+	u16 *pw = (u16 *)cfg - 1;
+	u32 sz = 0;
+	u32 len = length;
+
+	if (len == 0)
+		len = FM_BUF_SZ;
+
+	if (flash_ver)
+		*flash_ver = 0;
+
+	while (true) {
+		u16 type;
+		u16 size;
+
+		type = le16_to_cpu(*pw--);
+		size = le16_to_cpu(*pw--);
+
+		if (type != FBT_CPYR
+		    && type != FBT_SETUP
+		    && type != FBT_FLASH_VER)
+			break;
+
+		if (type == FBT_FLASH_VER
+		    && flash_ver)
+			*flash_ver = le32_to_cpu(*(u32 *)(pw - 1));
+
+		sz += size + (2 * sizeof(u16));
+		pw -= size / sizeof(u16);
+
+		if (sz > len - (2 * sizeof(u16)))
+			break;
+	}
+
+	/* See if we are comparing the size to the specified length */
+	if (length && sz != length)
+		return 0;
+
+	return sz;
+}
+
+/* Verify that the boot image is valid */
+static u8 chk_boot(u8 *boot_img, u32 length)
+{
+	struct esas2r_boot_image *bi = (struct esas2r_boot_image *)boot_img;
+	u16 hdroffset = le16_to_cpu(bi->header_offset);
+	struct esas2r_boot_header *bh;
+
+	if (bi->signature != cpu_to_le16(0xaa55)
+	    || (long)hdroffset >
+	    (long)(65536L - sizeof(struct esas2r_boot_header))
+	    || (hdroffset & 3)
+	    || (hdroffset < sizeof(struct esas2r_boot_image))
+	    || ((u32)hdroffset + sizeof(struct esas2r_boot_header) > length))
+		return 0xff;
+
+	bh = (struct esas2r_boot_header *)((char *)bi + hdroffset);
+
+	if (bh->signature[0] != 'P'
+	    || bh->signature[1] != 'C'
+	    || bh->signature[2] != 'I'
+	    || bh->signature[3] != 'R'
+	    || le16_to_cpu(bh->struct_length) <
+	    (u16)sizeof(struct esas2r_boot_header)
+	    || bh->class_code[2] != 0x01
+	    || bh->class_code[1] != 0x04
+	    || bh->class_code[0] != 0x00
+	    || (bh->code_type != CODE_TYPE_PC
+		&& bh->code_type != CODE_TYPE_OPEN
+		&& bh->code_type != CODE_TYPE_EFI))
+		return 0xff;
+
+	return bh->code_type;
+}
+
+/* The sum of all the WORDS of the image */
+static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
+{
+	struct esas2r_flash_img *fi = fc->fi;
+	u16 cksum;
+	u32 len;
+	u16 *pw;
+
+	for (len = (fi->length - fc->fi_hdr_len) / 2,
+	     pw = (u16 *)((u8 *)fi + fc->fi_hdr_len),
+	     cksum = 0;
+	     len;
+	     len--, pw++)
+		cksum = cksum + le16_to_cpu(*pw);
+
+	return cksum;
+}
+
+/*
+ * Verify the flash image structure.  The following verifications will
+ * be performed:
+ * 1) verify the fi_version is correct
+ * 2) verify the checksum of the entire image.
+ * 3) validate the adap_typ, action and length fields.
+ * 4) validate each component header.  check the img_type and
+ *    length fields
+ * 5) validate each component image.  validate signatures and
+ *    local checksums
+ */
+static bool verify_fi(struct esas2r_adapter *a,
+		      struct esas2r_flash_context *fc)
+{
+	struct esas2r_flash_img *fi = fc->fi;
+	u8 type;
+	bool imgerr;
+	u16 i;
+	u32 len;
+	struct esas2r_component_header *ch;
+
+	/* Verify the length - it must be even since we do a word checksum */
+	len = fi->length;
+
+	if ((len & 1)
+	    || len < fc->fi_hdr_len) {
+		fi->status = FI_STAT_LENGTH;
+		return false;
+	}
+
+	/* Get adapter type and verify type in flash image */
+	type = get_fi_adap_type(a);
+	if ((type == FI_AT_UNKNWN) || (fi->adap_typ != type)) {
+		fi->status = FI_STAT_ADAPTYP;
+		return false;
+	}
+
+	/*
+	 * Loop through each component and verify the img_type and length
+	 * fields.  Keep a running count of the sizes so we can verify the
+	 * total size against the additive size.
+	 */
+	imgerr = false;
+
+	for (i = 0, len = 0, ch = fi->cmp_hdr;
+	     i < fi->num_comps;
+	     i++, ch++) {
+		bool cmperr = false;
+
+		/*
+		 * Verify that the component header has the same index as the
+		 * image type. 
The headers must be ordered correctly + */ + if (i != ch->img_type) { + imgerr = true; + ch->status = CH_STAT_INVALID; + continue; + } + + switch (ch->img_type) { + case CH_IT_BIOS: + type = CODE_TYPE_PC; + break; + + case CH_IT_MAC: + type = CODE_TYPE_OPEN; + break; + + case CH_IT_EFI: + type = CODE_TYPE_EFI; + break; + } + + switch (ch->img_type) { + case CH_IT_FW: + case CH_IT_NVR: + break; + + case CH_IT_BIOS: + case CH_IT_MAC: + case CH_IT_EFI: + if (ch->length & 0x1ff) + cmperr = true; + + /* Test if component image is present */ + if (ch->length == 0) + break; + + /* Image is present - verify the image */ + if (chk_boot((u8 *)fi + ch->image_offset, ch->length) + != type) + cmperr = true; + + break; + + case CH_IT_CFG: + + /* Test if component image is present */ + if (ch->length == 0) { + cmperr = true; + break; + } + + /* Image is present - verify the image */ + if (!chk_cfg((u8 *)fi + ch->image_offset + ch->length, + ch->length, NULL)) + cmperr = true; + + break; + + default: + + fi->status = FI_STAT_UNKNOWN; + return false; + } + + if (cmperr) { + imgerr = true; + ch->status = CH_STAT_INVALID; + } else { + ch->status = CH_STAT_PENDING; + len += ch->length; + } + } + + if (imgerr) { + fi->status = FI_STAT_MISSING; + return false; + } + + /* Compare fi->length to the sum of ch->length fields */ + if (len != fi->length - fc->fi_hdr_len) { + fi->status = FI_STAT_LENGTH; + return false; + } + + /* Compute the checksum - it should come out zero */ + if (fi->checksum != calc_fi_checksum(fc)) { + fi->status = FI_STAT_CHKSUM; + return false; + } + + return true; +} + +/* Fill in the FS IOCTL response data from a completed request. */ +static void esas2r_complete_fs_ioctl(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_ioctl_fs *fs = + (struct esas2r_ioctl_fs *)rq->interrupt_cx; + + if (rq->vrq->flash.sub_func == VDA_FLASH_COMMIT) + esas2r_enable_heartbeat(a); + + fs->driver_error = rq->req_stat; + + if (fs->driver_error == RS_SUCCESS) + fs->status = ATTO_STS_SUCCESS; + else + fs->status = ATTO_STS_FAILED; +} + +/* Prepare an FS IOCTL request to be sent to the firmware. 
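+ * Validation happens in stages before anything is queued: the IOCTL
+ * version, the command index (via cmd_to_fls_func[]), the adapter
+ * type/driver version pairing, and finally degraded mode.  Only WRITE
+ * and READ need an S/G list, and only COMMIT stops the heartbeat, which
+ * esas2r_complete_fs_ioctl() above restarts.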
 */
+bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
+			     struct esas2r_ioctl_fs *fs,
+			     struct esas2r_request *rq,
+			     struct esas2r_sg_context *sgc)
+{
+	u8 cmdcnt = (u8)ARRAY_SIZE(cmd_to_fls_func);
+	struct esas2r_ioctlfs_command *fsc = &fs->command;
+	u8 func = 0;
+	u32 datalen;
+
+	fs->status = ATTO_STS_FAILED;
+	fs->driver_error = RS_PENDING;
+
+	if (fs->version > ESAS2R_FS_VER) {
+		fs->status = ATTO_STS_INV_VERSION;
+		return false;
+	}
+
+	if (fsc->command >= cmdcnt) {
+		fs->status = ATTO_STS_INV_FUNC;
+		return false;
+	}
+
+	func = cmd_to_fls_func[fsc->command];
+	if (func == 0xFF) {
+		fs->status = ATTO_STS_INV_FUNC;
+		return false;
+	}
+
+	if (fsc->command != ESAS2R_FS_CMD_CANCEL) {
+		if ((a->pcid->device != ATTO_DID_MV_88RC9580
+		     || fs->adap_type != ESAS2R_FS_AT_ESASRAID2)
+		    && (a->pcid->device != ATTO_DID_MV_88RC9580TS
+			|| fs->adap_type != ESAS2R_FS_AT_TSSASRAID2)
+		    && (a->pcid->device != ATTO_DID_MV_88RC9580TSE
+			|| fs->adap_type != ESAS2R_FS_AT_TSSASRAID2E)
+		    && (a->pcid->device != ATTO_DID_MV_88RC9580TL
+			|| fs->adap_type != ESAS2R_FS_AT_TLSASHBA)) {
+			fs->status = ATTO_STS_INV_ADAPTER;
+			return false;
+		}
+
+		if (fs->driver_ver > ESAS2R_FS_DRVR_VER) {
+			fs->status = ATTO_STS_INV_DRVR_VER;
+			return false;
+		}
+	}
+
+	if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
+		fs->status = ATTO_STS_DEGRADED;
+		return false;
+	}
+
+	rq->interrupt_cb = esas2r_complete_fs_ioctl;
+	rq->interrupt_cx = fs;
+	datalen = le32_to_cpu(fsc->length);
+	esas2r_build_flash_req(a,
+			       rq,
+			       func,
+			       fsc->checksum,
+			       le32_to_cpu(fsc->flash_addr),
+			       datalen);
+
+	if (func == VDA_FLASH_WRITE
+	    || func == VDA_FLASH_READ) {
+		if (datalen == 0) {
+			fs->status = ATTO_STS_INV_FUNC;
+			return false;
+		}
+
+		esas2r_sgc_init(sgc, a, rq, rq->vrq->flash.data.sge);
+		sgc->length = datalen;
+
+		if (!esas2r_build_sg_list(a, rq, sgc)) {
+			fs->status = ATTO_STS_OUT_OF_RSRC;
+			return false;
+		}
+	}
+
+	if (func == VDA_FLASH_COMMIT)
+		esas2r_disable_heartbeat(a);
+
+	esas2r_start_request(a, rq);
+
+	return true;
+}
+
+static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function)
+{
+	u32 starttime;
+	u32 timeout;
+	u32 intstat;
+	u32 doorbell;
+
+	/* Disable chip interrupts awhile */
+	if (function == DRBL_FLASH_REQ)
+		esas2r_disable_chip_interrupts(a);
+
+	/* Issue the request to the firmware */
+	esas2r_write_register_dword(a, MU_DOORBELL_IN, function);
+
+	/* Now wait for the firmware to process it */
+	starttime = jiffies_to_msecs(jiffies);
+
+	if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
+	    test_bit(AF_DISC_PENDING, &a->flags))
+		timeout = 40000;
+	else
+		timeout = 5000;
+
+	while (true) {
+		intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+		if (intstat & MU_INTSTAT_DRBL) {
+			/* Got a doorbell interrupt.  Check for the function */
+			doorbell =
+				esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+						    doorbell);
+			if (doorbell & function)
+				break;
+		}
+
+		schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+		if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
+			/*
+			 * Timeout.  If we were requesting flash access,
+			 * indicate we are done so the firmware knows we gave
+			 * up.  If this was a REQ, we also need to re-enable
+			 * chip interrupts.
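+			 * The expected pairing, as used by
+			 * esas2r_read_flash_block() below, is roughly:
+			 *
+			 *	if (esas2r_flash_access(a, DRBL_FLASH_REQ)) {
+			 *		... access the flash window ...
+			 *		esas2r_flash_access(a, DRBL_FLASH_DONE);
+			 *	}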
+ */ + if (function == DRBL_FLASH_REQ) { + esas2r_hdebug("flash access timeout"); + esas2r_write_register_dword(a, MU_DOORBELL_IN, + DRBL_FLASH_DONE); + esas2r_enable_chip_interrupts(a); + } else { + esas2r_hdebug("flash release timeout"); + } + + return false; + } + } + + /* if we're done, re-enable chip interrupts */ + if (function == DRBL_FLASH_DONE) + esas2r_enable_chip_interrupts(a); + + return true; +} + +#define WINDOW_SIZE ((signed int)MW_DATA_WINDOW_SIZE) + +bool esas2r_read_flash_block(struct esas2r_adapter *a, + void *to, + u32 from, + u32 size) +{ + u8 *end = (u8 *)to; + + /* Try to acquire access to the flash */ + if (!esas2r_flash_access(a, DRBL_FLASH_REQ)) + return false; + + while (size) { + u32 len; + u32 offset; + u32 iatvr; + + if (test_bit(AF2_SERIAL_FLASH, &a->flags2)) + iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE); + else + iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE); + + esas2r_map_data_window(a, iatvr); + offset = from & (WINDOW_SIZE - 1); + len = size; + + if (len > WINDOW_SIZE - offset) + len = WINDOW_SIZE - offset; + + from += len; + size -= len; + + while (len--) { + *end++ = esas2r_read_data_byte(a, offset); + offset++; + } + } + + /* Release flash access */ + esas2r_flash_access(a, DRBL_FLASH_DONE); + return true; +} + +bool esas2r_read_flash_rev(struct esas2r_adapter *a) +{ + u8 bytes[256]; + u16 *pw; + u16 *pwstart; + u16 type; + u16 size; + u32 sz; + + sz = sizeof(bytes); + pw = (u16 *)(bytes + sz); + pwstart = (u16 *)bytes + 2; + + if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_CPYR - sz, sz)) + goto invalid_rev; + + while (pw >= pwstart) { + pw--; + type = le16_to_cpu(*pw); + pw--; + size = le16_to_cpu(*pw); + pw -= size / 2; + + if (type == FBT_CPYR + || type == FBT_SETUP + || pw < pwstart) + continue; + + if (type == FBT_FLASH_VER) + a->flash_ver = le32_to_cpu(*(u32 *)pw); + + break; + } + +invalid_rev: + return esas2r_print_flash_rev(a); +} + +bool esas2r_print_flash_rev(struct esas2r_adapter *a) +{ + u16 year = LOWORD(a->flash_ver); + u8 day = LOBYTE(HIWORD(a->flash_ver)); + u8 month = HIBYTE(HIWORD(a->flash_ver)); + + if (day == 0 + || month == 0 + || day > 31 + || month > 12 + || year < 2006 + || year > 9999) { + strcpy(a->flash_rev, "not found"); + a->flash_ver = 0; + return false; + } + + sprintf(a->flash_rev, "%02d/%02d/%04d", month, day, year); + esas2r_hdebug("flash version: %s", a->flash_rev); + return true; +} + +/* + * Find the type of boot image type that is currently in the flash. + * The chip only has a 64 KB PCI-e expansion ROM + * size so only one image can be flashed at a time. + */ +bool esas2r_read_image_type(struct esas2r_adapter *a) +{ + u8 bytes[256]; + struct esas2r_boot_image *bi; + struct esas2r_boot_header *bh; + u32 sz; + u32 len; + u32 offset; + + /* Start at the base of the boot images and look for a valid image */ + sz = sizeof(bytes); + len = FLS_LENGTH_BOOT; + offset = 0; + + while (true) { + if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_BOOT + + offset, + sz)) + goto invalid_rev; + + bi = (struct esas2r_boot_image *)bytes; + bh = (struct esas2r_boot_header *)((u8 *)bi + + le16_to_cpu( + bi->header_offset)); + if (bi->signature != cpu_to_le16(0xAA55)) + goto invalid_rev; + + if (bh->code_type == CODE_TYPE_PC) { + strcpy(a->image_type, "BIOS"); + + return true; + } else if (bh->code_type == CODE_TYPE_EFI) { + struct esas2r_efi_image *ei; + + /* + * So we have an EFI image. There are several types + * so see which architecture we have. 
+			 */
+			ei = (struct esas2r_efi_image *)bytes;
+
+			switch (le16_to_cpu(ei->machine_type)) {
+			case EFI_MACHINE_IA32:
+				strcpy(a->image_type, "EFI 32-bit");
+				return true;
+
+			case EFI_MACHINE_IA64:
+				strcpy(a->image_type, "EFI itanium");
+				return true;
+
+			case EFI_MACHINE_X64:
+				strcpy(a->image_type, "EFI 64-bit");
+				return true;
+
+			case EFI_MACHINE_EBC:
+				strcpy(a->image_type, "EFI EBC");
+				return true;
+
+			default:
+				goto invalid_rev;
+			}
+		} else {
+			u32 thislen;
+
+			/* jump to the next image */
+			thislen = (u32)le16_to_cpu(bh->image_length) * 512;
+			if (thislen == 0
+			    || thislen + offset > len
+			    || bh->indicator == INDICATOR_LAST)
+				break;
+
+			offset += thislen;
+		}
+	}
+
+invalid_rev:
+	strcpy(a->image_type, "no boot images");
+	return false;
+}
+
+/*
+ * Read and validate current NVRAM parameters by accessing
+ * physical NVRAM directly.  If currently stored parameters are
+ * invalid, use the defaults.
+ */
+bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
+{
+	bool result;
+
+	if (down_interruptible(&a->nvram_semaphore))
+		return false;
+
+	if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
+				     sizeof(struct esas2r_sas_nvram))) {
+		esas2r_hdebug("NVRAM read failed, using defaults");
+		up(&a->nvram_semaphore);
+		return false;
+	}
+
+	result = esas2r_nvram_validate(a);
+
+	up(&a->nvram_semaphore);
+
+	return result;
+}
+
+/* Interrupt callback to process NVRAM completions. */
+static void esas2r_nvram_callback(struct esas2r_adapter *a,
+				  struct esas2r_request *rq)
+{
+	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+	if (rq->req_stat == RS_SUCCESS) {
+		/* last request was successful.  see what to do now. */
+
+		switch (vrq->sub_func) {
+		case VDA_FLASH_BEGINW:
+			vrq->sub_func = VDA_FLASH_WRITE;
+			rq->req_stat = RS_PENDING;
+			break;
+
+		case VDA_FLASH_WRITE:
+			vrq->sub_func = VDA_FLASH_COMMIT;
+			rq->req_stat = RS_PENDING;
+			break;
+
+		case VDA_FLASH_READ:
+			esas2r_nvram_validate(a);
+			break;
+
+		case VDA_FLASH_COMMIT:
+		default:
+			break;
+		}
+	}
+
+	if (rq->req_stat != RS_PENDING) {
+		/* update the NVRAM state */
+		if (rq->req_stat == RS_SUCCESS)
+			set_bit(AF_NVR_VALID, &a->flags);
+		else
+			clear_bit(AF_NVR_VALID, &a->flags);
+
+		esas2r_enable_heartbeat(a);
+
+		up(&a->nvram_semaphore);
+	}
+}
+
+/*
+ * Write the contents of nvram to the adapter's physical NVRAM.
+ * The cached copy of the NVRAM is also updated.
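+ * The settings are sanity-checked first: the SAS address must be an
+ * NAA-5 WWN beginning 50:01:08:6x with a nonzero unique portion
+ * (00:10:86 is, as far as we know, ATTO's OUI), and the checksum byte is
+ * adjusted so that esas2r_nvramcalc_cksum() over the updated structure
+ * yields zero, which is exactly what esas2r_nvram_validate() tests.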
+ */ +bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq, + struct esas2r_sas_nvram *nvram) +{ + struct esas2r_sas_nvram *n = nvram; + u8 sas_address_bytes[8]; + u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0]; + struct atto_vda_flash_req *vrq = &rq->vrq->flash; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return false; + + if (down_interruptible(&a->nvram_semaphore)) + return false; + + if (n == NULL) + n = a->nvram; + + /* check the validity of the settings */ + if (n->version > SASNVR_VERSION) { + up(&a->nvram_semaphore); + return false; + } + + memcpy(&sas_address_bytes[0], n->sas_addr, 8); + + if (sas_address_bytes[0] != 0x50 + || sas_address_bytes[1] != 0x01 + || sas_address_bytes[2] != 0x08 + || (sas_address_bytes[3] & 0xF0) != 0x60 + || ((sas_address_bytes[3] & 0x0F) | sas_address_dwords[1]) == 0) { + up(&a->nvram_semaphore); + return false; + } + + if (n->spin_up_delay > SASNVR_SPINUP_MAX) + n->spin_up_delay = SASNVR_SPINUP_MAX; + + n->version = SASNVR_VERSION; + n->checksum = n->checksum - esas2r_nvramcalc_cksum(n); + memcpy(a->nvram, n, sizeof(struct esas2r_sas_nvram)); + + /* write the NVRAM */ + n = a->nvram; + esas2r_disable_heartbeat(a); + + esas2r_build_flash_req(a, + rq, + VDA_FLASH_BEGINW, + esas2r_nvramcalc_xor_cksum(n), + FLS_OFFSET_NVR, + sizeof(struct esas2r_sas_nvram)); + + if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) { + + vrq->data.sge[0].length = + cpu_to_le32(SGE_LAST | + sizeof(struct esas2r_sas_nvram)); + vrq->data.sge[0].address = cpu_to_le64( + a->uncached_phys + (u64)((u8 *)n - a->uncached)); + } else { + vrq->data.prde[0].ctl_len = + cpu_to_le32(sizeof(struct esas2r_sas_nvram)); + vrq->data.prde[0].address = cpu_to_le64( + a->uncached_phys + + (u64)((u8 *)n - a->uncached)); + } + rq->interrupt_cb = esas2r_nvram_callback; + esas2r_start_request(a, rq); + return true; +} + +/* Validate the cached NVRAM. if the NVRAM is invalid, load the defaults. */ +bool esas2r_nvram_validate(struct esas2r_adapter *a) +{ + struct esas2r_sas_nvram *n = a->nvram; + bool rslt = false; + + if (n->signature[0] != 'E' + || n->signature[1] != 'S' + || n->signature[2] != 'A' + || n->signature[3] != 'S') { + esas2r_hdebug("invalid NVRAM signature"); + } else if (esas2r_nvramcalc_cksum(n)) { + esas2r_hdebug("invalid NVRAM checksum"); + } else if (n->version > SASNVR_VERSION) { + esas2r_hdebug("invalid NVRAM version"); + } else { + set_bit(AF_NVR_VALID, &a->flags); + rslt = true; + } + + if (rslt == false) { + esas2r_hdebug("using defaults"); + esas2r_nvram_set_defaults(a); + } + + return rslt; +} + +/* + * Set the cached NVRAM to defaults. note that this function sets the default + * NVRAM when it has been determined that the physical NVRAM is invalid. + * In this case, the SAS address is fabricated. + */ +void esas2r_nvram_set_defaults(struct esas2r_adapter *a) +{ + struct esas2r_sas_nvram *n = a->nvram; + u32 time = jiffies_to_msecs(jiffies); + + clear_bit(AF_NVR_VALID, &a->flags); + *n = default_sas_nvram; + n->sas_addr[3] |= 0x0F; + n->sas_addr[4] = HIBYTE(LOWORD(time)); + n->sas_addr[5] = LOBYTE(LOWORD(time)); + n->sas_addr[6] = a->pcid->bus->number; + n->sas_addr[7] = a->pcid->devfn; +} + +void esas2r_nvram_get_defaults(struct esas2r_adapter *a, + struct esas2r_sas_nvram *nvram) +{ + u8 sas_addr[8]; + + /* + * in case we are copying the defaults into the adapter, copy the SAS + * address out first. 
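+	 * This keeps the board's WWN stable across a reset to defaults,
+	 * whether the address came from physical NVRAM or was fabricated
+	 * by esas2r_nvram_set_defaults() above.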
+ */ + memcpy(&sas_addr[0], a->nvram->sas_addr, 8); + *nvram = default_sas_nvram; + memcpy(&nvram->sas_addr[0], &sas_addr[0], 8); +} + +bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi, + struct esas2r_request *rq, struct esas2r_sg_context *sgc) +{ + struct esas2r_flash_context *fc = &a->flash_context; + u8 j; + struct esas2r_component_header *ch; + + if (test_and_set_bit(AF_FLASH_LOCK, &a->flags)) { + /* flag was already set */ + fi->status = FI_STAT_BUSY; + return false; + } + + memcpy(&fc->sgc, sgc, sizeof(struct esas2r_sg_context)); + sgc = &fc->sgc; + fc->fi = fi; + fc->sgc_offset = sgc->cur_offset; + rq->req_stat = RS_SUCCESS; + rq->interrupt_cx = fc; + + switch (fi->fi_version) { + case FI_VERSION_1: + fc->scratch = ((struct esas2r_flash_img *)fi)->scratch_buf; + fc->num_comps = FI_NUM_COMPS_V1; + fc->fi_hdr_len = sizeof(struct esas2r_flash_img); + break; + + default: + return complete_fmapi_req(a, rq, FI_STAT_IMG_VER); + } + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return complete_fmapi_req(a, rq, FI_STAT_DEGRADED); + + switch (fi->action) { + case FI_ACT_DOWN: /* Download the components */ + /* Verify the format of the flash image */ + if (!verify_fi(a, fc)) + return complete_fmapi_req(a, rq, fi->status); + + /* Adjust the BIOS fields that are dependent on the HBA */ + ch = &fi->cmp_hdr[CH_IT_BIOS]; + + if (ch->length) + fix_bios(a, fi); + + /* Adjust the EFI fields that are dependent on the HBA */ + ch = &fi->cmp_hdr[CH_IT_EFI]; + + if (ch->length) + fix_efi(a, fi); + + /* + * Since the image was just modified, compute the checksum on + * the modified image. First update the CRC for the composite + * expansion ROM image. + */ + fi->checksum = calc_fi_checksum(fc); + + /* Disable the heartbeat */ + esas2r_disable_heartbeat(a); + + /* Now start up the download sequence */ + fc->task = FMTSK_ERASE_BOOT; + fc->func = VDA_FLASH_BEGINW; + fc->comp_typ = CH_IT_CFG; + fc->flsh_addr = FLS_OFFSET_BOOT; + fc->sgc.length = FLS_LENGTH_BOOT; + fc->sgc.cur_offset = NULL; + + /* Setup the callback address */ + fc->interrupt_cb = fw_download_proc; + break; + + case FI_ACT_UPSZ: /* Get upload sizes */ + fi->adap_typ = get_fi_adap_type(a); + fi->flags = 0; + fi->num_comps = fc->num_comps; + fi->length = fc->fi_hdr_len; + + /* Report the type of boot image in the rel_version string */ + memcpy(fi->rel_version, a->image_type, + sizeof(fi->rel_version)); + + /* Build the component headers */ + for (j = 0, ch = fi->cmp_hdr; + j < fi->num_comps; + j++, ch++) { + ch->img_type = j; + ch->status = CH_STAT_PENDING; + ch->length = 0; + ch->version = 0xffffffff; + ch->image_offset = 0; + ch->pad[0] = 0; + ch->pad[1] = 0; + } + + if (a->flash_ver != 0) { + fi->cmp_hdr[CH_IT_BIOS].version = + fi->cmp_hdr[CH_IT_MAC].version = + fi->cmp_hdr[CH_IT_EFI].version = + fi->cmp_hdr[CH_IT_CFG].version + = a->flash_ver; + + fi->cmp_hdr[CH_IT_BIOS].status = + fi->cmp_hdr[CH_IT_MAC].status = + fi->cmp_hdr[CH_IT_EFI].status = + fi->cmp_hdr[CH_IT_CFG].status = + CH_STAT_SUCCESS; + + return complete_fmapi_req(a, rq, FI_STAT_SUCCESS); + } + + /* fall through */ + + case FI_ACT_UP: /* Upload the components */ + default: + return complete_fmapi_req(a, rq, FI_STAT_INVALID); + } + + /* + * If we make it here, fc has been setup to do the first task. Call + * load_image to format the request, start it, and get out. The + * interrupt code will call the callback when the first message is + * complete. 
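+	 * The task sequence driven from fw_download_proc() is, in order:
+	 * FMTSK_ERASE_BOOT, then write/read-verify pairs for the BIOS, MAC
+	 * and EFI images, and finally the CFG image; components with a
+	 * zero length are skipped via the no_* labels.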
+	 */
+	if (!load_image(a, rq))
+		return complete_fmapi_req(a, rq, FI_STAT_FAILED);
+
+	esas2r_start_request(a, rq);
+
+	return true;
+}
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
new file mode 100644
index 00000000000..6776931e25d
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -0,0 +1,1771 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_init.c
+ *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
+				 struct esas2r_mem_desc *mem_desc,
+				 u32 align)
+{
+	mem_desc->esas2r_param = mem_desc->size + align;
+	mem_desc->virt_addr = NULL;
+	mem_desc->phys_addr = 0;
+	mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
+						   (size_t)mem_desc->esas2r_param,
+						   (dma_addr_t *)&mem_desc->phys_addr,
+						   GFP_KERNEL);
+
+	if (mem_desc->esas2r_data == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "failed to allocate %lu bytes of consistent memory!",
+			   (long unsigned int)mem_desc->esas2r_param);
+		return false;
+	}
+
+	mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
+	mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
+	memset(mem_desc->virt_addr, 0, mem_desc->size);
+	return true;
+}
+
+static void esas2r_initmem_free(struct esas2r_adapter *a,
+				struct esas2r_mem_desc *mem_desc)
+{
+	if (mem_desc->virt_addr == NULL)
+		return;
+
+	/*
+	 * Careful! 
phys_addr and virt_addr may have been adjusted from the + * original allocation in order to return the desired alignment. That + * means we have to use the original address (in esas2r_data) and size + * (esas2r_param) and calculate the original physical address based on + * the difference between the requested and actual allocation size. + */ + if (mem_desc->phys_addr) { + int unalign = ((u8 *)mem_desc->virt_addr) - + ((u8 *)mem_desc->esas2r_data); + + dma_free_coherent(&a->pcid->dev, + (size_t)mem_desc->esas2r_param, + mem_desc->esas2r_data, + (dma_addr_t)(mem_desc->phys_addr - unalign)); + } else { + kfree(mem_desc->esas2r_data); + } + + mem_desc->virt_addr = NULL; +} + +static bool alloc_vda_req(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_mem_desc *memdesc = kzalloc( + sizeof(struct esas2r_mem_desc), GFP_KERNEL); + + if (memdesc == NULL) { + esas2r_hdebug("could not alloc mem for vda request memdesc\n"); + return false; + } + + memdesc->size = sizeof(union atto_vda_req) + + ESAS2R_DATA_BUF_LEN; + + if (!esas2r_initmem_alloc(a, memdesc, 256)) { + esas2r_hdebug("could not alloc mem for vda request\n"); + kfree(memdesc); + return false; + } + + a->num_vrqs++; + list_add(&memdesc->next_desc, &a->vrq_mds_head); + + rq->vrq_md = memdesc; + rq->vrq = (union atto_vda_req *)memdesc->virt_addr; + rq->vrq->scsi.handle = a->num_vrqs; + + return true; +} + +static void esas2r_unmap_regions(struct esas2r_adapter *a) +{ + if (a->regs) + iounmap((void __iomem *)a->regs); + + a->regs = NULL; + + pci_release_region(a->pcid, 2); + + if (a->data_window) + iounmap((void __iomem *)a->data_window); + + a->data_window = NULL; + + pci_release_region(a->pcid, 0); +} + +static int esas2r_map_regions(struct esas2r_adapter *a) +{ + int error; + + a->regs = NULL; + a->data_window = NULL; + + error = pci_request_region(a->pcid, 2, a->name); + if (error != 0) { + esas2r_log(ESAS2R_LOG_CRIT, + "pci_request_region(2) failed, error %d", + error); + + return error; + } + + a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2), + pci_resource_len(a->pcid, 2)); + if (a->regs == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "ioremap failed for regs mem region\n"); + pci_release_region(a->pcid, 2); + return -EFAULT; + } + + error = pci_request_region(a->pcid, 0, a->name); + if (error != 0) { + esas2r_log(ESAS2R_LOG_CRIT, + "pci_request_region(2) failed, error %d", + error); + esas2r_unmap_regions(a); + return error; + } + + a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid, + 0), + pci_resource_len(a->pcid, 0)); + if (a->data_window == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "ioremap failed for data_window mem region\n"); + esas2r_unmap_regions(a); + return -EFAULT; + } + + return 0; +} + +static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode) +{ + int i; + + /* Set up interrupt mode based on the requested value */ + switch (intr_mode) { + case INTR_MODE_LEGACY: +use_legacy_interrupts: + a->intr_mode = INTR_MODE_LEGACY; + break; + + case INTR_MODE_MSI: + i = pci_enable_msi(a->pcid); + if (i != 0) { + esas2r_log(ESAS2R_LOG_WARN, + "failed to enable MSI for adapter %d, " + "falling back to legacy interrupts " + "(err=%d)", a->index, + i); + goto use_legacy_interrupts; + } + a->intr_mode = INTR_MODE_MSI; + set_bit(AF2_MSI_ENABLED, &a->flags2); + break; + + + default: + esas2r_log(ESAS2R_LOG_WARN, + "unknown interrupt_mode %d requested, " + "falling back to legacy interrupt", + interrupt_mode); + goto use_legacy_interrupts; + } +} + +static void 
esas2r_claim_interrupts(struct esas2r_adapter *a) +{ + unsigned long flags = 0; + + if (a->intr_mode == INTR_MODE_LEGACY) + flags |= IRQF_SHARED; + + esas2r_log(ESAS2R_LOG_INFO, + "esas2r_claim_interrupts irq=%d (%p, %s, %x)", + a->pcid->irq, a, a->name, flags); + + if (request_irq(a->pcid->irq, + (a->intr_mode == + INTR_MODE_LEGACY) ? esas2r_interrupt : + esas2r_msi_interrupt, + flags, + a->name, + a)) { + esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X", + a->pcid->irq); + return; + } + + set_bit(AF2_IRQ_CLAIMED, &a->flags2); + esas2r_log(ESAS2R_LOG_INFO, + "claimed IRQ %d flags: 0x%lx", + a->pcid->irq, flags); +} + +int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid, + int index) +{ + struct esas2r_adapter *a; + u64 bus_addr = 0; + int i; + void *next_uncached; + struct esas2r_request *first_request, *last_request; + + if (index >= MAX_ADAPTERS) { + esas2r_log(ESAS2R_LOG_CRIT, + "tried to init invalid adapter index %u!", + index); + return 0; + } + + if (esas2r_adapters[index]) { + esas2r_log(ESAS2R_LOG_CRIT, + "tried to init existing adapter index %u!", + index); + return 0; + } + + a = (struct esas2r_adapter *)host->hostdata; + memset(a, 0, sizeof(struct esas2r_adapter)); + a->pcid = pcid; + a->host = host; + + if (sizeof(dma_addr_t) > 4) { + const uint64_t required_mask = dma_get_required_mask + (&pcid->dev); + if (required_mask > DMA_BIT_MASK(32) + && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64)) + && !pci_set_consistent_dma_mask(pcid, + DMA_BIT_MASK(64))) { + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "64-bit PCI addressing enabled\n"); + } else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32)) + && !pci_set_consistent_dma_mask(pcid, + DMA_BIT_MASK(32))) { + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "32-bit PCI addressing enabled\n"); + } else { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to set DMA mask"); + esas2r_kill_adapter(index); + return 0; + } + } else { + if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32)) + && !pci_set_consistent_dma_mask(pcid, + DMA_BIT_MASK(32))) { + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "32-bit PCI addressing enabled\n"); + } else { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to set DMA mask"); + esas2r_kill_adapter(index); + return 0; + } + } + esas2r_adapters[index] = a; + sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index); + esas2r_debug("new adapter %p, name %s", a, a->name); + spin_lock_init(&a->request_lock); + spin_lock_init(&a->fw_event_lock); + sema_init(&a->fm_api_semaphore, 1); + sema_init(&a->fs_api_semaphore, 1); + sema_init(&a->nvram_semaphore, 1); + + esas2r_fw_event_off(a); + snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d", + a->index); + a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name); + + init_waitqueue_head(&a->buffered_ioctl_waiter); + init_waitqueue_head(&a->nvram_waiter); + init_waitqueue_head(&a->fm_api_waiter); + init_waitqueue_head(&a->fs_api_waiter); + init_waitqueue_head(&a->vda_waiter); + + INIT_LIST_HEAD(&a->general_req.req_list); + INIT_LIST_HEAD(&a->active_list); + INIT_LIST_HEAD(&a->defer_list); + INIT_LIST_HEAD(&a->free_sg_list_head); + INIT_LIST_HEAD(&a->avail_request); + INIT_LIST_HEAD(&a->vrq_mds_head); + INIT_LIST_HEAD(&a->fw_event_list); + + first_request = (struct esas2r_request *)((u8 *)(a + 1)); + + for (last_request = first_request, i = 1; i < num_requests; + last_request++, i++) { + INIT_LIST_HEAD(&last_request->req_list); + list_add_tail(&last_request->comp_list, &a->avail_request); + if (!alloc_vda_req(a, last_request)) { + 
esas2r_log(ESAS2R_LOG_CRIT, + "failed to allocate a VDA request!"); + esas2r_kill_adapter(index); + return 0; + } + } + + esas2r_debug("requests: %p to %p (%d, %d)", first_request, + last_request, + sizeof(*first_request), + num_requests); + + if (esas2r_map_regions(a) != 0) { + esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!"); + esas2r_kill_adapter(index); + return 0; + } + + a->index = index; + + /* interrupts will be disabled until we are done with init */ + atomic_inc(&a->dis_ints_cnt); + atomic_inc(&a->disable_cnt); + set_bit(AF_CHPRST_PENDING, &a->flags); + set_bit(AF_DISC_PENDING, &a->flags); + set_bit(AF_FIRST_INIT, &a->flags); + set_bit(AF_LEGACY_SGE_MODE, &a->flags); + + a->init_msg = ESAS2R_INIT_MSG_START; + a->max_vdareq_size = 128; + a->build_sgl = esas2r_build_sg_list_sge; + + esas2r_setup_interrupts(a, interrupt_mode); + + a->uncached_size = esas2r_get_uncached_size(a); + a->uncached = dma_alloc_coherent(&pcid->dev, + (size_t)a->uncached_size, + (dma_addr_t *)&bus_addr, + GFP_KERNEL); + if (a->uncached == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to allocate %d bytes of consistent memory!", + a->uncached_size); + esas2r_kill_adapter(index); + return 0; + } + + a->uncached_phys = bus_addr; + + esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)", + a->uncached_size, + a->uncached, + upper_32_bits(bus_addr), + lower_32_bits(bus_addr)); + memset(a->uncached, 0, a->uncached_size); + next_uncached = a->uncached; + + if (!esas2r_init_adapter_struct(a, + &next_uncached)) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to initialize adapter structure (2)!"); + esas2r_kill_adapter(index); + return 0; + } + + tasklet_init(&a->tasklet, + esas2r_adapter_tasklet, + (unsigned long)a); + + /* + * Disable chip interrupts to prevent spurious interrupts + * until we claim the IRQ. + */ + esas2r_disable_chip_interrupts(a); + esas2r_check_adapter(a); + + if (!esas2r_init_adapter_hw(a, true)) + esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!"); + else + esas2r_debug("esas2r_init_adapter ok"); + + esas2r_claim_interrupts(a); + + if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) + esas2r_enable_chip_interrupts(a); + + set_bit(AF2_INIT_DONE, &a->flags2); + if (!test_bit(AF_DEGRADED_MODE, &a->flags)) + esas2r_kickoff_timer(a); + esas2r_debug("esas2r_init_adapter done for %p (%d)", + a, a->disable_cnt); + + return 1; +} + +static void esas2r_adapter_power_down(struct esas2r_adapter *a, + int power_management) +{ + struct esas2r_mem_desc *memdesc, *next; + + if ((test_bit(AF2_INIT_DONE, &a->flags2)) + && (!test_bit(AF_DEGRADED_MODE, &a->flags))) { + if (!power_management) { + del_timer_sync(&a->timer); + tasklet_kill(&a->tasklet); + } + esas2r_power_down(a); + + /* + * There are versions of firmware that do not handle the sync + * cache command correctly. Stall here to ensure that the + * cache is lazily flushed. 
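+		 * The 500 ms mdelay() below is a fixed stall; no completion
+		 * status is polled for afterwards.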
+ */ + mdelay(500); + esas2r_debug("chip halted"); + } + + /* Remove sysfs binary files */ + if (a->sysfs_fw_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw); + a->sysfs_fw_created = 0; + } + + if (a->sysfs_fs_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs); + a->sysfs_fs_created = 0; + } + + if (a->sysfs_vda_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda); + a->sysfs_vda_created = 0; + } + + if (a->sysfs_hw_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw); + a->sysfs_hw_created = 0; + } + + if (a->sysfs_live_nvram_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, + &bin_attr_live_nvram); + a->sysfs_live_nvram_created = 0; + } + + if (a->sysfs_default_nvram_created) { + sysfs_remove_bin_file(&a->host->shost_dev.kobj, + &bin_attr_default_nvram); + a->sysfs_default_nvram_created = 0; + } + + /* Clean up interrupts */ + if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) { + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "free_irq(%d) called", a->pcid->irq); + + free_irq(a->pcid->irq, a); + esas2r_debug("IRQ released"); + clear_bit(AF2_IRQ_CLAIMED, &a->flags2); + } + + if (test_bit(AF2_MSI_ENABLED, &a->flags2)) { + pci_disable_msi(a->pcid); + clear_bit(AF2_MSI_ENABLED, &a->flags2); + esas2r_debug("MSI disabled"); + } + + if (a->inbound_list_md.virt_addr) + esas2r_initmem_free(a, &a->inbound_list_md); + + if (a->outbound_list_md.virt_addr) + esas2r_initmem_free(a, &a->outbound_list_md); + + list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head, + next_desc) { + esas2r_initmem_free(a, memdesc); + } + + /* Following frees everything allocated via alloc_vda_req */ + list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) { + esas2r_initmem_free(a, memdesc); + list_del(&memdesc->next_desc); + kfree(memdesc); + } + + kfree(a->first_ae_req); + a->first_ae_req = NULL; + + kfree(a->sg_list_mds); + a->sg_list_mds = NULL; + + kfree(a->req_table); + a->req_table = NULL; + + if (a->regs) { + esas2r_unmap_regions(a); + a->regs = NULL; + a->data_window = NULL; + esas2r_debug("regions unmapped"); + } +} + +/* Release/free allocated resources for specified adapters. 
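+ * Teardown runs roughly in reverse order of esas2r_init_adapter():
+ * power the chip down, free the DMA buffers and the firmware event
+ * workqueue, disable the PCI device, and finally remove and put the
+ * SCSI host if initialization had completed.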
*/ +void esas2r_kill_adapter(int i) +{ + struct esas2r_adapter *a = esas2r_adapters[i]; + + if (a) { + unsigned long flags; + struct workqueue_struct *wq; + esas2r_debug("killing adapter %p [%d] ", a, i); + esas2r_fw_event_off(a); + esas2r_adapter_power_down(a, 0); + if (esas2r_buffered_ioctl && + (a->pcid == esas2r_buffered_ioctl_pcid)) { + dma_free_coherent(&a->pcid->dev, + (size_t)esas2r_buffered_ioctl_size, + esas2r_buffered_ioctl, + esas2r_buffered_ioctl_addr); + esas2r_buffered_ioctl = NULL; + } + + if (a->vda_buffer) { + dma_free_coherent(&a->pcid->dev, + (size_t)VDA_MAX_BUFFER_SIZE, + a->vda_buffer, + (dma_addr_t)a->ppvda_buffer); + a->vda_buffer = NULL; + } + if (a->fs_api_buffer) { + dma_free_coherent(&a->pcid->dev, + (size_t)a->fs_api_buffer_size, + a->fs_api_buffer, + (dma_addr_t)a->ppfs_api_buffer); + a->fs_api_buffer = NULL; + } + + kfree(a->local_atto_ioctl); + a->local_atto_ioctl = NULL; + + spin_lock_irqsave(&a->fw_event_lock, flags); + wq = a->fw_event_q; + a->fw_event_q = NULL; + spin_unlock_irqrestore(&a->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + + if (a->uncached) { + dma_free_coherent(&a->pcid->dev, + (size_t)a->uncached_size, + a->uncached, + (dma_addr_t)a->uncached_phys); + a->uncached = NULL; + esas2r_debug("uncached area freed"); + } + + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "pci_disable_device() called. msix_enabled: %d " + "msi_enabled: %d irq: %d pin: %d", + a->pcid->msix_enabled, + a->pcid->msi_enabled, + a->pcid->irq, + a->pcid->pin); + + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "before pci_disable_device() enable_cnt: %d", + a->pcid->enable_cnt.counter); + + pci_disable_device(a->pcid); + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "after pci_disable_device() enable_cnt: %d", + a->pcid->enable_cnt.counter); + + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->pcid->dev), + "pci_set_drv_data(%p, NULL) called", + a->pcid); + + pci_set_drvdata(a->pcid, NULL); + esas2r_adapters[i] = NULL; + + if (test_bit(AF2_INIT_DONE, &a->flags2)) { + clear_bit(AF2_INIT_DONE, &a->flags2); + + set_bit(AF_DEGRADED_MODE, &a->flags); + + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->host->shost_gendev), + "scsi_remove_host() called"); + + scsi_remove_host(a->host); + + esas2r_log_dev(ESAS2R_LOG_INFO, + &(a->host->shost_gendev), + "scsi_host_put() called"); + + scsi_host_put(a->host); + } + } +} + +int esas2r_cleanup(struct Scsi_Host *host) +{ + struct esas2r_adapter *a; + int index; + + if (host == NULL) { + int i; + + esas2r_debug("esas2r_cleanup everything"); + for (i = 0; i < MAX_ADAPTERS; i++) + esas2r_kill_adapter(i); + return -1; + } + + esas2r_debug("esas2r_cleanup called for host %p", host); + a = (struct esas2r_adapter *)host->hostdata; + index = a->index; + esas2r_kill_adapter(index); + return index; +} + +int esas2r_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + u32 device_state; + struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; + + esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()"); + if (!a) + return -ENODEV; + + esas2r_adapter_power_down(a, 1); + device_state = pci_choose_state(pdev, state); + esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), + "pci_save_state() called"); + pci_save_state(pdev); + esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), + "pci_disable_device() called"); + pci_disable_device(pdev); + esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), + "pci_set_power_state() called"); + pci_set_power_state(pdev, device_state); + 
esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
+	return 0;
+}
+
+int esas2r_resume(struct pci_dev *pdev)
+{
+	struct Scsi_Host *host = pci_get_drvdata(pdev);
+	struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+	int rez;
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_set_power_state(PCI_D0) "
+		       "called");
+	pci_set_power_state(pdev, PCI_D0);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_enable_wake(PCI_D0, 0) "
+		       "called");
+	pci_enable_wake(pdev, PCI_D0, 0);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_restore_state() called");
+	pci_restore_state(pdev);
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "pci_enable_device() called");
+	rez = pci_enable_device(pdev);
+	pci_set_master(pdev);
+
+	if (!a) {
+		rez = -ENODEV;
+		goto error_exit;
+	}
+
+	if (esas2r_map_regions(a) != 0) {
+		esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
+		rez = -ENOMEM;
+		goto error_exit;
+	}
+
+	/* Set up interrupt mode */
+	esas2r_setup_interrupts(a, a->intr_mode);
+
+	/*
+	 * Disable chip interrupts to prevent spurious interrupts until we
+	 * claim the IRQ.
+	 */
+	esas2r_disable_chip_interrupts(a);
+	if (!esas2r_power_up(a, true)) {
+		esas2r_debug("yikes, esas2r_power_up failed");
+		rez = -ENOMEM;
+		goto error_exit;
+	}
+
+	esas2r_claim_interrupts(a);
+
+	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {
+		/*
+		 * Now that system interrupt(s) are claimed, we can enable
+		 * chip interrupts.
+		 */
+		esas2r_enable_chip_interrupts(a);
+		esas2r_kickoff_timer(a);
+	} else {
+		esas2r_debug("yikes, unable to claim IRQ");
+		esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
+		rez = -ENOMEM;
+		goto error_exit;
+	}
+
+error_exit:
+	esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
+		       rez);
+	return rez;
+}
+
+bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
+{
+	set_bit(AF_DEGRADED_MODE, &a->flags);
+	esas2r_log(ESAS2R_LOG_CRIT,
+		   "setting adapter to degraded mode: %s\n", error_str);
+	return false;
+}
+
+u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
+{
+	return sizeof(struct esas2r_sas_nvram)
+	       + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
+	       + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
+	       + 8
+	       + (num_sg_lists * (u16)sgl_page_size)
+	       + ALIGN((num_requests + num_ae_requests + 1 +
+			ESAS2R_LIST_EXTRA) *
+		       sizeof(struct esas2r_inbound_list_source_entry),
+		       8)
+	       + ALIGN((num_requests + num_ae_requests + 1 +
+			ESAS2R_LIST_EXTRA) *
+		       sizeof(struct atto_vda_ob_rsp), 8)
+	       + 256; /* VDA request and buffer align */
+}
+
+static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
+{
+	int pcie_cap_reg;
+
+	pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
+	if (pcie_cap_reg) {
+		u16 devcontrol;
+
+		pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
+				     &devcontrol);
+
+		if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
+			esas2r_log(ESAS2R_LOG_INFO,
+				   "max read request size > 512B");
+
+			devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
+			devcontrol |= 0x2000;
+			pci_write_config_word(a->pcid,
+					      pcie_cap_reg + PCI_EXP_DEVCTL,
+					      devcontrol);
+		}
+	}
+}
+
+/*
+ * Determine the organization of the uncached data area and
+ * finish initializing the adapter structure
+ */
+bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
+				void **uncached_area)
+{
+	u32 i;
+	u8 *high;
+	struct esas2r_inbound_list_source_entry *element;
+	struct esas2r_request *rq;
+	struct esas2r_mem_desc *sgl;
+
+	spin_lock_init(&a->sg_list_lock);
+	
spin_lock_init(&a->mem_lock); + spin_lock_init(&a->queue_lock); + + a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS]; + + if (!alloc_vda_req(a, &a->general_req)) { + esas2r_hdebug( + "failed to allocate a VDA request for the general req!"); + return false; + } + + /* allocate requests for asynchronous events */ + a->first_ae_req = + kzalloc(num_ae_requests * sizeof(struct esas2r_request), + GFP_KERNEL); + + if (a->first_ae_req == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to allocate memory for asynchronous events"); + return false; + } + + /* allocate the S/G list memory descriptors */ + a->sg_list_mds = kzalloc( + num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL); + + if (a->sg_list_mds == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to allocate memory for s/g list descriptors"); + return false; + } + + /* allocate the request table */ + a->req_table = + kzalloc((num_requests + num_ae_requests + + 1) * sizeof(struct esas2r_request *), GFP_KERNEL); + + if (a->req_table == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "failed to allocate memory for the request table"); + return false; + } + + /* initialize PCI configuration space */ + esas2r_init_pci_cfg_space(a); + + /* + * the thunder_stream boards all have a serial flash part that has a + * different base address on the AHB bus. + */ + if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID) + && (a->pcid->subsystem_device & ATTO_SSDID_TBT)) + a->flags2 |= AF2_THUNDERBOLT; + + if (test_bit(AF2_THUNDERBOLT, &a->flags2)) + a->flags2 |= AF2_SERIAL_FLASH; + + if (a->pcid->subsystem_device == ATTO_TLSH_1068) + a->flags2 |= AF2_THUNDERLINK; + + /* Uncached Area */ + high = (u8 *)*uncached_area; + + /* initialize the scatter/gather table pages */ + + for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) { + sgl->size = sgl_page_size; + + list_add_tail(&sgl->next_desc, &a->free_sg_list_head); + + if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) { + /* Allow the driver to load if the minimum count met. 
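+			 * The minimum is NUM_SGL_MIN; pages beyond that
+			 * only enlarge the pool of prebuilt S/G lists, so
+			 * a partial allocation is not fatal.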
*/ + if (i < NUM_SGL_MIN) + return false; + break; + } + } + + /* compute the size of the lists */ + a->list_size = num_requests + ESAS2R_LIST_EXTRA; + + /* allocate the inbound list */ + a->inbound_list_md.size = a->list_size * + sizeof(struct + esas2r_inbound_list_source_entry); + + if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) { + esas2r_hdebug("failed to allocate IB list"); + return false; + } + + /* allocate the outbound list */ + a->outbound_list_md.size = a->list_size * + sizeof(struct atto_vda_ob_rsp); + + if (!esas2r_initmem_alloc(a, &a->outbound_list_md, + ESAS2R_LIST_ALIGN)) { + esas2r_hdebug("failed to allocate IB list"); + return false; + } + + /* allocate the NVRAM structure */ + a->nvram = (struct esas2r_sas_nvram *)high; + high += sizeof(struct esas2r_sas_nvram); + + /* allocate the discovery buffer */ + a->disc_buffer = high; + high += ESAS2R_DISC_BUF_LEN; + high = PTR_ALIGN(high, 8); + + /* allocate the outbound list copy pointer */ + a->outbound_copy = (u32 volatile *)high; + high += sizeof(u32); + + if (!test_bit(AF_NVR_VALID, &a->flags)) + esas2r_nvram_set_defaults(a); + + /* update the caller's uncached memory area pointer */ + *uncached_area = (void *)high; + + /* initialize the allocated memory */ + if (test_bit(AF_FIRST_INIT, &a->flags)) { + memset(a->req_table, 0, + (num_requests + num_ae_requests + + 1) * sizeof(struct esas2r_request *)); + + esas2r_targ_db_initialize(a); + + /* prime parts of the inbound list */ + element = + (struct esas2r_inbound_list_source_entry *)a-> + inbound_list_md. + virt_addr; + + for (i = 0; i < a->list_size; i++) { + element->address = 0; + element->reserved = 0; + element->length = cpu_to_le32(HWILSE_INTERFACE_F0 + | (sizeof(union + atto_vda_req) + / + sizeof(u32))); + element++; + } + + /* init the AE requests */ + for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++, + i++) { + INIT_LIST_HEAD(&rq->req_list); + if (!alloc_vda_req(a, rq)) { + esas2r_hdebug( + "failed to allocate a VDA request!"); + return false; + } + + esas2r_rq_init_request(rq, a); + + /* override the completion function */ + rq->comp_cb = esas2r_ae_complete; + } + } + + return true; +} + +/* This code will verify that the chip is operational. */ +bool esas2r_check_adapter(struct esas2r_adapter *a) +{ + u32 starttime; + u32 doorbell; + u64 ppaddr; + u32 dw; + + /* + * if the chip reset detected flag is set, we can bypass a bunch of + * stuff. + */ + if (test_bit(AF_CHPRST_DETECTED, &a->flags)) + goto skip_chip_reset; + + /* + * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver + * may have left them enabled or we may be recovering from a fault. + */ + esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK); + esas2r_flush_register_dword(a, MU_INT_MASK_OUT); + + /* + * wait for the firmware to become ready by forcing an interrupt and + * waiting for a response. + */ + starttime = jiffies_to_msecs(jiffies); + + while (true) { + esas2r_force_interrupt(a); + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell == 0xFFFFFFFF) { + /* + * Give the firmware up to two seconds to enable + * register access after a reset. 
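+			 * A readback of all ones usually means the chip is
+			 * not decoding MMIO yet (e.g. mid-reset), not a
+			 * real doorbell value.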
+ */ + if ((jiffies_to_msecs(jiffies) - starttime) > 2000) + return esas2r_set_degraded_mode(a, + "unable to access registers"); + } else if (doorbell & DRBL_FORCE_INT) { + u32 ver = (doorbell & DRBL_FW_VER_MSK); + + /* + * This driver supports version 0 and version 1 of + * the API + */ + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + + if (ver == DRBL_FW_VER_0) { + set_bit(AF_LEGACY_SGE_MODE, &a->flags); + + a->max_vdareq_size = 128; + a->build_sgl = esas2r_build_sg_list_sge; + } else if (ver == DRBL_FW_VER_1) { + clear_bit(AF_LEGACY_SGE_MODE, &a->flags); + + a->max_vdareq_size = 1024; + a->build_sgl = esas2r_build_sg_list_prd; + } else { + return esas2r_set_degraded_mode(a, + "unknown firmware version"); + } + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 180000) { + esas2r_hdebug("FW ready TMO"); + esas2r_bugon(); + + return esas2r_set_degraded_mode(a, + "firmware start has timed out"); + } + } + + /* purge any asynchronous events since we will repost them later */ + esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN); + starttime = jiffies_to_msecs(jiffies); + + while (true) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell & DRBL_MSG_IFC_DOWN) { + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(50)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { + esas2r_hdebug("timeout waiting for interface down"); + break; + } + } +skip_chip_reset: + /* + * first things first, before we go changing any of these registers + * disable the communication lists. + */ + dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG); + dw &= ~MU_ILC_ENABLE; + esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw); + dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG); + dw &= ~MU_OLC_ENABLE; + esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw); + + /* configure the communication list addresses */ + ppaddr = a->inbound_list_md.phys_addr; + esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO, + lower_32_bits(ppaddr)); + esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI, + upper_32_bits(ppaddr)); + ppaddr = a->outbound_list_md.phys_addr; + esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO, + lower_32_bits(ppaddr)); + esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI, + upper_32_bits(ppaddr)); + ppaddr = a->uncached_phys + + ((u8 *)a->outbound_copy - a->uncached); + esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO, + lower_32_bits(ppaddr)); + esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI, + upper_32_bits(ppaddr)); + + /* reset the read and write pointers */ + *a->outbound_copy = + a->last_write = + a->last_read = a->list_size - 1; + set_bit(AF_COMM_LIST_TOGGLE, &a->flags); + esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE | + a->last_write); + esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE | + a->last_write); + esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE | + a->last_write); + esas2r_write_register_dword(a, MU_OUT_LIST_WRITE, + MU_OLW_TOGGLE | a->last_write); + + /* configure the interface select fields */ + dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG); + dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST); + esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG, + (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR)); + dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG); + dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE); + 
esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG, + (dw | MU_OLIC_LIST_F0 | + MU_OLIC_SOURCE_DDR)); + + /* finish configuring the communication lists */ + dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG); + dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK); + dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC + | (a->list_size << MU_ILC_NUMBER_SHIFT); + esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw); + dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG); + dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK); + dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT); + esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw); + + /* + * notify the firmware that we're done setting up the communication + * list registers. wait here until the firmware is done configuring + * its lists. it will signal that it is done by enabling the lists. + */ + esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT); + starttime = jiffies_to_msecs(jiffies); + + while (true) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell & DRBL_MSG_IFC_INIT) { + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { + esas2r_hdebug( + "timeout waiting for communication list init"); + esas2r_bugon(); + return esas2r_set_degraded_mode(a, + "timeout waiting for communication list init"); + } + } + + /* + * flag whether the firmware supports the power down doorbell. we + * determine this by reading the inbound doorbell enable mask. + */ + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB); + if (doorbell & DRBL_POWER_DOWN) + set_bit(AF2_VDA_POWER_DOWN, &a->flags2); + else + clear_bit(AF2_VDA_POWER_DOWN, &a->flags2); + + /* + * enable assertion of outbound queue and doorbell interrupts in the + * main interrupt cause register. + */ + esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK); + esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK); + return true; +} + +/* Process the initialization message just completed and format the next one. */ +static bool esas2r_format_init_msg(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + u32 msg = a->init_msg; + struct atto_vda_cfg_init *ci; + + a->init_msg = 0; + + switch (msg) { + case ESAS2R_INIT_MSG_START: + case ESAS2R_INIT_MSG_REINIT: + { + struct timeval now; + do_gettimeofday(&now); + esas2r_hdebug("CFG init"); + esas2r_build_cfg_req(a, + rq, + VDA_CFG_INIT, + 0, + NULL); + ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init; + ci->sgl_page_size = cpu_to_le32(sgl_page_size); + ci->epoch_time = cpu_to_le32(now.tv_sec); + rq->flags |= RF_FAILURE_OK; + a->init_msg = ESAS2R_INIT_MSG_INIT; + break; + } + + case ESAS2R_INIT_MSG_INIT: + if (rq->req_stat == RS_SUCCESS) { + u32 major; + u32 minor; + u16 fw_release; + + a->fw_version = le16_to_cpu( + rq->func_rsp.cfg_rsp.vda_version); + a->fw_build = rq->func_rsp.cfg_rsp.fw_build; + fw_release = le16_to_cpu( + rq->func_rsp.cfg_rsp.fw_release); + major = LOBYTE(fw_release); + minor = HIBYTE(fw_release); + a->fw_version += (major << 16) + (minor << 24); + } else { + esas2r_hdebug("FAILED"); + } + + /* + * the 2.71 and earlier releases of R6xx firmware did not error + * unsupported config requests correctly. 
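+		 * The check below therefore only issues the GET_INIT2
+		 * request on Thunderbolt hardware or on firmware newer than
+		 * that known-bad release, and RF_FAILURE_OK keeps a
+		 * rejection from failing initialization in any case.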
+ */ + + if ((test_bit(AF2_THUNDERBOLT, &a->flags2)) + || (be32_to_cpu(a->fw_version) > 0x00524702)) { + esas2r_hdebug("CFG get init"); + esas2r_build_cfg_req(a, + rq, + VDA_CFG_GET_INIT2, + sizeof(struct atto_vda_cfg_init), + NULL); + + rq->vrq->cfg.sg_list_offset = offsetof( + struct atto_vda_cfg_req, + data.sge); + rq->vrq->cfg.data.prde.ctl_len = + cpu_to_le32(sizeof(struct atto_vda_cfg_init)); + rq->vrq->cfg.data.prde.address = cpu_to_le64( + rq->vrq_md->phys_addr + + sizeof(union atto_vda_req)); + rq->flags |= RF_FAILURE_OK; + a->init_msg = ESAS2R_INIT_MSG_GET_INIT; + break; + } + + case ESAS2R_INIT_MSG_GET_INIT: + if (msg == ESAS2R_INIT_MSG_GET_INIT) { + ci = (struct atto_vda_cfg_init *)rq->data_buf; + if (rq->req_stat == RS_SUCCESS) { + a->num_targets_backend = + le32_to_cpu(ci->num_targets_backend); + a->ioctl_tunnel = + le32_to_cpu(ci->ioctl_tunnel); + } else { + esas2r_hdebug("FAILED"); + } + } + /* fall through */ + + default: + rq->req_stat = RS_SUCCESS; + return false; + } + return true; +} + +/* + * Perform initialization messages via the request queue. Messages are + * performed with interrupts disabled. + */ +bool esas2r_init_msgs(struct esas2r_adapter *a) +{ + bool success = true; + struct esas2r_request *rq = &a->general_req; + + esas2r_rq_init_request(rq, a); + rq->comp_cb = esas2r_dummy_complete; + + if (a->init_msg == 0) + a->init_msg = ESAS2R_INIT_MSG_REINIT; + + while (a->init_msg) { + if (esas2r_format_init_msg(a, rq)) { + unsigned long flags; + while (true) { + spin_lock_irqsave(&a->queue_lock, flags); + esas2r_start_vda_request(a, rq); + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_wait_request(a, rq); + if (rq->req_stat != RS_PENDING) + break; + } + } + + if (rq->req_stat == RS_SUCCESS + || ((rq->flags & RF_FAILURE_OK) + && rq->req_stat != RS_TIMEOUT)) + continue; + + esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)", + a->init_msg, rq->req_stat, rq->flags); + a->init_msg = ESAS2R_INIT_MSG_START; + success = false; + break; + } + + esas2r_rq_destroy_request(rq, a); + return success; +} + +/* Initialize the adapter chip */ +bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll) +{ + bool rslt = false; + struct esas2r_request *rq; + u32 i; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + goto exit; + + if (!test_bit(AF_NVR_VALID, &a->flags)) { + if (!esas2r_nvram_read_direct(a)) + esas2r_log(ESAS2R_LOG_WARN, + "invalid/missing NVRAM parameters"); + } + + if (!esas2r_init_msgs(a)) { + esas2r_set_degraded_mode(a, "init messages failed"); + goto exit; + } + + /* The firmware is ready. */ + clear_bit(AF_DEGRADED_MODE, &a->flags); + clear_bit(AF_CHPRST_PENDING, &a->flags); + + /* Post all the async event requests */ + for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++) + esas2r_start_ae_request(a, rq); + + if (!a->flash_rev[0]) + esas2r_read_flash_rev(a); + + if (!a->image_type[0]) + esas2r_read_image_type(a); + + if (a->fw_version == 0) + a->fw_rev[0] = 0; + else + sprintf(a->fw_rev, "%1d.%02d", + (int)LOBYTE(HIWORD(a->fw_version)), + (int)HIBYTE(HIWORD(a->fw_version))); + + esas2r_hdebug("firmware revision: %s", a->fw_rev); + + if (test_bit(AF_CHPRST_DETECTED, &a->flags) + && (test_bit(AF_FIRST_INIT, &a->flags))) { + esas2r_enable_chip_interrupts(a); + return true; + } + + /* initialize discovery */ + esas2r_disc_initialize(a); + + /* + * wait for the device wait time to expire here if requested. this is + * usually requested during initial driver load and possibly when + * resuming from a low power state. 
deferred device waiting will use + * interrupts. chip reset recovery always defers device waiting to + * avoid being in a TASKLET too long. + */ + if (init_poll) { + u32 currtime = a->disc_start_time; + u32 nexttick = 100; + u32 deltatime; + + /* + * Block Tasklets from getting scheduled and indicate this is + * polled discovery. + */ + set_bit(AF_TASKLET_SCHEDULED, &a->flags); + set_bit(AF_DISC_POLLED, &a->flags); + + /* + * Temporarily bring the disable count to zero to enable + * deferred processing. Note that the count is already zero + * after the first initialization. + */ + if (test_bit(AF_FIRST_INIT, &a->flags)) + atomic_dec(&a->disable_cnt); + + while (test_bit(AF_DISC_PENDING, &a->flags)) { + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + /* + * Determine the need for a timer tick based on the + * delta time between this and the last iteration of + * this loop. We don't use the absolute time because + * then we would have to worry about when nexttick + * wraps and currtime hasn't yet. + */ + deltatime = jiffies_to_msecs(jiffies) - currtime; + currtime += deltatime; + + /* + * Process any waiting discovery as long as the chip is + * up. If a chip reset happens during initial polling, + * we have to make sure the timer tick processes the + * doorbell indicating the firmware is ready. + */ + if (!test_bit(AF_CHPRST_PENDING, &a->flags)) + esas2r_disc_check_for_work(a); + + /* Simulate a timer tick. */ + if (nexttick <= deltatime) { + + /* Time for a timer tick */ + nexttick += 100; + esas2r_timer_tick(a); + } + + if (nexttick > deltatime) + nexttick -= deltatime; + + /* Do any deferred processing */ + if (esas2r_is_tasklet_pending(a)) + esas2r_do_tasklet_tasks(a); + + } + + if (test_bit(AF_FIRST_INIT, &a->flags)) + atomic_inc(&a->disable_cnt); + + clear_bit(AF_DISC_POLLED, &a->flags); + clear_bit(AF_TASKLET_SCHEDULED, &a->flags); + } + + + esas2r_targ_db_report_changes(a); + + /* + * For cases where (a) the initialization messages processing may + * handle an interrupt for a port event and a discovery is waiting, but + * we are not waiting for devices, or (b) the device wait time has been + * exhausted but there is still discovery pending, start any leftover + * discovery in interrupt driven mode. + */ + esas2r_disc_start_waiting(a); + + /* Enable chip interrupts */ + a->int_mask = ESAS2R_INT_STS_MASK; + esas2r_enable_chip_interrupts(a); + esas2r_enable_heartbeat(a); + rslt = true; + +exit: + /* + * Regardless of whether initialization was successful, certain things + * need to get done before we exit. + */ + + if (test_bit(AF_CHPRST_DETECTED, &a->flags) && + test_bit(AF_FIRST_INIT, &a->flags)) { + /* + * Reinitialization was performed during the first + * initialization. Only clear the chip reset flag so the + * original device polling is not cancelled. + */ + if (!rslt) + clear_bit(AF_CHPRST_PENDING, &a->flags); + } else { + /* First initialization or a subsequent re-init is complete. */ + if (!rslt) { + clear_bit(AF_CHPRST_PENDING, &a->flags); + clear_bit(AF_DISC_PENDING, &a->flags); + } + + + /* Enable deferred processing after the first initialization. 
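+		 * The disable count was primed before this first
+		 * initialization; dropping it back to zero below is what
+		 * finally allows esas2r_do_deferred_processes() to run.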
*/ + if (test_bit(AF_FIRST_INIT, &a->flags)) { + clear_bit(AF_FIRST_INIT, &a->flags); + + if (atomic_dec_return(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); + } + } + + return rslt; +} + +void esas2r_reset_adapter(struct esas2r_adapter *a) +{ + set_bit(AF_OS_RESET, &a->flags); + esas2r_local_reset_adapter(a); + esas2r_schedule_tasklet(a); +} + +void esas2r_reset_chip(struct esas2r_adapter *a) +{ + if (!esas2r_is_adapter_present(a)) + return; + + /* + * Before we reset the chip, save off the VDA core dump. The VDA core + * dump is located in the upper 512KB of the onchip SRAM. Make sure + * to not overwrite a previous crash that was saved. + */ + if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) && + !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) { + esas2r_read_mem_block(a, + a->fw_coredump_buff, + MW_DATA_ADDR_SRAM + 0x80000, + ESAS2R_FWCOREDUMP_SZ); + + set_bit(AF2_COREDUMP_SAVED, &a->flags2); + } + + clear_bit(AF2_COREDUMP_AVAIL, &a->flags2); + + /* Reset the chip */ + if (a->pcid->revision == MVR_FREY_B2) + esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2, + MU_CTL_IN_FULL_RST2); + else + esas2r_write_register_dword(a, MU_CTL_STATUS_IN, + MU_CTL_IN_FULL_RST); + + + /* Stall a little while to let the reset condition clear */ + mdelay(10); +} + +static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a) +{ + u32 starttime; + u32 doorbell; + + esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN); + starttime = jiffies_to_msecs(jiffies); + + while (true) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell & DRBL_POWER_DOWN) { + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 30000) { + esas2r_hdebug("Timeout waiting for power down"); + break; + } + } +} + +/* + * Perform power management processing including managing device states, adapter + * states, interrupts, and I/O. + */ +void esas2r_power_down(struct esas2r_adapter *a) +{ + set_bit(AF_POWER_MGT, &a->flags); + set_bit(AF_POWER_DOWN, &a->flags); + + if (!test_bit(AF_DEGRADED_MODE, &a->flags)) { + u32 starttime; + u32 doorbell; + + /* + * We are currently running OK and will be reinitializing later. + * increment the disable count to coordinate with + * esas2r_init_adapter. We don't have to do this in degraded + * mode since we never enabled interrupts in the first place. + */ + esas2r_disable_chip_interrupts(a); + esas2r_disable_heartbeat(a); + + /* wait for any VDA activity to clear before continuing */ + esas2r_write_register_dword(a, MU_DOORBELL_IN, + DRBL_MSG_IFC_DOWN); + starttime = jiffies_to_msecs(jiffies); + + while (true) { + doorbell = + esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell & DRBL_MSG_IFC_DOWN) { + esas2r_write_register_dword(a, MU_DOORBELL_OUT, + doorbell); + break; + } + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > 3000) { + esas2r_hdebug( + "timeout waiting for interface down"); + break; + } + } + + /* + * For versions of firmware that support it tell them the driver + * is powering down. + */ + if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2)) + esas2r_power_down_notify_firmware(a); + } + + /* Suspend I/O processing. */ + set_bit(AF_OS_RESET, &a->flags); + set_bit(AF_DISC_PENDING, &a->flags); + set_bit(AF_CHPRST_PENDING, &a->flags); + + esas2r_process_adapter_reset(a); + + /* Remove devices now that I/O is cleaned up. 
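+	 * The target count is saved off first, before the target database
+	 * is emptied, presumably so the device set can be compared when
+	 * the adapter is powered back up.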
*/ + a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a); + esas2r_targ_db_remove_all(a, false); +} + +/* + * Perform power management processing including managing device states, adapter + * states, interrupts, and I/O. + */ +bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll) +{ + bool ret; + + clear_bit(AF_POWER_DOWN, &a->flags); + esas2r_init_pci_cfg_space(a); + set_bit(AF_FIRST_INIT, &a->flags); + atomic_inc(&a->disable_cnt); + + /* reinitialize the adapter */ + ret = esas2r_check_adapter(a); + if (!esas2r_init_adapter_hw(a, init_poll)) + ret = false; + + /* send the reset asynchronous event */ + esas2r_send_reset_ae(a, true); + + /* clear this flag after initialization. */ + clear_bit(AF_POWER_MGT, &a->flags); + return ret; +} + +bool esas2r_is_adapter_present(struct esas2r_adapter *a) +{ + if (test_bit(AF_NOT_PRESENT, &a->flags)) + return false; + + if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) { + set_bit(AF_NOT_PRESENT, &a->flags); + + return false; + } + return true; +} + +const char *esas2r_get_model_name(struct esas2r_adapter *a) +{ + switch (a->pcid->subsystem_device) { + case ATTO_ESAS_R680: + return "ATTO ExpressSAS R680"; + + case ATTO_ESAS_R608: + return "ATTO ExpressSAS R608"; + + case ATTO_ESAS_R60F: + return "ATTO ExpressSAS R60F"; + + case ATTO_ESAS_R6F0: + return "ATTO ExpressSAS R6F0"; + + case ATTO_ESAS_R644: + return "ATTO ExpressSAS R644"; + + case ATTO_ESAS_R648: + return "ATTO ExpressSAS R648"; + + case ATTO_TSSC_3808: + return "ATTO ThunderStream SC 3808D"; + + case ATTO_TSSC_3808E: + return "ATTO ThunderStream SC 3808E"; + + case ATTO_TLSH_1068: + return "ATTO ThunderLink SH 1068"; + } + + return "ATTO SAS Controller"; +} + +const char *esas2r_get_model_name_short(struct esas2r_adapter *a) +{ + switch (a->pcid->subsystem_device) { + case ATTO_ESAS_R680: + return "R680"; + + case ATTO_ESAS_R608: + return "R608"; + + case ATTO_ESAS_R60F: + return "R60F"; + + case ATTO_ESAS_R6F0: + return "R6F0"; + + case ATTO_ESAS_R644: + return "R644"; + + case ATTO_ESAS_R648: + return "R648"; + + case ATTO_TSSC_3808: + return "SC 3808D"; + + case ATTO_TSSC_3808E: + return "SC 3808E"; + + case ATTO_TLSH_1068: + return "SH 1068"; + } + + return "unknown"; +} diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c new file mode 100644 index 00000000000..f16d6bcf9bb --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_int.c @@ -0,0 +1,942 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_int.c + * esas2r interrupt handling + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include "esas2r.h" + +/* Local function prototypes */ +static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell); +static void esas2r_get_outbound_responses(struct esas2r_adapter *a); +static void esas2r_process_bus_reset(struct esas2r_adapter *a); + +/* + * Poll the adapter for interrupts and service them. + * This function handles both legacy interrupts and MSI. + */ +void esas2r_polled_interrupt(struct esas2r_adapter *a) +{ + u32 intstat; + u32 doorbell; + + esas2r_disable_chip_interrupts(a); + + intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); + + if (intstat & MU_INTSTAT_POST_OUT) { + /* clear the interrupt */ + + esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT, + MU_OLIS_INT); + esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT); + + esas2r_get_outbound_responses(a); + } + + if (intstat & MU_INTSTAT_DRBL) { + doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT); + if (doorbell != 0) + esas2r_doorbell_interrupt(a, doorbell); + } + + esas2r_enable_chip_interrupts(a); + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); +} + +/* + * Legacy and MSI interrupt handlers. Note that the legacy interrupt handler + * schedules a TASKLET to process events, whereas the MSI handler just + * processes interrupt events directly. 
+ */
+irqreturn_t esas2r_interrupt(int irq, void *dev_id)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
+
+	if (!esas2r_adapter_interrupt_pending(a))
+		return IRQ_NONE;
+
+	set_bit(AF2_INT_PENDING, &a->flags2);
+	esas2r_schedule_tasklet(a);
+
+	return IRQ_HANDLED;
+}
+
+void esas2r_adapter_interrupt(struct esas2r_adapter *a)
+{
+	u32 doorbell;
+
+	if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
+		/* clear the interrupt */
+		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+					    MU_OLIS_INT);
+		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+		esas2r_get_outbound_responses(a);
+	}
+
+	if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
+		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+		if (doorbell != 0)
+			esas2r_doorbell_interrupt(a, doorbell);
+	}
+
+	a->int_mask = ESAS2R_INT_STS_MASK;
+
+	esas2r_enable_chip_interrupts(a);
+
+	if (likely(atomic_read(&a->disable_cnt) == 0))
+		esas2r_do_deferred_processes(a);
+}
+
+irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
+	u32 intstat;
+	u32 doorbell;
+
+	intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+	if (likely(intstat & MU_INTSTAT_POST_OUT)) {
+		/* clear the interrupt */
+
+		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+					    MU_OLIS_INT);
+		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+
+		esas2r_get_outbound_responses(a);
+	}
+
+	if (unlikely(intstat & MU_INTSTAT_DRBL)) {
+		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+		if (doorbell != 0)
+			esas2r_doorbell_interrupt(a, doorbell);
+	}
+
+	/*
+	 * Work around a chip bug and force a new MSI to be sent if one is
+	 * still pending.
+	 */
+	esas2r_disable_chip_interrupts(a);
+	esas2r_enable_chip_interrupts(a);
+
+	if (likely(atomic_read(&a->disable_cnt) == 0))
+		esas2r_do_deferred_processes(a);
+
+	esas2r_do_tasklet_tasks(a);
+
+	return IRQ_HANDLED;
+}
+
+
+
+static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
+					   struct esas2r_request *rq,
+					   struct atto_vda_ob_rsp *rsp)
+{
+
+	/*
+	 * For I/O requests, only copy the response if an error
+	 * occurred and setup a callback to do error processing.
+	 */
+	if (unlikely(rq->req_stat != RS_SUCCESS)) {
+		memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
+
+		if (rq->req_stat == RS_ABORTED) {
+			if (rq->timeout > RQ_MAX_TIMEOUT)
+				rq->req_stat = RS_TIMEOUT;
+		} else if (rq->req_stat == RS_SCSI_ERROR) {
+			u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
+
+			esas2r_trace("scsistatus: %x", scsistatus);
+
+			/* Any of these are a good result.
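+			 * In SAM terms, CONDITION MET and the two
+			 * INTERMEDIATE statuses are all success codes, so
+			 * they are normalized to GOOD below.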
*/ + if (scsistatus == SAM_STAT_GOOD || scsistatus == + SAM_STAT_CONDITION_MET || scsistatus == + SAM_STAT_INTERMEDIATE || scsistatus == + SAM_STAT_INTERMEDIATE_CONDITION_MET) { + rq->req_stat = RS_SUCCESS; + rq->func_rsp.scsi_rsp.scsi_stat = + SAM_STAT_GOOD; + } + } + } +} + +static void esas2r_get_outbound_responses(struct esas2r_adapter *a) +{ + struct atto_vda_ob_rsp *rsp; + u32 rspput_ptr; + u32 rspget_ptr; + struct esas2r_request *rq; + u32 handle; + unsigned long flags; + + LIST_HEAD(comp_list); + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->queue_lock, flags); + + /* Get the outbound limit and pointers */ + rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR; + rspget_ptr = a->last_read; + + esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr); + + /* If we don't have anything to process, get out */ + if (unlikely(rspget_ptr == rspput_ptr)) { + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_trace_exit(); + return; + } + + /* Make sure the firmware is healthy */ + if (unlikely(rspput_ptr >= a->list_size)) { + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_bugon(); + esas2r_local_reset_adapter(a); + esas2r_trace_exit(); + return; + } + + do { + rspget_ptr++; + + if (rspget_ptr >= a->list_size) + rspget_ptr = 0; + + rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr + + rspget_ptr; + + handle = rsp->handle; + + /* Verify the handle range */ + if (unlikely(LOWORD(handle) == 0 + || LOWORD(handle) > num_requests + + num_ae_requests + 1)) { + esas2r_bugon(); + continue; + } + + /* Get the request for this handle */ + rq = a->req_table[LOWORD(handle)]; + + if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) { + esas2r_bugon(); + continue; + } + + list_del(&rq->req_list); + + /* Get the completion status */ + rq->req_stat = rsp->req_stat; + + esas2r_trace("handle: %x", handle); + esas2r_trace("rq: %p", rq); + esas2r_trace("req_status: %x", rq->req_stat); + + if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) { + esas2r_handle_outbound_rsp_err(a, rq, rsp); + } else { + /* + * Copy the outbound completion struct for non-I/O + * requests. + */ + memcpy(&rq->func_rsp, &rsp->func_rsp, + sizeof(rsp->func_rsp)); + } + + /* Queue the request for completion. */ + list_add_tail(&rq->comp_list, &comp_list); + + } while (rspget_ptr != rspput_ptr); + + a->last_read = rspget_ptr; + spin_unlock_irqrestore(&a->queue_lock, flags); + + esas2r_comp_list_drain(a, &comp_list); + esas2r_trace_exit(); +} + +/* + * Perform all deferred processes for the adapter. Deferred + * processes can only be done while the current interrupt + * disable_cnt for the adapter is zero. + */ +void esas2r_do_deferred_processes(struct esas2r_adapter *a) +{ + int startreqs = 2; + struct esas2r_request *rq; + unsigned long flags; + + /* + * startreqs is used to control starting requests + * that are on the deferred queue + * = 0 - do not start any requests + * = 1 - can start discovery requests + * = 2 - can start any request + */ + + if (test_bit(AF_CHPRST_PENDING, &a->flags) || + test_bit(AF_FLASHING, &a->flags)) + startreqs = 0; + else if (test_bit(AF_DISC_PENDING, &a->flags)) + startreqs = 1; + + atomic_inc(&a->disable_cnt); + + /* Clear off the completed list to be processed later. 
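+	 * If a tasklet pass is still owed, reschedule it and refrain from
+	 * starting anything from the defer queue on this pass.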
*/ + + if (esas2r_is_tasklet_pending(a)) { + esas2r_schedule_tasklet(a); + + startreqs = 0; + } + + /* + * If we can start requests then traverse the defer queue + * looking for requests to start or complete + */ + if (startreqs && !list_empty(&a->defer_list)) { + LIST_HEAD(comp_list); + struct list_head *element, *next; + + spin_lock_irqsave(&a->queue_lock, flags); + + list_for_each_safe(element, next, &a->defer_list) { + rq = list_entry(element, struct esas2r_request, + req_list); + + if (rq->req_stat != RS_PENDING) { + list_del(element); + list_add_tail(&rq->comp_list, &comp_list); + } + /* + * Process discovery and OS requests separately. We + * can't hold up discovery requests when discovery is + * pending. In general, there may be different sets of + * conditions for starting different types of requests. + */ + else if (rq->req_type == RT_DISC_REQ) { + list_del(element); + esas2r_disc_local_start_request(a, rq); + } else if (startreqs == 2) { + list_del(element); + esas2r_local_start_request(a, rq); + + /* + * Flashing could have been set by last local + * start + */ + if (test_bit(AF_FLASHING, &a->flags)) + break; + } + } + + spin_unlock_irqrestore(&a->queue_lock, flags); + esas2r_comp_list_drain(a, &comp_list); + } + + atomic_dec(&a->disable_cnt); +} + +/* + * Process an adapter reset (or one that is about to happen) + * by making sure all outstanding requests are completed that + * haven't been already. + */ +void esas2r_process_adapter_reset(struct esas2r_adapter *a) +{ + struct esas2r_request *rq = &a->general_req; + unsigned long flags; + struct esas2r_disc_context *dc; + + LIST_HEAD(comp_list); + struct list_head *element; + + esas2r_trace_enter(); + + spin_lock_irqsave(&a->queue_lock, flags); + + /* abort the active discovery, if any. */ + + if (rq->interrupt_cx) { + dc = (struct esas2r_disc_context *)rq->interrupt_cx; + + dc->disc_evt = 0; + + clear_bit(AF_DISC_IN_PROG, &a->flags); + } + + /* + * just clear the interrupt callback for now. it will be dequeued if + * and when we find it on the active queue and we don't want the + * callback called. also set the dummy completion callback in case we + * were doing an I/O request. 
+ */
+
+	rq->interrupt_cx = NULL;
+	rq->interrupt_cb = NULL;
+
+	rq->comp_cb = esas2r_dummy_complete;
+
+	/* Reset the read and write pointers */
+
+	*a->outbound_copy =
+		a->last_write =
+			a->last_read = a->list_size - 1;
+
+	set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
+
+	/* Kill all the requests on the defer list */
+	list_for_each(element, &a->defer_list) {
+		rq = list_entry(element, struct esas2r_request, req_list);
+
+		if (rq->req_stat == RS_STARTED)
+			if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+				list_add_tail(&rq->comp_list, &comp_list);
+	}
+
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+	esas2r_comp_list_drain(a, &comp_list);
+	esas2r_process_bus_reset(a);
+	esas2r_trace_exit();
+}
+
+static void esas2r_process_bus_reset(struct esas2r_adapter *a)
+{
+	struct esas2r_request *rq;
+	struct list_head *element;
+	unsigned long flags;
+
+	LIST_HEAD(comp_list);
+
+	esas2r_trace_enter();
+
+	esas2r_hdebug("reset detected");
+
+	spin_lock_irqsave(&a->queue_lock, flags);
+
+	/* kill all the requests on the deferred queue */
+	list_for_each(element, &a->defer_list) {
+		rq = list_entry(element, struct esas2r_request, req_list);
+		if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+			list_add_tail(&rq->comp_list, &comp_list);
+	}
+
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+
+	esas2r_comp_list_drain(a, &comp_list);
+
+	if (atomic_read(&a->disable_cnt) == 0)
+		esas2r_do_deferred_processes(a);
+
+	clear_bit(AF_OS_RESET, &a->flags);
+
+	esas2r_trace_exit();
+}
+
+static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
+{
+
+	clear_bit(AF_CHPRST_NEEDED, &a->flags);
+	clear_bit(AF_BUSRST_NEEDED, &a->flags);
+	clear_bit(AF_BUSRST_DETECTED, &a->flags);
+	clear_bit(AF_BUSRST_PENDING, &a->flags);
+	/*
+	 * Make sure we don't attempt more than 3 resets
+	 * when the uptime between resets does not exceed one
+	 * minute.  This will stop any situation where there is
+	 * really something wrong with the hardware.  The way
+	 * this works is that we start with uptime ticks at 0.
+	 * Each time we do a reset, we add 20 seconds worth to
+	 * the count.  Each time a timer tick occurs, as long
+	 * as a chip reset is not pending, we decrement the
+	 * tick count.  If the uptime ticks ever gets to 60
+	 * seconds worth, we disable the adapter from that
+	 * point forward.  Three strikes, you're out.
+	 */
+	if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
+					      ESAS2R_CHP_UPTIME_MAX)) {
+		esas2r_hdebug("*** adapter disabled ***");
+
+		/*
+		 * Ok, some kind of hard failure.  Make sure we
+		 * exit this loop with chip interrupts
+		 * permanently disabled so we don't lock up the
+		 * entire system.  Also flag degraded mode to
+		 * prevent the heartbeat from trying to recover.
+		 */
+
+		set_bit(AF_DEGRADED_MODE, &a->flags);
+		set_bit(AF_DISABLED, &a->flags);
+		clear_bit(AF_CHPRST_PENDING, &a->flags);
+		clear_bit(AF_DISC_PENDING, &a->flags);
+
+		esas2r_disable_chip_interrupts(a);
+		a->int_mask = 0;
+		esas2r_process_adapter_reset(a);
+
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "Adapter disabled because of hardware failure");
+	} else {
+		bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags);
+
+		if (!alrdyrst)
+			/*
+			 * Only disable interrupts if this is
+			 * the first reset attempt.
+			 */
+			esas2r_disable_chip_interrupts(a);
+
+		if ((test_bit(AF_POWER_MGT, &a->flags)) &&
+		    !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) {
+			/*
+			 * Don't reset the chip on the first
+			 * deferred power up attempt.
+ */
+		} else {
+			esas2r_hdebug("*** resetting chip ***");
+			esas2r_reset_chip(a);
+		}
+
+		/* Kick off the reinitialization */
+		a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
+		a->chip_init_time = jiffies_to_msecs(jiffies);
+		if (!test_bit(AF_POWER_MGT, &a->flags)) {
+			esas2r_process_adapter_reset(a);
+
+			if (!alrdyrst) {
+				/* Remove devices now that I/O is cleaned up. */
+				a->prev_dev_cnt =
+					esas2r_targ_db_get_tgt_cnt(a);
+				esas2r_targ_db_remove_all(a, false);
+			}
+		}
+
+		a->int_mask = 0;
+	}
+}
+
+static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
+{
+	while (test_bit(AF_CHPRST_DETECTED, &a->flags)) {
+		/*
+		 * Balance the enable in esas2r_init_adapter_hw().
+		 * esas2r_power_down() already took care of it for power
+		 * management.
+		 */
+		if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
+		    !test_bit(AF_POWER_MGT, &a->flags))
+			esas2r_disable_chip_interrupts(a);
+
+		/* Reinitialize the chip. */
+		esas2r_check_adapter(a);
+		esas2r_init_adapter_hw(a, false);
+
+		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
+			break;
+
+		if (test_bit(AF_POWER_MGT, &a->flags)) {
+			/* Recovery from power management. */
+			if (test_bit(AF_FIRST_INIT, &a->flags)) {
+				/* Chip reset during normal power up */
+				esas2r_log(ESAS2R_LOG_CRIT,
+					   "The firmware was reset during a normal power-up sequence");
+			} else {
+				/* Deferred power up complete. */
+				clear_bit(AF_POWER_MGT, &a->flags);
+				esas2r_send_reset_ae(a, true);
+			}
+		} else {
+			/* Recovery from online chip reset. */
+			if (test_bit(AF_FIRST_INIT, &a->flags)) {
+				/* Chip reset during driver load */
+			} else {
+				/* Chip reset after driver load */
+				esas2r_send_reset_ae(a, false);
+			}
+
+			esas2r_log(ESAS2R_LOG_CRIT,
+				   "Recovering from a chip reset while the chip was online");
+		}
+
+		clear_bit(AF_CHPRST_STARTED, &a->flags);
+		esas2r_enable_chip_interrupts(a);
+
+		/*
+		 * Clear this flag last!  This indicates that the chip has been
+		 * reset already during initialization.
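+		 * If yet another reset was detected while reinitializing,
+		 * the surrounding while loop simply runs again.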
+ */ + clear_bit(AF_CHPRST_DETECTED, &a->flags); + } +} + + +/* Perform deferred tasks when chip interrupts are disabled */ +void esas2r_do_tasklet_tasks(struct esas2r_adapter *a) +{ + + if (test_bit(AF_CHPRST_NEEDED, &a->flags) || + test_bit(AF_CHPRST_DETECTED, &a->flags)) { + if (test_bit(AF_CHPRST_NEEDED, &a->flags)) + esas2r_chip_rst_needed_during_tasklet(a); + + esas2r_handle_chip_rst_during_tasklet(a); + } + + if (test_bit(AF_BUSRST_NEEDED, &a->flags)) { + esas2r_hdebug("hard resetting bus"); + + clear_bit(AF_BUSRST_NEEDED, &a->flags); + + if (test_bit(AF_FLASHING, &a->flags)) + set_bit(AF_BUSRST_DETECTED, &a->flags); + else + esas2r_write_register_dword(a, MU_DOORBELL_IN, + DRBL_RESET_BUS); + } + + if (test_bit(AF_BUSRST_DETECTED, &a->flags)) { + esas2r_process_bus_reset(a); + + esas2r_log_dev(ESAS2R_LOG_WARN, + &(a->host->shost_gendev), + "scsi_report_bus_reset() called"); + + scsi_report_bus_reset(a->host, 0); + + clear_bit(AF_BUSRST_DETECTED, &a->flags); + clear_bit(AF_BUSRST_PENDING, &a->flags); + + esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete"); + } + + if (test_bit(AF_PORT_CHANGE, &a->flags)) { + clear_bit(AF_PORT_CHANGE, &a->flags); + + esas2r_targ_db_report_changes(a); + } + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); +} + +static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell) +{ + if (!(doorbell & DRBL_FORCE_INT)) { + esas2r_trace_enter(); + esas2r_trace("doorbell: %x", doorbell); + } + + /* First clear the doorbell bits */ + esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell); + + if (doorbell & DRBL_RESET_BUS) + set_bit(AF_BUSRST_DETECTED, &a->flags); + + if (doorbell & DRBL_FORCE_INT) + clear_bit(AF_HEARTBEAT, &a->flags); + + if (doorbell & DRBL_PANIC_REASON_MASK) { + esas2r_hdebug("*** Firmware Panic ***"); + esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked"); + } + + if (doorbell & DRBL_FW_RESET) { + set_bit(AF2_COREDUMP_AVAIL, &a->flags2); + esas2r_local_reset_adapter(a); + } + + if (!(doorbell & DRBL_FORCE_INT)) + esas2r_trace_exit(); +} + +void esas2r_force_interrupt(struct esas2r_adapter *a) +{ + esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT | + DRBL_DRV_VER); +} + + +static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae, + u16 target, u32 length) +{ + struct esas2r_target *t = a->targetdb + target; + u32 cplen = length; + unsigned long flags; + + if (cplen > sizeof(t->lu_event)) + cplen = sizeof(t->lu_event); + + esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent); + esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate); + + spin_lock_irqsave(&a->mem_lock, flags); + + t->new_target_state = TS_INVALID; + + if (ae->lu.dwevent & VDAAE_LU_LOST) { + t->new_target_state = TS_NOT_PRESENT; + } else { + switch (ae->lu.bystate) { + case VDAAE_LU_NOT_PRESENT: + case VDAAE_LU_OFFLINE: + case VDAAE_LU_DELETED: + case VDAAE_LU_FACTORY_DISABLED: + t->new_target_state = TS_NOT_PRESENT; + break; + + case VDAAE_LU_ONLINE: + case VDAAE_LU_DEGRADED: + t->new_target_state = TS_PRESENT; + break; + } + } + + if (t->new_target_state != TS_INVALID) { + memcpy(&t->lu_event, &ae->lu, cplen); + + esas2r_disc_queue_event(a, DCDE_DEV_CHANGE); + } + + spin_unlock_irqrestore(&a->mem_lock, flags); +} + + + +void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq) +{ + union atto_vda_ae *ae = + (union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data; + u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length); + union atto_vda_ae *last = + (union atto_vda_ae 
*)(rq->vda_rsp_data->ae_data.event_data + + length); + + esas2r_trace_enter(); + esas2r_trace("length: %d", length); + + if (length > sizeof(struct atto_vda_ae_data) + || (length & 3) != 0 + || length == 0) { + esas2r_log(ESAS2R_LOG_WARN, + "The AE request response length (%p) is too long: %d", + rq, length); + + esas2r_hdebug("aereq->length (0x%x) too long", length); + esas2r_bugon(); + + last = ae; + } + + while (ae < last) { + u16 target; + + esas2r_trace("ae: %p", ae); + esas2r_trace("ae->hdr: %p", &(ae->hdr)); + + length = ae->hdr.bylength; + + if (length > (u32)((u8 *)last - (u8 *)ae) + || (length & 3) != 0 + || length == 0) { + esas2r_log(ESAS2R_LOG_CRIT, + "the async event length is invalid (%p): %d", + ae, length); + + esas2r_hdebug("ae->hdr.length (0x%x) invalid", length); + esas2r_bugon(); + + break; + } + + esas2r_nuxi_ae_data(ae); + + esas2r_queue_fw_event(a, fw_event_vda_ae, ae, + sizeof(union atto_vda_ae)); + + switch (ae->hdr.bytype) { + case VDAAE_HDR_TYPE_RAID: + + if (ae->raid.dwflags & (VDAAE_GROUP_STATE + | VDAAE_RBLD_STATE + | VDAAE_MEMBER_CHG + | VDAAE_PART_CHG)) { + esas2r_log(ESAS2R_LOG_INFO, + "RAID event received - name:%s rebuild_state:%d group_state:%d", + ae->raid.acname, + ae->raid.byrebuild_state, + ae->raid.bygroup_state); + } + + break; + + case VDAAE_HDR_TYPE_LU: + esas2r_log(ESAS2R_LOG_INFO, + "LUN event received: event:%d target_id:%d LUN:%d state:%d", + ae->lu.dwevent, + ae->lu.id.tgtlun.wtarget_id, + ae->lu.id.tgtlun.bylun, + ae->lu.bystate); + + target = ae->lu.id.tgtlun.wtarget_id; + + if (target < ESAS2R_MAX_TARGETS) + esas2r_lun_event(a, ae, target, length); + + break; + + case VDAAE_HDR_TYPE_DISK: + esas2r_log(ESAS2R_LOG_INFO, "Disk event received"); + break; + + default: + + /* Silently ignore the rest and let the apps deal with + * them. + */ + + break; + } + + ae = (union atto_vda_ae *)((u8 *)ae + length); + } + + /* Now requeue it. */ + esas2r_start_ae_request(a, rq); + esas2r_trace_exit(); +} + +/* Send an asynchronous event for a chip reset or power management. 
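+ * The event is fabricated locally (header only, no payload) and pushed
+ * through the same queue used for firmware-generated AEs.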
*/
+void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
+{
+	struct atto_vda_ae_hdr ae;
+
+	if (pwr_mgt)
+		ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
+	else
+		ae.bytype = VDAAE_HDR_TYPE_RESET;
+
+	ae.byversion = VDAAE_HDR_VER_0;
+	ae.byflags = 0;
+	ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
+
+	if (pwr_mgt)
+		esas2r_hdebug("*** sending power management AE ***");
+	else
+		esas2r_hdebug("*** sending reset AE ***");
+
+	esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
+			      sizeof(union atto_vda_ae));
+}
+
+void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
+{}
+
+static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
+				       struct esas2r_request *rq)
+{
+	u8 snslen, snslen2;
+
+	snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;
+
+	if (snslen > rq->sense_len)
+		snslen = rq->sense_len;
+
+	if (snslen) {
+		if (rq->sense_buf)
+			memcpy(rq->sense_buf, rq->data_buf, snslen);
+		else
+			rq->sense_buf = (u8 *)rq->data_buf;
+
+		/* See about possible sense data */
+		if (snslen2 > 0x0c) {
+			u8 *s = (u8 *)rq->data_buf;
+
+			esas2r_trace_enter();
+
+			/* Report LUNS data has changed */
+			if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
+				esas2r_trace("rq->target_id: %d",
+					     rq->target_id);
+				esas2r_target_state_changed(a, rq->target_id,
+							    TS_LUN_CHANGE);
+			}
+
+			esas2r_trace("add_sense_key=%x", s[0x0c]);
+			esas2r_trace("add_sense_qual=%x", s[0x0d]);
+			esas2r_trace_exit();
+		}
+	}
+
+	rq->sense_len = snslen;
+}
+
+
+void esas2r_complete_request(struct esas2r_adapter *a,
+			     struct esas2r_request *rq)
+{
+	if (rq->vrq->scsi.function == VDA_FUNC_FLASH
+	    && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
+		clear_bit(AF_FLASHING, &a->flags);
+
+	/* See if we setup a callback to do special processing */
+
+	if (rq->interrupt_cb) {
+		(*rq->interrupt_cb)(a, rq);
+
+		if (rq->req_stat == RS_PENDING) {
+			esas2r_start_request(a, rq);
+			return;
+		}
+	}
+
+	if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
+	    && unlikely(rq->req_stat != RS_SUCCESS)) {
+		esas2r_check_req_rsp_sense(a, rq);
+		esas2r_log_request_failure(a, rq);
+	}
+
+	(*rq->comp_cb)(a, rq);
+}
diff --git a/drivers/scsi/esas2r/esas2r_io.c b/drivers/scsi/esas2r/esas2r_io.c
new file mode 100644
index 00000000000..a8df916cd57
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_io.c
@@ -0,0 +1,877 @@
+/*
+ *  linux/drivers/scsi/esas2r/esas2r_io.c
+ *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ *  Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+	struct esas2r_target *t = NULL;
+	struct esas2r_request *startrq = rq;
+	unsigned long flags;
+
+	if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) ||
+		     test_bit(AF_POWER_DOWN, &a->flags))) {
+		if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
+			rq->req_stat = RS_SEL2;
+		else
+			rq->req_stat = RS_DEGRADED;
+	} else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
+		t = a->targetdb + rq->target_id;
+
+		if (unlikely(t >= a->targetdb_end
+			     || !(t->flags & TF_USED))) {
+			rq->req_stat = RS_SEL;
+		} else {
+			/* copy in the target ID. */
+			rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);
+
+			/*
+			 * Test if we want to report RS_SEL for missing target.
+			 * Note that if AF_DISC_PENDING is set then this will
+			 * go on the defer queue.
+			 */
+			if (unlikely(t->target_state != TS_PRESENT &&
+				     !test_bit(AF_DISC_PENDING, &a->flags)))
+				rq->req_stat = RS_SEL;
+		}
+	}
+
+	if (unlikely(rq->req_stat != RS_PENDING)) {
+		esas2r_complete_request(a, rq);
+		return;
+	}
+
+	esas2r_trace("rq=%p", rq);
+	esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);
+
+	if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+		esas2r_trace("rq->target_id=%d", rq->target_id);
+		esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
+	}
+
+	spin_lock_irqsave(&a->queue_lock, flags);
+
+	if (likely(list_empty(&a->defer_list) &&
+		   !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+		   !test_bit(AF_FLASHING, &a->flags) &&
+		   !test_bit(AF_DISC_PENDING, &a->flags)))
+		esas2r_local_start_request(a, startrq);
+	else
+		list_add_tail(&startrq->req_list, &a->defer_list);
+
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+}
+
+/*
+ * Starts the specified request.  All requests have RS_PENDING set when this
+ * routine is called.  The caller is usually esas2r_start_request, but
+ * esas2r_do_deferred_processes will start requests that are deferred.
+ *
+ * The caller must ensure that requests can be started.
+ *
+ * esas2r_start_request will defer a request if there are already requests
+ * waiting or there is a chip reset pending.  Once the reset condition clears,
+ * esas2r_do_deferred_processes will call this function to start the request.
+ *
+ * When a request is started, it is placed on the active list and queued to
+ * the controller.
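+ *
+ * To summarize the deferral test in esas2r_start_request(): a request goes
+ * on the defer list when that list is already non-empty or when any of
+ * AF_CHPRST_PENDING, AF_FLASHING or AF_DISC_PENDING is set; otherwise it
+ * is started immediately.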
+ */ +void esas2r_local_start_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + esas2r_trace_enter(); + esas2r_trace("rq=%p", rq); + esas2r_trace("rq->vrq:%p", rq->vrq); + esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr); + + if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH + && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)) + set_bit(AF_FLASHING, &a->flags); + + list_add_tail(&rq->req_list, &a->active_list); + esas2r_start_vda_request(a, rq); + esas2r_trace_exit(); + return; +} + +void esas2r_start_vda_request(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct esas2r_inbound_list_source_entry *element; + u32 dw; + + rq->req_stat = RS_STARTED; + /* + * Calculate the inbound list entry location and the current state of + * toggle bit. + */ + a->last_write++; + if (a->last_write >= a->list_size) { + a->last_write = 0; + /* update the toggle bit */ + if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags)) + clear_bit(AF_COMM_LIST_TOGGLE, &a->flags); + else + set_bit(AF_COMM_LIST_TOGGLE, &a->flags); + } + + element = + (struct esas2r_inbound_list_source_entry *)a->inbound_list_md. + virt_addr + + a->last_write; + + /* Set the VDA request size if it was never modified */ + if (rq->vda_req_sz == RQ_SIZE_DEFAULT) + rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32)); + + element->address = cpu_to_le64(rq->vrq_md->phys_addr); + element->length = cpu_to_le32(rq->vda_req_sz); + + /* Update the write pointer */ + dw = a->last_write; + + if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags)) + dw |= MU_ILW_TOGGLE; + + esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle); + esas2r_trace("dw:%x", dw); + esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz); + esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw); +} + +/* + * Build the scatter/gather list for an I/O request according to the + * specifications placed in the s/g context. The caller must initialize + * context prior to the initial call by calling esas2r_sgc_init(). + */ +bool esas2r_build_sg_list_sge(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc) +{ + struct esas2r_request *rq = sgc->first_req; + union atto_vda_req *vrq = rq->vrq; + + while (sgc->length) { + u32 rem = 0; + u64 addr; + u32 len; + + len = (*sgc->get_phys_addr)(sgc, &addr); + + if (unlikely(len == 0)) + return false; + + /* if current length is more than what's left, stop there */ + if (unlikely(len > sgc->length)) + len = sgc->length; + +another_entry: + /* limit to a round number less than the maximum length */ + if (len > SGE_LEN_MAX) { + /* + * Save the remainder of the split. Whenever we limit + * an entry we come back around to build entries out + * of the leftover. We do this to prevent multiple + * calls to the get_phys_addr() function for an SGE + * that is too large. + */ + rem = len - SGE_LEN_MAX; + len = SGE_LEN_MAX; + } + + /* See if we need to allocate a new SGL */ + if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) { + u8 sgelen; + struct esas2r_mem_desc *sgl; + + /* + * If no SGls are available, return failure. The + * caller can call us later with the current context + * to pick up here. + */ + sgl = esas2r_alloc_sgl(a); + + if (unlikely(sgl == NULL)) + return false; + + /* Calculate the length of the last SGE filled in */ + sgelen = (u8)((u8 *)sgc->sge.a64.curr + - (u8 *)sgc->sge.a64.last); + + /* + * Copy the last SGE filled in to the first entry of + * the new SGL to make room for the chain entry. 
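+			 *
+			 * Sketch of the fixup (the last SGE of the old SGL
+			 * is rewritten as a chain entry further below):
+			 *
+			 *   old SGL: [ ..., lastSGE -> chain entry ]
+			 *   new SGL: [ copy of lastSGE, next SGEs ... ]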
+ */
+			memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);
+
+			/* Figure out the new curr pointer in the new segment */
+			sgc->sge.a64.curr =
+				(struct atto_vda_sge *)((u8 *)sgl->virt_addr +
+							sgelen);
+
+			/* Set the limit pointer and build the chain entry */
+			sgc->sge.a64.limit =
+				(struct atto_vda_sge *)((u8 *)sgl->virt_addr
+							+ sgl_page_size
+							- sizeof(struct
+								 atto_vda_sge));
+			sgc->sge.a64.last->length = cpu_to_le32(
+				SGE_CHAIN | SGE_ADDR_64);
+			sgc->sge.a64.last->address =
+				cpu_to_le64(sgl->phys_addr);
+
+			/*
+			 * Now, if there was a previous chain entry, then
+			 * update it to contain the length of this segment
+			 * and size of this chain.  Otherwise this is the
+			 * first SGL, so set the chain_offset in the request.
+			 */
+			if (sgc->sge.a64.chain) {
+				sgc->sge.a64.chain->length |=
+					cpu_to_le32(
+						((u8 *)(sgc->sge.a64.
+							last + 1)
+						 - (u8 *)rq->sg_table->
+						 virt_addr)
+						+ sizeof(struct atto_vda_sge) *
+						LOBIT(SGE_CHAIN_SZ));
+			} else {
+				vrq->scsi.chain_offset = (u8)
+							 ((u8 *)sgc->
+							  sge.a64.last -
+							  (u8 *)vrq);
+
+				/*
+				 * This is the first SGL, so set the
+				 * chain_offset and the VDA request size in
+				 * the request.
+				 */
+				rq->vda_req_sz =
+					(vrq->scsi.chain_offset +
+					 sizeof(struct atto_vda_sge) +
+					 3)
+					/ sizeof(u32);
+			}
+
+			/*
+			 * Remember this so when we get a new SGL filled in we
+			 * can update the length of this chain entry.
+			 */
+			sgc->sge.a64.chain = sgc->sge.a64.last;
+
+			/* Now link the new SGL onto the primary request. */
+			list_add(&sgl->next_desc, &rq->sg_table_head);
+		}
+
+		/* Update last one filled in */
+		sgc->sge.a64.last = sgc->sge.a64.curr;
+
+		/* Build the new SGE and update the S/G context */
+		sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len);
+		sgc->sge.a64.curr->address = cpu_to_le64(addr);
+		sgc->sge.a64.curr++;
+		sgc->cur_offset += len;
+		sgc->length -= len;
+
+		/*
+		 * Check if we previously split an entry.  If so we have to
+		 * pick up where we left off.
+		 */
+		if (rem) {
+			addr += len;
+			len = rem;
+			rem = 0;
+			goto another_entry;
+		}
+	}
+
+	/* Mark the end of the SGL */
+	sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST);
+
+	/*
+	 * If there was a previous chain entry, update the length to indicate
+	 * the length of this last segment.
+	 */
+	if (sgc->sge.a64.chain) {
+		sgc->sge.a64.chain->length |= cpu_to_le32(
+			((u8 *)(sgc->sge.a64.curr) -
+			 (u8 *)rq->sg_table->virt_addr));
+	} else {
+		u16 reqsize;
+
+		/*
+		 * The entire VDA request was not used so let's
+		 * set the size of the VDA request to be DMA'd
+		 */
+		reqsize =
+			((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq)
+			 + sizeof(struct atto_vda_sge) + 3) / sizeof(u32);
+
+		/*
+		 * Only update the request size if it is bigger than what is
+		 * already there.  We can come in here twice for some
+		 * management commands.
+		 */
+		if (reqsize > rq->vda_req_sz)
+			rq->vda_req_sz = reqsize;
+	}
+	return true;
+}
+
+
+/*
+ * Create PRD list for each I-block consumed by the command.  This routine
+ * determines how much data is required from each I-block being consumed
+ * by the command.  The first and last I-blocks can be partials and all of
+ * the I-blocks in between are for a full I-block of data.
+ *
+ * The interleave size is used to determine the number of bytes in the 1st
+ * I-block; the remaining I-blocks carry what remains of the transfer.
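+ *
+ * Worked example with hypothetical numbers: if inter_block is 16 LBAs and
+ * a command starts at LBA 10, the first PRD list covers
+ * 16 - (10 & 15) = 6 blocks, and each subsequent I-block list covers a
+ * full 16 blocks.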
+ */
+static bool esas2r_build_prd_iblk(struct esas2r_adapter *a,
+				  struct esas2r_sg_context *sgc)
+{
+	struct esas2r_request *rq = sgc->first_req;
+	u64 addr;
+	u32 len;
+	struct esas2r_mem_desc *sgl;
+	u32 numchain = 1;
+	u32 rem = 0;
+
+	while (sgc->length) {
+		/* Get the next address/length pair */
+
+		len = (*sgc->get_phys_addr)(sgc, &addr);
+
+		if (unlikely(len == 0))
+			return false;
+
+		/* If current length is more than what's left, stop there */
+
+		if (unlikely(len > sgc->length))
+			len = sgc->length;
+
+another_entry:
+		/* Limit to a round number less than the maximum length */
+
+		if (len > PRD_LEN_MAX) {
+			/*
+			 * Save the remainder of the split.  Whenever we limit
+			 * an entry we come back around to build entries out
+			 * of the leftover.  We do this to prevent multiple
+			 * calls to the get_phys_addr() function for an SGE
+			 * that is too large.
+			 */
+			rem = len - PRD_LEN_MAX;
+			len = PRD_LEN_MAX;
+		}
+
+		/* See if we need to allocate a new SGL */
+		if (sgc->sge.prd.sge_cnt == 0) {
+			if (len == sgc->length) {
+				/*
+				 * We only have 1 PRD entry left.
+				 * It can be placed where the chain
+				 * entry would have gone
+				 */
+
+				/* Build the simple SGE */
+				sgc->sge.prd.curr->ctl_len = cpu_to_le32(
+					PRD_DATA | len);
+				sgc->sge.prd.curr->address = cpu_to_le64(addr);
+
+				/* Adjust length related fields */
+				sgc->cur_offset += len;
+				sgc->length -= len;
+
+				/* We use the reserved chain entry for data */
+				numchain = 0;
+
+				break;
+			}
+
+			if (sgc->sge.prd.chain) {
+				/*
+				 * Set the number of entries of the
+				 * just-completed SGL in the previous chain
+				 * entry; the final, possibly partial SGL is
+				 * fixed up after the loop.
+				 */
+
+				sgc->sge.prd.chain->ctl_len |= cpu_to_le32(
+					sgc->sge.prd.sgl_max_cnt);
+			}
+
+			/*
+			 * If no SGLs are available, return failure.  The
+			 * caller can call us later with the current context
+			 * to pick up here.
+			 */
+
+			sgl = esas2r_alloc_sgl(a);
+
+			if (unlikely(sgl == NULL))
+				return false;
+
+			/*
+			 * Link the new SGL onto the chain
+			 * They are in reverse order
+			 */
+			list_add(&sgl->next_desc, &rq->sg_table_head);
+
+			/*
+			 * An SGL was just filled in and we are starting
+			 * a new SGL.  Prime the chain of the ending SGL with
+			 * info that points to the new SGL.  The length gets
+			 * filled in when the new SGL is filled or ended
+			 */
+
+			sgc->sge.prd.chain = sgc->sge.prd.curr;
+
+			sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN);
+			sgc->sge.prd.chain->address =
+				cpu_to_le64(sgl->phys_addr);
+
+			/*
+			 * Start a new segment.
+			 * Take one away and save for chain SGE
+			 */
+
+			sgc->sge.prd.curr =
+				(struct atto_physical_region_description *)sgl
+				->
+				virt_addr;
+			sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1;
+		}
+
+		sgc->sge.prd.sge_cnt--;
+		/* Build the simple SGE */
+		sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len);
+		sgc->sge.prd.curr->address = cpu_to_le64(addr);
+
+		/* Used another element.  Point to the next one */
+
+		sgc->sge.prd.curr++;
+
+		/* Adjust length related fields */
+
+		sgc->cur_offset += len;
+		sgc->length -= len;
+
+		/*
+		 * Check if we previously split an entry.  If so we have to
+		 * pick up where we left off.
+ */ + + if (rem) { + addr += len; + len = rem; + rem = 0; + goto another_entry; + } + } + + if (!list_empty(&rq->sg_table_head)) { + if (sgc->sge.prd.chain) { + sgc->sge.prd.chain->ctl_len |= + cpu_to_le32(sgc->sge.prd.sgl_max_cnt + - sgc->sge.prd.sge_cnt + - numchain); + } + } + + return true; +} + +bool esas2r_build_sg_list_prd(struct esas2r_adapter *a, + struct esas2r_sg_context *sgc) +{ + struct esas2r_request *rq = sgc->first_req; + u32 len = sgc->length; + struct esas2r_target *t = a->targetdb + rq->target_id; + u8 is_i_o = 0; + u16 reqsize; + struct atto_physical_region_description *curr_iblk_chn; + u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0]; + + /* + * extract LBA from command so we can determine + * the I-Block boundary + */ + + if (rq->vrq->scsi.function == VDA_FUNC_SCSI + && t->target_state == TS_PRESENT + && !(t->flags & TF_PASS_THRU)) { + u32 lbalo = 0; + + switch (rq->vrq->scsi.cdb[0]) { + case READ_16: + case WRITE_16: + { + lbalo = + MAKEDWORD(MAKEWORD(cdb[9], + cdb[8]), + MAKEWORD(cdb[7], + cdb[6])); + is_i_o = 1; + break; + } + + case READ_12: + case WRITE_12: + case READ_10: + case WRITE_10: + { + lbalo = + MAKEDWORD(MAKEWORD(cdb[5], + cdb[4]), + MAKEWORD(cdb[3], + cdb[2])); + is_i_o = 1; + break; + } + + case READ_6: + case WRITE_6: + { + lbalo = + MAKEDWORD(MAKEWORD(cdb[3], + cdb[2]), + MAKEWORD(cdb[1] & 0x1F, + 0)); + is_i_o = 1; + break; + } + + default: + break; + } + + if (is_i_o) { + u32 startlba; + + rq->vrq->scsi.iblk_cnt_prd = 0; + + /* Determine size of 1st I-block PRD list */ + startlba = t->inter_block - (lbalo & (t->inter_block - + 1)); + sgc->length = startlba * t->block_size; + + /* Chk if the 1st iblk chain starts at base of Iblock */ + if ((lbalo & (t->inter_block - 1)) == 0) + rq->flags |= RF_1ST_IBLK_BASE; + + if (sgc->length > len) + sgc->length = len; + } else { + sgc->length = len; + } + } else { + sgc->length = len; + } + + /* get our starting chain address */ + + curr_iblk_chn = + (struct atto_physical_region_description *)sgc->sge.a64.curr; + + sgc->sge.prd.sgl_max_cnt = sgl_page_size / + sizeof(struct + atto_physical_region_description); + + /* create all of the I-block PRD lists */ + + while (len) { + sgc->sge.prd.sge_cnt = 0; + sgc->sge.prd.chain = NULL; + sgc->sge.prd.curr = curr_iblk_chn; + + /* increment to next I-Block */ + + len -= sgc->length; + + /* go build the next I-Block PRD list */ + + if (unlikely(!esas2r_build_prd_iblk(a, sgc))) + return false; + + curr_iblk_chn++; + + if (is_i_o) { + rq->vrq->scsi.iblk_cnt_prd++; + + if (len > t->inter_byte) + sgc->length = t->inter_byte; + else + sgc->length = len; + } + } + + /* figure out the size used of the VDA request */ + + reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq)) + / sizeof(u32); + + /* + * only update the request size if it is bigger than what is + * already there. we can come in here twice for some management + * commands. + */ + + if (reqsize > rq->vda_req_sz) + rq->vda_req_sz = reqsize; + + return true; +} + +static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime) +{ + u32 delta = currtime - a->chip_init_time; + + if (delta <= ESAS2R_CHPRST_WAIT_TIME) { + /* Wait before accessing registers */ + } else if (delta >= ESAS2R_CHPRST_TIME) { + /* + * The last reset failed so try again. Reset + * processing will give up after three tries. 
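+		 * (The three-tries accounting is the chip_uptime logic
+		 * in esas2r_chip_rst_needed_during_tasklet().)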
+ */
+                esas2r_local_reset_adapter(a);
+        } else {
+                /* We can now see if the firmware is ready */
+                u32 doorbell;
+
+                doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+                if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) {
+                        esas2r_force_interrupt(a);
+                } else {
+                        u32 ver = (doorbell & DRBL_FW_VER_MSK);
+
+                        /* Driver supports API version 0 and 1 */
+                        esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+                                                    doorbell);
+                        if (ver == DRBL_FW_VER_0) {
+                                set_bit(AF_CHPRST_DETECTED, &a->flags);
+                                set_bit(AF_LEGACY_SGE_MODE, &a->flags);
+
+                                a->max_vdareq_size = 128;
+                                a->build_sgl = esas2r_build_sg_list_sge;
+                        } else if (ver == DRBL_FW_VER_1) {
+                                set_bit(AF_CHPRST_DETECTED, &a->flags);
+                                clear_bit(AF_LEGACY_SGE_MODE, &a->flags);
+
+                                a->max_vdareq_size = 1024;
+                                a->build_sgl = esas2r_build_sg_list_prd;
+                        } else {
+                                esas2r_local_reset_adapter(a);
+                        }
+                }
+        }
+}
+
+
+/* This function must be called once per timer tick */
+void esas2r_timer_tick(struct esas2r_adapter *a)
+{
+        u32 currtime = jiffies_to_msecs(jiffies);
+        u32 deltatime = currtime - a->last_tick_time;
+
+        a->last_tick_time = currtime;
+
+        /* count down the uptime */
+        if (a->chip_uptime &&
+            !test_bit(AF_CHPRST_PENDING, &a->flags) &&
+            !test_bit(AF_DISC_PENDING, &a->flags)) {
+                if (deltatime >= a->chip_uptime)
+                        a->chip_uptime = 0;
+                else
+                        a->chip_uptime -= deltatime;
+        }
+
+        if (test_bit(AF_CHPRST_PENDING, &a->flags)) {
+                if (!test_bit(AF_CHPRST_NEEDED, &a->flags) &&
+                    !test_bit(AF_CHPRST_DETECTED, &a->flags))
+                        esas2r_handle_pending_reset(a, currtime);
+        } else {
+                if (test_bit(AF_DISC_PENDING, &a->flags))
+                        esas2r_disc_check_complete(a);
+                if (test_bit(AF_HEARTBEAT_ENB, &a->flags)) {
+                        if (test_bit(AF_HEARTBEAT, &a->flags)) {
+                                if ((currtime - a->heartbeat_time) >=
+                                    ESAS2R_HEARTBEAT_TIME) {
+                                        clear_bit(AF_HEARTBEAT, &a->flags);
+                                        esas2r_hdebug("heartbeat failed");
+                                        esas2r_log(ESAS2R_LOG_CRIT,
+                                                   "heartbeat failed");
+                                        esas2r_bugon();
+                                        esas2r_local_reset_adapter(a);
+                                }
+                        } else {
+                                set_bit(AF_HEARTBEAT, &a->flags);
+                                a->heartbeat_time = currtime;
+                                esas2r_force_interrupt(a);
+                        }
+                }
+        }
+
+        if (atomic_read(&a->disable_cnt) == 0)
+                esas2r_do_deferred_processes(a);
+}
+
+/*
+ * Send the specified task management function to the target and LUN
+ * specified in rqaux.  In addition, immediately abort any commands that
+ * are queued but not sent to the device according to the rules specified
+ * by the task management function.
+ */
+bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
+                           struct esas2r_request *rqaux, u8 task_mgt_func)
+{
+        u16 targetid = rqaux->target_id;
+        u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags);
+        bool ret = false;
+        struct esas2r_request *rq;
+        struct list_head *next, *element;
+        unsigned long flags;
+
+        LIST_HEAD(comp_list);
+
+        esas2r_trace_enter();
+        esas2r_trace("rqaux:%p", rqaux);
+        esas2r_trace("task_mgt_func:%x", task_mgt_func);
+        spin_lock_irqsave(&a->queue_lock, flags);
+
+        /* search the defer queue looking for requests for the device */
+        list_for_each_safe(element, next, &a->defer_list) {
+                rq = list_entry(element, struct esas2r_request, req_list);
+
+                if (rq->vrq->scsi.function == VDA_FUNC_SCSI
+                    && rq->target_id == targetid
+                    && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
+                        || task_mgt_func == 0x20)) {  /* target reset */
+                        /* Found a request affected by the task management */
+                        if (rq->req_stat == RS_PENDING) {
+                                /*
+                                 * The request is pending or waiting.  We can
+                                 * safely complete the request now.
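
The defer-queue walk above uses the delete-safe iterator because matching requests are moved off the list mid-walk. A generic version of that move-to-local-list pattern, sketched with an invented request type rather than the driver's structures, is:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_req {
        struct list_head node;
        u16 target_id;
};

/* Move every request for @target from @defer onto caller-owned @done. */
static void purge_target(struct list_head *defer, struct list_head *done,
                         spinlock_t *lock, u16 target)
{
        struct demo_req *rq, *tmp;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_for_each_entry_safe(rq, tmp, defer, node) {
                if (rq->target_id == target)
                        list_move_tail(&rq->node, done); /* safe mid-walk */
        }
        spin_unlock_irqrestore(lock, flags);
}
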
+ */ + if (esas2r_ioreq_aborted(a, rq, RS_ABORTED)) + list_add_tail(&rq->comp_list, + &comp_list); + } + } + } + + /* Send the task management request to the firmware */ + rqaux->sense_len = 0; + rqaux->vrq->scsi.length = 0; + rqaux->target_id = targetid; + rqaux->vrq->scsi.flags |= cpu_to_le32(lun); + memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb)); + rqaux->vrq->scsi.flags |= + cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK)); + + if (test_bit(AF_FLASHING, &a->flags)) { + /* Assume success. if there are active requests, return busy */ + rqaux->req_stat = RS_SUCCESS; + + list_for_each_safe(element, next, &a->active_list) { + rq = list_entry(element, struct esas2r_request, + req_list); + if (rq->vrq->scsi.function == VDA_FUNC_SCSI + && rq->target_id == targetid + && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun + || task_mgt_func == 0x20)) /* target reset */ + rqaux->req_stat = RS_BUSY; + } + + ret = true; + } + + spin_unlock_irqrestore(&a->queue_lock, flags); + + if (!test_bit(AF_FLASHING, &a->flags)) + esas2r_start_request(a, rqaux); + + esas2r_comp_list_drain(a, &comp_list); + + if (atomic_read(&a->disable_cnt) == 0) + esas2r_do_deferred_processes(a); + + esas2r_trace_exit(); + + return ret; +} + +void esas2r_reset_bus(struct esas2r_adapter *a) +{ + esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset"); + + if (!test_bit(AF_DEGRADED_MODE, &a->flags) && + !test_bit(AF_CHPRST_PENDING, &a->flags) && + !test_bit(AF_DISC_PENDING, &a->flags)) { + set_bit(AF_BUSRST_NEEDED, &a->flags); + set_bit(AF_BUSRST_PENDING, &a->flags); + set_bit(AF_OS_RESET, &a->flags); + + esas2r_schedule_tasklet(a); + } +} + +bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq, + u8 status) +{ + esas2r_trace_enter(); + esas2r_trace("rq:%p", rq); + list_del_init(&rq->req_list); + if (rq->timeout > RQ_MAX_TIMEOUT) { + /* + * The request timed out, but we could not abort it because a + * chip reset occurred. Return busy status. + */ + rq->req_stat = RS_BUSY; + esas2r_trace_exit(); + return true; + } + + rq->req_stat = status; + esas2r_trace_exit(); + return true; +} diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c new file mode 100644 index 00000000000..d89a0277a8e --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -0,0 +1,2110 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_ioctl.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +/* + * Buffered ioctl handlers. A buffered ioctl is one which requires that we + * allocate a DMA-able memory area to communicate with the firmware. In + * order to prevent continually allocating and freeing consistent memory, + * we will allocate a global buffer the first time we need it and re-use + * it for subsequent ioctl calls that require it. + */ + +u8 *esas2r_buffered_ioctl; +dma_addr_t esas2r_buffered_ioctl_addr; +u32 esas2r_buffered_ioctl_size; +struct pci_dev *esas2r_buffered_ioctl_pcid; + +static DEFINE_SEMAPHORE(buffered_ioctl_semaphore); +typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *, + struct esas2r_request *, + struct esas2r_sg_context *, + void *); +typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *, + struct esas2r_request *, void *); + +struct esas2r_buffered_ioctl { + struct esas2r_adapter *a; + void *ioctl; + u32 length; + u32 control_code; + u32 offset; + BUFFERED_IOCTL_CALLBACK + callback; + void *context; + BUFFERED_IOCTL_DONE_CALLBACK + done_callback; + void *done_context; + +}; + +static void complete_fm_api_req(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + a->fm_api_command_done = 1; + wake_up_interruptible(&a->fm_api_waiter); +} + +/* Callbacks for building scatter/gather lists for FM API requests */ +static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; + int offset = sgc->cur_offset - a->save_offset; + + (*addr) = a->firmware.phys + offset; + return a->firmware.orig_len - offset; +} + +static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; + int offset = sgc->cur_offset - a->save_offset; + + (*addr) = a->firmware.header_buff_phys + offset; + return sizeof(struct esas2r_flash_img) - offset; +} + +/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. 
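
The two get_physaddr_* helpers above follow one contract: given the SG context's current offset, store the DMA address of that byte and return how many contiguous bytes remain in the backing buffer. A generic callback under that contract, using hypothetical buffer globals in place of the adapter fields, would look like:

#include <linux/types.h>

static u8 *demo_buf_virt;          /* hypothetical backing buffer */
static dma_addr_t demo_buf_phys;
static u32 demo_buf_size;

struct demo_sg_context { u8 *cur_offset; };

/* Return the DMA address at cur_offset and the contiguous bytes left. */
static u32 get_physaddr_demo(struct demo_sg_context *sgc, u64 *addr)
{
        u32 offset = sgc->cur_offset - demo_buf_virt;

        *addr = demo_buf_phys + offset;
        return demo_buf_size - offset;
}
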
*/
+static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+        struct esas2r_request *rq;
+
+        if (down_interruptible(&a->fm_api_semaphore)) {
+                fi->status = FI_STAT_BUSY;
+                return;
+        }
+
+        rq = esas2r_alloc_request(a);
+        if (rq == NULL) {
+                up(&a->fm_api_semaphore);
+                fi->status = FI_STAT_BUSY;
+                return;
+        }
+
+        if (fi == &a->firmware.header) {
+                a->firmware.header_buff =
+                        dma_alloc_coherent(&a->pcid->dev,
+                                           sizeof(struct esas2r_flash_img),
+                                           (dma_addr_t *)&a->firmware.header_buff_phys,
+                                           GFP_KERNEL);
+
+                if (a->firmware.header_buff == NULL) {
+                        esas2r_debug("failed to allocate header buffer!");
+                        esas2r_free_request(a, rq);
+                        up(&a->fm_api_semaphore);
+                        fi->status = FI_STAT_BUSY;
+                        return;
+                }
+
+                memcpy(a->firmware.header_buff, fi,
+                       sizeof(struct esas2r_flash_img));
+                a->save_offset = a->firmware.header_buff;
+                a->fm_api_sgc.get_phys_addr =
+                        (PGETPHYSADDR)get_physaddr_fm_api_header;
+        } else {
+                a->save_offset = (u8 *)fi;
+                a->fm_api_sgc.get_phys_addr =
+                        (PGETPHYSADDR)get_physaddr_fm_api;
+        }
+
+        rq->comp_cb = complete_fm_api_req;
+        a->fm_api_command_done = 0;
+        a->fm_api_sgc.cur_offset = a->save_offset;
+
+        if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
+                           &a->fm_api_sgc))
+                goto all_done;
+
+        /* Now wait around for it to complete. */
+        while (!a->fm_api_command_done)
+                wait_event_interruptible(a->fm_api_waiter,
+                                         a->fm_api_command_done);
+all_done:
+        if (fi == &a->firmware.header) {
+                memcpy(fi, a->firmware.header_buff,
+                       sizeof(struct esas2r_flash_img));
+
+                dma_free_coherent(&a->pcid->dev,
+                                  sizeof(struct esas2r_flash_img),
+                                  a->firmware.header_buff,
+                                  (dma_addr_t)a->firmware.header_buff_phys);
+        }
+
+        up(&a->fm_api_semaphore);
+        esas2r_free_request(a, rq);
+}
+
+static void complete_nvr_req(struct esas2r_adapter *a,
+                             struct esas2r_request *rq)
+{
+        a->nvram_command_done = 1;
+        wake_up_interruptible(&a->nvram_waiter);
+}
+
+/* Callback for building scatter/gather lists for buffered ioctls */
+static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
+                                       u64 *addr)
+{
+        int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;
+
+        (*addr) = esas2r_buffered_ioctl_addr + offset;
+        return esas2r_buffered_ioctl_size - offset;
+}
+
+static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
+                                        struct esas2r_request *rq)
+{
+        a->buffered_ioctl_done = 1;
+        wake_up_interruptible(&a->buffered_ioctl_waiter);
+}
+
+static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
+{
+        struct esas2r_adapter *a = bi->a;
+        struct esas2r_request *rq;
+        struct esas2r_sg_context sgc;
+        u8 result = IOCTL_SUCCESS;
+
+        if (down_interruptible(&buffered_ioctl_semaphore))
+                return IOCTL_OUT_OF_RESOURCES;
+
+        /* allocate a buffer or use the existing buffer.
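
The buffer policy that follows is grow-only: the global DMA buffer is kept across calls and reallocated only when a request needs more room. Reduced to its essentials, with hypothetical names but the same dma_alloc_coherent/dma_free_coherent pairing, the idea is:

#include <linux/dma-mapping.h>

static void *demo_ioctl_buf;
static dma_addr_t demo_ioctl_addr;
static u32 demo_ioctl_size;

/* Reuse the cached buffer when it is big enough; otherwise grow it. */
static void *demo_get_ioctl_buf(struct device *dev, u32 needed)
{
        if (demo_ioctl_buf && demo_ioctl_size >= needed)
                return demo_ioctl_buf;

        if (demo_ioctl_buf)
                dma_free_coherent(dev, demo_ioctl_size,
                                  demo_ioctl_buf, demo_ioctl_addr);

        demo_ioctl_size = needed;
        demo_ioctl_buf = dma_alloc_coherent(dev, needed,
                                            &demo_ioctl_addr, GFP_KERNEL);
        return demo_ioctl_buf;
}
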
*/ + if (esas2r_buffered_ioctl) { + if (esas2r_buffered_ioctl_size < bi->length) { + /* free the too-small buffer and get a new one */ + dma_free_coherent(&a->pcid->dev, + (size_t)esas2r_buffered_ioctl_size, + esas2r_buffered_ioctl, + esas2r_buffered_ioctl_addr); + + goto allocate_buffer; + } + } else { +allocate_buffer: + esas2r_buffered_ioctl_size = bi->length; + esas2r_buffered_ioctl_pcid = a->pcid; + esas2r_buffered_ioctl = dma_alloc_coherent(&a->pcid->dev, + (size_t) + esas2r_buffered_ioctl_size, + & + esas2r_buffered_ioctl_addr, + GFP_KERNEL); + } + + if (!esas2r_buffered_ioctl) { + esas2r_log(ESAS2R_LOG_CRIT, + "could not allocate %d bytes of consistent memory " + "for a buffered ioctl!", + bi->length); + + esas2r_debug("buffered ioctl alloc failure"); + result = IOCTL_OUT_OF_RESOURCES; + goto exit_cleanly; + } + + memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length); + + rq = esas2r_alloc_request(a); + if (rq == NULL) { + esas2r_log(ESAS2R_LOG_CRIT, + "could not allocate an internal request"); + + result = IOCTL_OUT_OF_RESOURCES; + esas2r_debug("buffered ioctl - no requests"); + goto exit_cleanly; + } + + a->buffered_ioctl_done = 0; + rq->comp_cb = complete_buffered_ioctl_req; + sgc.cur_offset = esas2r_buffered_ioctl + bi->offset; + sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl; + sgc.length = esas2r_buffered_ioctl_size; + + if (!(*bi->callback)(a, rq, &sgc, bi->context)) { + /* completed immediately, no need to wait */ + a->buffered_ioctl_done = 0; + goto free_andexit_cleanly; + } + + /* now wait around for it to complete. */ + while (!a->buffered_ioctl_done) + wait_event_interruptible(a->buffered_ioctl_waiter, + a->buffered_ioctl_done); + +free_andexit_cleanly: + if (result == IOCTL_SUCCESS && bi->done_callback) + (*bi->done_callback)(a, rq, bi->done_context); + + esas2r_free_request(a, rq); + +exit_cleanly: + if (result == IOCTL_SUCCESS) + memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length); + + up(&buffered_ioctl_semaphore); + return result; +} + +/* SMP ioctl support */ +static int smp_ioctl_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc, void *context) +{ + struct atto_ioctl_smp *si = + (struct atto_ioctl_smp *)esas2r_buffered_ioctl; + + esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); + esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP); + + if (!esas2r_build_sg_list(a, rq, sgc)) { + si->status = ATTO_STS_OUT_OF_RSRC; + return false; + } + + esas2r_start_request(a, rq); + return true; +} + +static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si) +{ + struct esas2r_buffered_ioctl bi; + + memset(&bi, 0, sizeof(bi)); + + bi.a = a; + bi.ioctl = si; + bi.length = sizeof(struct atto_ioctl_smp) + + le32_to_cpu(si->req_length) + + le32_to_cpu(si->rsp_length); + bi.offset = 0; + bi.callback = smp_ioctl_callback; + return handle_buffered_ioctl(&bi); +} + + +/* CSMI ioctl support */ +static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id); + rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun); + + /* Now call the original completion callback. */ + (*rq->aux_req_cb)(a, rq); +} + +/* Tunnel a CSMI IOCTL to the back end driver for processing. 
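
csmi_ioctl_tunnel() below "usurps" the request's completion callback: it saves the original callback, substitutes its own, and the substitute finishes by invoking the saved one. The chaining pattern in isolation, with invented types standing in for esas2r_request, is:

struct demo_request;
typedef void (*demo_cb)(struct demo_request *rq);

struct demo_request {
        demo_cb comp_cb;     /* current completion callback */
        demo_cb saved_cb;    /* original, restored by chaining */
};

static void tunnel_comp_cb(struct demo_request *rq)
{
        /* ... fix up tunneled response fields here ... */
        rq->saved_cb(rq);            /* then run the original callback */
}

static void usurp_completion(struct demo_request *rq)
{
        rq->saved_cb = rq->comp_cb;  /* remember the caller's callback */
        rq->comp_cb = tunnel_comp_cb;
}
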
*/ +static bool csmi_ioctl_tunnel(struct esas2r_adapter *a, + union atto_ioctl_csmi *ci, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc, + u32 ctrl_code, + u16 target_id) +{ + struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return false; + + esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); + esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI); + ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code); + ioctl->csmi.target_id = cpu_to_le16(target_id); + ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags); + + /* + * Always usurp the completion callback since the interrupt callback + * mechanism may be used. + */ + rq->aux_req_cx = ci; + rq->aux_req_cb = rq->comp_cb; + rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb; + + if (!esas2r_build_sg_list(a, rq, sgc)) + return false; + + esas2r_start_request(a, rq); + return true; +} + +static bool check_lun(struct scsi_lun lun) +{ + bool result; + + result = ((lun.scsi_lun[7] == 0) && + (lun.scsi_lun[6] == 0) && + (lun.scsi_lun[5] == 0) && + (lun.scsi_lun[4] == 0) && + (lun.scsi_lun[3] == 0) && + (lun.scsi_lun[2] == 0) && +/* Byte 1 is intentionally skipped */ + (lun.scsi_lun[0] == 0)); + + return result; +} + +static int csmi_ioctl_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc, void *context) +{ + struct atto_csmi *ci = (struct atto_csmi *)context; + union atto_ioctl_csmi *ioctl_csmi = + (union atto_ioctl_csmi *)esas2r_buffered_ioctl; + u8 path = 0; + u8 tid = 0; + u8 lun = 0; + u32 sts = CSMI_STS_SUCCESS; + struct esas2r_target *t; + unsigned long flags; + + if (ci->control_code == CSMI_CC_GET_DEV_ADDR) { + struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr; + + path = gda->path_id; + tid = gda->target_id; + lun = gda->lun; + } else if (ci->control_code == CSMI_CC_TASK_MGT) { + struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt; + + path = tm->path_id; + tid = tm->target_id; + lun = tm->lun; + } + + if (path > 0) { + rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32( + CSMI_STS_INV_PARAM); + return false; + } + + rq->target_id = tid; + rq->vrq->scsi.flags |= cpu_to_le32(lun); + + switch (ci->control_code) { + case CSMI_CC_GET_DRVR_INFO: + { + struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info; + + strcpy(gdi->description, esas2r_get_model_name(a)); + gdi->csmi_major_rev = CSMI_MAJOR_REV; + gdi->csmi_minor_rev = CSMI_MINOR_REV; + break; + } + + case CSMI_CC_GET_CNTLR_CFG: + { + struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg; + + gcc->base_io_addr = 0; + pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2, + &gcc->base_memaddr_lo); + pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3, + &gcc->base_memaddr_hi); + gcc->board_id = MAKEDWORD(a->pcid->subsystem_device, + a->pcid->subsystem_vendor); + gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN; + gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA; + gcc->io_bus_type = CSMI_BUS_TYPE_PCI; + gcc->pci_addr.bus_num = a->pcid->bus->number; + gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn); + gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn); + + memset(gcc->serial_num, 0, sizeof(gcc->serial_num)); + + gcc->major_rev = LOBYTE(LOWORD(a->fw_version)); + gcc->minor_rev = HIBYTE(LOWORD(a->fw_version)); + gcc->build_rev = LOBYTE(HIWORD(a->fw_version)); + gcc->release_rev = HIBYTE(HIWORD(a->fw_version)); + gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver)); + gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver)); + gcc->bios_build_rev = LOWORD(a->flash_ver); + + if 
(test_bit(AF2_THUNDERLINK, &a->flags2)) + gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA + | CSMI_CNTLRF_SATA_HBA; + else + gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID + | CSMI_CNTLRF_SATA_RAID; + + gcc->rrom_major_rev = 0; + gcc->rrom_minor_rev = 0; + gcc->rrom_build_rev = 0; + gcc->rrom_release_rev = 0; + gcc->rrom_biosmajor_rev = 0; + gcc->rrom_biosminor_rev = 0; + gcc->rrom_biosbuild_rev = 0; + gcc->rrom_biosrelease_rev = 0; + break; + } + + case CSMI_CC_GET_CNTLR_STS: + { + struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + gcs->status = CSMI_CNTLR_STS_FAILED; + else + gcs->status = CSMI_CNTLR_STS_GOOD; + + gcs->offline_reason = CSMI_OFFLINE_NO_REASON; + break; + } + + case CSMI_CC_FW_DOWNLOAD: + case CSMI_CC_GET_RAID_INFO: + case CSMI_CC_GET_RAID_CFG: + + sts = CSMI_STS_BAD_CTRL_CODE; + break; + + case CSMI_CC_SMP_PASSTHRU: + case CSMI_CC_SSP_PASSTHRU: + case CSMI_CC_STP_PASSTHRU: + case CSMI_CC_GET_PHY_INFO: + case CSMI_CC_SET_PHY_INFO: + case CSMI_CC_GET_LINK_ERRORS: + case CSMI_CC_GET_SATA_SIG: + case CSMI_CC_GET_CONN_INFO: + case CSMI_CC_PHY_CTRL: + + if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc, + ci->control_code, + ESAS2R_TARG_ID_INV)) { + sts = CSMI_STS_FAILED; + break; + } + + return true; + + case CSMI_CC_GET_SCSI_ADDR: + { + struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr; + + struct scsi_lun lun; + + memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun)); + + if (!check_lun(lun)) { + sts = CSMI_STS_NO_SCSI_ADDR; + break; + } + + /* make sure the device is present */ + spin_lock_irqsave(&a->mem_lock, flags); + t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr); + spin_unlock_irqrestore(&a->mem_lock, flags); + + if (t == NULL) { + sts = CSMI_STS_NO_SCSI_ADDR; + break; + } + + gsa->host_index = 0xFF; + gsa->lun = gsa->sas_lun[1]; + rq->target_id = esas2r_targ_get_id(t, a); + break; + } + + case CSMI_CC_GET_DEV_ADDR: + { + struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr; + + /* make sure the target is present */ + t = a->targetdb + rq->target_id; + + if (t >= a->targetdb_end + || t->target_state != TS_PRESENT + || t->sas_addr == 0) { + sts = CSMI_STS_NO_DEV_ADDR; + break; + } + + /* fill in the result */ + *(u64 *)gda->sas_addr = t->sas_addr; + memset(gda->sas_lun, 0, sizeof(gda->sas_lun)); + gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags); + break; + } + + case CSMI_CC_TASK_MGT: + + /* make sure the target is present */ + t = a->targetdb + rq->target_id; + + if (t >= a->targetdb_end + || t->target_state != TS_PRESENT + || !(t->flags & TF_PASS_THRU)) { + sts = CSMI_STS_NO_DEV_ADDR; + break; + } + + if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc, + ci->control_code, + t->phys_targ_id)) { + sts = CSMI_STS_FAILED; + break; + } + + return true; + + default: + + sts = CSMI_STS_BAD_CTRL_CODE; + break; + } + + rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts); + + return false; +} + + +static void csmi_ioctl_done_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, void *context) +{ + struct atto_csmi *ci = (struct atto_csmi *)context; + union atto_ioctl_csmi *ioctl_csmi = + (union atto_ioctl_csmi *)esas2r_buffered_ioctl; + + switch (ci->control_code) { + case CSMI_CC_GET_DRVR_INFO: + { + struct atto_csmi_get_driver_info *gdi = + &ioctl_csmi->drvr_info; + + strcpy(gdi->name, ESAS2R_VERSION_STR); + + gdi->major_rev = ESAS2R_MAJOR_REV; + gdi->minor_rev = ESAS2R_MINOR_REV; + gdi->build_rev = 0; + gdi->release_rev = 0; + break; + } + + case CSMI_CC_GET_SCSI_ADDR: + { + struct 
atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr; + + if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) == + CSMI_STS_SUCCESS) { + gsa->target_id = rq->target_id; + gsa->path_id = 0; + } + + break; + } + } + + ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status); +} + + +static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci) +{ + struct esas2r_buffered_ioctl bi; + + memset(&bi, 0, sizeof(bi)); + + bi.a = a; + bi.ioctl = &ci->data; + bi.length = sizeof(union atto_ioctl_csmi); + bi.offset = 0; + bi.callback = csmi_ioctl_callback; + bi.context = ci; + bi.done_callback = csmi_ioctl_done_callback; + bi.done_context = ci; + + return handle_buffered_ioctl(&bi); +} + +/* ATTO HBA ioctl support */ + +/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */ +static bool hba_ioctl_tunnel(struct esas2r_adapter *a, + struct atto_ioctl *hi, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc) +{ + esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); + + esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA); + + if (!esas2r_build_sg_list(a, rq, sgc)) { + hi->status = ATTO_STS_OUT_OF_RSRC; + + return false; + } + + esas2r_start_request(a, rq); + + return true; +} + +static void scsi_passthru_comp_cb(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx; + struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru; + u8 sts = ATTO_SPT_RS_FAILED; + + spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat; + spt->sense_length = rq->sense_len; + spt->residual_length = + le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length); + + switch (rq->req_stat) { + case RS_SUCCESS: + case RS_SCSI_ERROR: + sts = ATTO_SPT_RS_SUCCESS; + break; + case RS_UNDERRUN: + sts = ATTO_SPT_RS_UNDERRUN; + break; + case RS_OVERRUN: + sts = ATTO_SPT_RS_OVERRUN; + break; + case RS_SEL: + case RS_SEL2: + sts = ATTO_SPT_RS_NO_DEVICE; + break; + case RS_NO_LUN: + sts = ATTO_SPT_RS_NO_LUN; + break; + case RS_TIMEOUT: + sts = ATTO_SPT_RS_TIMEOUT; + break; + case RS_DEGRADED: + sts = ATTO_SPT_RS_DEGRADED; + break; + case RS_BUSY: + sts = ATTO_SPT_RS_BUSY; + break; + case RS_ABORTED: + sts = ATTO_SPT_RS_ABORTED; + break; + case RS_RESET: + sts = ATTO_SPT_RS_BUS_RESET; + break; + } + + spt->req_status = sts; + + /* Update the target ID to the next one present. */ + spt->target_id = + esas2r_targ_db_find_next_present(a, (u16)spt->target_id); + + /* Done, call the completion callback. 
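
Updating spt->target_id to the next present device turns the passthru reply into an enumeration cursor: a caller can sweep the target database by feeding each reply's target_id back into the next request. A runnable toy model of that sweep, with a stubbed presence table in place of the driver's target database:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_TARGETS 8
#define TARG_ID_INV 0xFFFFu            /* hypothetical sentinel */

static bool present[MAX_TARGETS] = { false, true, false, true };

static uint16_t find_next_present(uint16_t from)
{
        for (uint16_t id = from; id < MAX_TARGETS; id++)
                if (present[id])
                        return id;
        return TARG_ID_INV;
}

int main(void)
{
        /* Sweep the way a caller chases the updated target_id field. */
        for (uint16_t id = find_next_present(0);
             id != TARG_ID_INV;
             id = find_next_present(id + 1))
                printf("target %u present\n", id);
        return 0;
}
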
*/ + (*rq->aux_req_cb)(a, rq); +} + +static int hba_ioctl_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc, + void *context) +{ + struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl; + + hi->status = ATTO_STS_SUCCESS; + + switch (hi->function) { + case ATTO_FUNC_GET_ADAP_INFO: + { + u8 *class_code = (u8 *)&a->pcid->class; + + struct atto_hba_get_adapter_info *gai = + &hi->data.get_adap_info; + int pcie_cap_reg; + + if (hi->flags & HBAF_TUNNEL) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + if (hi->version > ATTO_VER_GET_ADAP_INFO0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_GET_ADAP_INFO0; + break; + } + + memset(gai, 0, sizeof(*gai)); + + gai->pci.vendor_id = a->pcid->vendor; + gai->pci.device_id = a->pcid->device; + gai->pci.ss_vendor_id = a->pcid->subsystem_vendor; + gai->pci.ss_device_id = a->pcid->subsystem_device; + gai->pci.class_code[0] = class_code[0]; + gai->pci.class_code[1] = class_code[1]; + gai->pci.class_code[2] = class_code[2]; + gai->pci.rev_id = a->pcid->revision; + gai->pci.bus_num = a->pcid->bus->number; + gai->pci.dev_num = PCI_SLOT(a->pcid->devfn); + gai->pci.func_num = PCI_FUNC(a->pcid->devfn); + + pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP); + if (pcie_cap_reg) { + u16 stat; + u32 caps; + + pci_read_config_word(a->pcid, + pcie_cap_reg + PCI_EXP_LNKSTA, + &stat); + pci_read_config_dword(a->pcid, + pcie_cap_reg + PCI_EXP_LNKCAP, + &caps); + + gai->pci.link_speed_curr = + (u8)(stat & PCI_EXP_LNKSTA_CLS); + gai->pci.link_speed_max = + (u8)(caps & PCI_EXP_LNKCAP_SLS); + gai->pci.link_width_curr = + (u8)((stat & PCI_EXP_LNKSTA_NLW) + >> PCI_EXP_LNKSTA_NLW_SHIFT); + gai->pci.link_width_max = + (u8)((caps & PCI_EXP_LNKCAP_MLW) + >> 4); + } + + gai->pci.msi_vector_cnt = 1; + + if (a->pcid->msix_enabled) + gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX; + else if (a->pcid->msi_enabled) + gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI; + else + gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY; + + gai->adap_type = ATTO_GAI_AT_ESASRAID2; + + if (test_bit(AF2_THUNDERLINK, &a->flags2)) + gai->adap_type = ATTO_GAI_AT_TLSASHBA; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + gai->adap_flags |= ATTO_GAI_AF_DEGRADED; + + gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP | + ATTO_GAI_AF_DEVADDR_SUPP; + + if (a->pcid->subsystem_device == ATTO_ESAS_R60F + || a->pcid->subsystem_device == ATTO_ESAS_R608 + || a->pcid->subsystem_device == ATTO_ESAS_R644 + || a->pcid->subsystem_device == ATTO_TSSC_3808E) + gai->adap_flags |= ATTO_GAI_AF_VIRT_SES; + + gai->num_ports = ESAS2R_NUM_PHYS; + gai->num_phys = ESAS2R_NUM_PHYS; + + strcpy(gai->firmware_rev, a->fw_rev); + strcpy(gai->flash_rev, a->flash_rev); + strcpy(gai->model_name_short, esas2r_get_model_name_short(a)); + strcpy(gai->model_name, esas2r_get_model_name(a)); + + gai->num_targets = ESAS2R_MAX_TARGETS; + + gai->num_busses = 1; + gai->num_targsper_bus = gai->num_targets; + gai->num_lunsper_targ = 256; + + if (a->pcid->subsystem_device == ATTO_ESAS_R6F0 + || a->pcid->subsystem_device == ATTO_ESAS_R60F) + gai->num_connectors = 4; + else + gai->num_connectors = 2; + + gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP; + + gai->num_targets_backend = a->num_targets_backend; + + gai->tunnel_flags = a->ioctl_tunnel + & (ATTO_GAI_TF_MEM_RW + | ATTO_GAI_TF_TRACE + | ATTO_GAI_TF_SCSI_PASS_THRU + | ATTO_GAI_TF_GET_DEV_ADDR + | ATTO_GAI_TF_PHY_CTRL + | ATTO_GAI_TF_CONN_CTRL + | ATTO_GAI_TF_GET_DEV_INFO); + break; + } + + case ATTO_FUNC_GET_ADAP_ADDR: + { + 
struct atto_hba_get_adapter_address *gaa = + &hi->data.get_adap_addr; + + if (hi->flags & HBAF_TUNNEL) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + if (hi->version > ATTO_VER_GET_ADAP_ADDR0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_GET_ADAP_ADDR0; + } else if (gaa->addr_type == ATTO_GAA_AT_PORT + || gaa->addr_type == ATTO_GAA_AT_NODE) { + if (gaa->addr_type == ATTO_GAA_AT_PORT + && gaa->port_id >= ESAS2R_NUM_PHYS) { + hi->status = ATTO_STS_NOT_APPL; + } else { + memcpy((u64 *)gaa->address, + &a->nvram->sas_addr[0], sizeof(u64)); + gaa->addr_len = sizeof(u64); + } + } else { + hi->status = ATTO_STS_INV_PARAM; + } + + break; + } + + case ATTO_FUNC_MEM_RW: + { + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + hi->status = ATTO_STS_UNSUPPORTED; + + break; + } + + case ATTO_FUNC_TRACE: + { + struct atto_hba_trace *trc = &hi->data.trace; + + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + if (hi->version > ATTO_VER_TRACE1) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_TRACE1; + break; + } + + if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP + && hi->version >= ATTO_VER_TRACE1) { + if (trc->trace_func == ATTO_TRC_TF_UPLOAD) { + u32 len = hi->data_length; + u32 offset = trc->current_offset; + u32 total_len = ESAS2R_FWCOREDUMP_SZ; + + /* Size is zero if a core dump isn't present */ + if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2)) + total_len = 0; + + if (len > total_len) + len = total_len; + + if (offset >= total_len + || offset + len > total_len + || len == 0) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + + memcpy(trc + 1, + a->fw_coredump_buff + offset, + len); + + hi->data_length = len; + } else if (trc->trace_func == ATTO_TRC_TF_RESET) { + memset(a->fw_coredump_buff, 0, + ESAS2R_FWCOREDUMP_SZ); + + clear_bit(AF2_COREDUMP_SAVED, &a->flags2); + } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + /* Always return all the info we can. */ + trc->trace_mask = 0; + trc->current_offset = 0; + trc->total_length = ESAS2R_FWCOREDUMP_SZ; + + /* Return zero length buffer if core dump not present */ + if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2)) + trc->total_length = 0; + } else { + hi->status = ATTO_STS_UNSUPPORTED; + } + + break; + } + + case ATTO_FUNC_SCSI_PASS_THRU: + { + struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru; + struct scsi_lun lun; + + memcpy(&lun, spt->lun, sizeof(struct scsi_lun)); + + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + if (hi->version > ATTO_VER_SCSI_PASS_THRU0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_SCSI_PASS_THRU0; + break; + } + + if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + + esas2r_sgc_init(sgc, a, rq, NULL); + + sgc->length = hi->data_length; + sgc->cur_offset += offsetof(struct atto_ioctl, data.byte) + + sizeof(struct atto_hba_scsi_pass_thru); + + /* Finish request initialization */ + rq->target_id = (u16)spt->target_id; + rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]); + memcpy(rq->vrq->scsi.cdb, spt->cdb, 16); + rq->vrq->scsi.length = cpu_to_le32(hi->data_length); + rq->sense_len = spt->sense_length; + rq->sense_buf = (u8 *)spt->sense_data; + /* NOTE: we ignore spt->timeout */ + + /* + * always usurp the completion callback since the interrupt + * callback mechanism may be used. 
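
The passthru setup that follows maps the ioctl's direction bits onto the firmware's read/write command flags, and rejects a nonzero transfer length when no direction is given. That mapping on its own, with illustrative stand-ins for the ATTO_SPTF_* and FCP_CMND_* values:

#include <stdint.h>

#define DIR_IN   (1u << 0)   /* illustrative stand-ins for ATTO_SPTF_* */
#define DIR_OUT  (1u << 1)
#define CMD_RDD  (1u << 8)   /* ... and for the FCP_CMND_* flags */
#define CMD_WRD  (1u << 9)

/* Return command flags, or -1 if data is supplied without a direction. */
static int map_direction(uint32_t spt_flags, uint32_t data_len)
{
        if (spt_flags & DIR_IN)
                return CMD_RDD;
        if (spt_flags & DIR_OUT)
                return CMD_WRD;
        return data_len ? -1 : 0;    /* no direction: only ok with no data */
}
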
+ */ + + rq->aux_req_cx = hi; + rq->aux_req_cb = rq->comp_cb; + rq->comp_cb = scsi_passthru_comp_cb; + + if (spt->flags & ATTO_SPTF_DATA_IN) { + rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD); + } else if (spt->flags & ATTO_SPTF_DATA_OUT) { + rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD); + } else { + if (sgc->length) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + } + + if (spt->flags & ATTO_SPTF_ORDERED_Q) + rq->vrq->scsi.flags |= + cpu_to_le32(FCP_CMND_TA_ORDRD_Q); + else if (spt->flags & ATTO_SPTF_HEAD_OF_Q) + rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q); + + + if (!esas2r_build_sg_list(a, rq, sgc)) { + hi->status = ATTO_STS_OUT_OF_RSRC; + break; + } + + esas2r_start_request(a, rq); + + return true; + } + + case ATTO_FUNC_GET_DEV_ADDR: + { + struct atto_hba_get_device_address *gda = + &hi->data.get_dev_addr; + struct esas2r_target *t; + + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + if (hi->version > ATTO_VER_GET_DEV_ADDR0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_GET_DEV_ADDR0; + break; + } + + if (gda->target_id >= ESAS2R_MAX_TARGETS) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + + t = a->targetdb + (u16)gda->target_id; + + if (t->target_state != TS_PRESENT) { + hi->status = ATTO_STS_FAILED; + } else if (gda->addr_type == ATTO_GDA_AT_PORT) { + if (t->sas_addr == 0) { + hi->status = ATTO_STS_UNSUPPORTED; + } else { + *(u64 *)gda->address = t->sas_addr; + + gda->addr_len = sizeof(u64); + } + } else if (gda->addr_type == ATTO_GDA_AT_NODE) { + hi->status = ATTO_STS_NOT_APPL; + } else { + hi->status = ATTO_STS_INV_PARAM; + } + + /* update the target ID to the next one present. */ + + gda->target_id = + esas2r_targ_db_find_next_present(a, + (u16)gda->target_id); + break; + } + + case ATTO_FUNC_PHY_CTRL: + case ATTO_FUNC_CONN_CTRL: + { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + case ATTO_FUNC_ADAP_CTRL: + { + struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl; + + if (hi->flags & HBAF_TUNNEL) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + if (hi->version > ATTO_VER_ADAP_CTRL0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_ADAP_CTRL0; + break; + } + + if (ac->adap_func == ATTO_AC_AF_HARD_RST) { + esas2r_reset_adapter(a); + } else if (ac->adap_func != ATTO_AC_AF_GET_STATE) { + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + if (test_bit(AF_CHPRST_NEEDED, &a->flags)) + ac->adap_state = ATTO_AC_AS_RST_SCHED; + else if (test_bit(AF_CHPRST_PENDING, &a->flags)) + ac->adap_state = ATTO_AC_AS_RST_IN_PROG; + else if (test_bit(AF_DISC_PENDING, &a->flags)) + ac->adap_state = ATTO_AC_AS_RST_DISC; + else if (test_bit(AF_DISABLED, &a->flags)) + ac->adap_state = ATTO_AC_AS_DISABLED; + else if (test_bit(AF_DEGRADED_MODE, &a->flags)) + ac->adap_state = ATTO_AC_AS_DEGRADED; + else + ac->adap_state = ATTO_AC_AS_OK; + + break; + } + + case ATTO_FUNC_GET_DEV_INFO: + { + struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info; + struct esas2r_target *t; + + if (hi->flags & HBAF_TUNNEL) { + if (hba_ioctl_tunnel(a, hi, rq, sgc)) + return true; + + break; + } + + if (hi->version > ATTO_VER_GET_DEV_INFO0) { + hi->status = ATTO_STS_INV_VERSION; + hi->version = ATTO_VER_GET_DEV_INFO0; + break; + } + + if (gdi->target_id >= ESAS2R_MAX_TARGETS) { + hi->status = ATTO_STS_INV_PARAM; + break; + } + + t = a->targetdb + (u16)gdi->target_id; + + /* update the target ID to the next one present. 
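
A little earlier in this switch, the ATTO_FUNC_ADAP_CTRL handler collapses several independent adapter flag bits into a single reported state by testing them in strict priority order. The same priority ladder, condensed with invented flag names:

#include <stdint.h>

enum demo_state { ST_OK, ST_RST_SCHED, ST_RST_IN_PROG, ST_DISABLED };

#define F_RST_NEEDED  (1u << 0)   /* invented stand-ins for the AF_* bits */
#define F_RST_PENDING (1u << 1)
#define F_DISABLED    (1u << 2)

/* Highest-priority set bit wins; ST_OK only if nothing is set. */
static enum demo_state adapter_state(uint32_t flags)
{
        if (flags & F_RST_NEEDED)
                return ST_RST_SCHED;
        if (flags & F_RST_PENDING)
                return ST_RST_IN_PROG;
        if (flags & F_DISABLED)
                return ST_DISABLED;
        return ST_OK;
}
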
*/ + + gdi->target_id = + esas2r_targ_db_find_next_present(a, + (u16)gdi->target_id); + + if (t->target_state != TS_PRESENT) { + hi->status = ATTO_STS_FAILED; + break; + } + + hi->status = ATTO_STS_UNSUPPORTED; + break; + } + + default: + + hi->status = ATTO_STS_INV_FUNC; + break; + } + + return false; +} + +static void hba_ioctl_done_callback(struct esas2r_adapter *a, + struct esas2r_request *rq, void *context) +{ + struct atto_ioctl *ioctl_hba = + (struct atto_ioctl *)esas2r_buffered_ioctl; + + esas2r_debug("hba_ioctl_done_callback %d", a->index); + + if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) { + struct atto_hba_get_adapter_info *gai = + &ioctl_hba->data.get_adap_info; + + esas2r_debug("ATTO_FUNC_GET_ADAP_INFO"); + + gai->drvr_rev_major = ESAS2R_MAJOR_REV; + gai->drvr_rev_minor = ESAS2R_MINOR_REV; + + strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR); + strcpy(gai->drvr_name, ESAS2R_DRVR_NAME); + + gai->num_busses = 1; + gai->num_targsper_bus = ESAS2R_MAX_ID + 1; + gai->num_lunsper_targ = 1; + } +} + +u8 handle_hba_ioctl(struct esas2r_adapter *a, + struct atto_ioctl *ioctl_hba) +{ + struct esas2r_buffered_ioctl bi; + + memset(&bi, 0, sizeof(bi)); + + bi.a = a; + bi.ioctl = ioctl_hba; + bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length; + bi.callback = hba_ioctl_callback; + bi.context = NULL; + bi.done_callback = hba_ioctl_done_callback; + bi.done_context = NULL; + bi.offset = 0; + + return handle_buffered_ioctl(&bi); +} + + +int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, + struct esas2r_sas_nvram *data) +{ + int result = 0; + + a->nvram_command_done = 0; + rq->comp_cb = complete_nvr_req; + + if (esas2r_nvram_write(a, rq, data)) { + /* now wait around for it to complete. */ + while (!a->nvram_command_done) + wait_event_interruptible(a->nvram_waiter, + a->nvram_command_done); + ; + + /* done, check the status. 
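
esas2r_write_params() above shows the driver's standard fire-and-wait idiom: clear a done flag, point comp_cb at a waker, start the request, then sleep on a waitqueue until the flag flips. The idiom boiled down, with hypothetical names:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waiter);
static int demo_done;

static void demo_complete(void)      /* runs from the completion path */
{
        demo_done = 1;
        wake_up_interruptible(&demo_waiter);
}

static void demo_fire_and_wait(void (*start)(void))
{
        demo_done = 0;
        start();                     /* completion ends in demo_complete() */
        while (!demo_done)
                wait_event_interruptible(demo_waiter, demo_done);
}
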
*/ + if (rq->req_stat == RS_SUCCESS) + result = 1; + } + return result; +} + + +/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */ +int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) +{ + struct atto_express_ioctl *ioctl = NULL; + struct esas2r_adapter *a; + struct esas2r_request *rq; + u16 code; + int err; + + esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg); + + if ((arg == NULL) + || (cmd < EXPRESS_IOCTL_MIN) + || (cmd > EXPRESS_IOCTL_MAX)) + return -ENOTSUPP; + + if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) { + esas2r_log(ESAS2R_LOG_WARN, + "ioctl_handler access_ok failed for cmd %d, " + "address %p", cmd, + arg); + return -EFAULT; + } + + /* allocate a kernel memory buffer for the IOCTL data */ + ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL); + if (ioctl == NULL) { + esas2r_log(ESAS2R_LOG_WARN, + "ioctl_handler kzalloc failed for %d bytes", + sizeof(struct atto_express_ioctl)); + return -ENOMEM; + } + + err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl)); + if (err != 0) { + esas2r_log(ESAS2R_LOG_WARN, + "copy_from_user didn't copy everything (err %d, cmd %d)", + err, + cmd); + kfree(ioctl); + + return -EFAULT; + } + + /* verify the signature */ + + if (memcmp(ioctl->header.signature, + EXPRESS_IOCTL_SIGNATURE, + EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) { + esas2r_log(ESAS2R_LOG_WARN, "invalid signature"); + kfree(ioctl); + + return -ENOTSUPP; + } + + /* assume success */ + + ioctl->header.return_code = IOCTL_SUCCESS; + err = 0; + + /* + * handle EXPRESS_IOCTL_GET_CHANNELS + * without paying attention to channel + */ + + if (cmd == EXPRESS_IOCTL_GET_CHANNELS) { + int i = 0, k = 0; + + ioctl->data.chanlist.num_channels = 0; + + while (i < MAX_ADAPTERS) { + if (esas2r_adapters[i]) { + ioctl->data.chanlist.num_channels++; + ioctl->data.chanlist.channel[k] = i; + k++; + } + i++; + } + + goto ioctl_done; + } + + /* get the channel */ + + if (ioctl->header.channel == 0xFF) { + a = (struct esas2r_adapter *)hostdata; + } else { + a = esas2r_adapters[ioctl->header.channel]; + if (ioctl->header.channel >= MAX_ADAPTERS || (a == NULL)) { + ioctl->header.return_code = IOCTL_BAD_CHANNEL; + esas2r_log(ESAS2R_LOG_WARN, "bad channel value"); + kfree(ioctl); + + return -ENOTSUPP; + } + } + + switch (cmd) { + case EXPRESS_IOCTL_RW_FIRMWARE: + + if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) { + err = esas2r_write_fw(a, + (char *)ioctl->data.fwrw.image, + 0, + sizeof(struct + atto_express_ioctl)); + + if (err >= 0) { + err = esas2r_read_fw(a, + (char *)ioctl->data.fwrw. + image, + 0, + sizeof(struct + atto_express_ioctl)); + } + } else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) { + err = esas2r_write_fs(a, + (char *)ioctl->data.fwrw.image, + 0, + sizeof(struct + atto_express_ioctl)); + + if (err >= 0) { + err = esas2r_read_fs(a, + (char *)ioctl->data.fwrw. 
+ image, + 0, + sizeof(struct + atto_express_ioctl)); + } + } else { + ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE; + } + + break; + + case EXPRESS_IOCTL_READ_PARAMS: + + memcpy(ioctl->data.prw.data_buffer, a->nvram, + sizeof(struct esas2r_sas_nvram)); + ioctl->data.prw.code = 1; + break; + + case EXPRESS_IOCTL_WRITE_PARAMS: + + rq = esas2r_alloc_request(a); + if (rq == NULL) { + up(&a->nvram_semaphore); + ioctl->data.prw.code = 0; + break; + } + + code = esas2r_write_params(a, rq, + (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer); + ioctl->data.prw.code = code; + + esas2r_free_request(a, rq); + + break; + + case EXPRESS_IOCTL_DEFAULT_PARAMS: + + esas2r_nvram_get_defaults(a, + (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer); + ioctl->data.prw.code = 1; + break; + + case EXPRESS_IOCTL_CHAN_INFO: + + ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV; + ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV; + ioctl->data.chaninfo.IRQ = a->pcid->irq; + ioctl->data.chaninfo.device_id = a->pcid->device; + ioctl->data.chaninfo.vendor_id = a->pcid->vendor; + ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device; + ioctl->data.chaninfo.revision_id = a->pcid->revision; + ioctl->data.chaninfo.pci_bus = a->pcid->bus->number; + ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn; + ioctl->data.chaninfo.core_rev = 0; + ioctl->data.chaninfo.host_no = a->host->host_no; + ioctl->data.chaninfo.hbaapi_rev = 0; + break; + + case EXPRESS_IOCTL_SMP: + ioctl->header.return_code = handle_smp_ioctl(a, + &ioctl->data. + ioctl_smp); + break; + + case EXPRESS_CSMI: + ioctl->header.return_code = + handle_csmi_ioctl(a, &ioctl->data.csmi); + break; + + case EXPRESS_IOCTL_HBA: + ioctl->header.return_code = handle_hba_ioctl(a, + &ioctl->data. + ioctl_hba); + break; + + case EXPRESS_IOCTL_VDA: + err = esas2r_write_vda(a, + (char *)&ioctl->data.ioctl_vda, + 0, + sizeof(struct atto_ioctl_vda) + + ioctl->data.ioctl_vda.data_length); + + if (err >= 0) { + err = esas2r_read_vda(a, + (char *)&ioctl->data.ioctl_vda, + 0, + sizeof(struct atto_ioctl_vda) + + ioctl->data.ioctl_vda.data_length); + } + + + + + break; + + case EXPRESS_IOCTL_GET_MOD_INFO: + + ioctl->data.modinfo.adapter = a; + ioctl->data.modinfo.pci_dev = a->pcid; + ioctl->data.modinfo.scsi_host = a->host; + ioctl->data.modinfo.host_no = a->host->host_no; + + break; + + default: + esas2r_debug("esas2r_ioctl invalid cmd %p!", cmd); + ioctl->header.return_code = IOCTL_ERR_INVCMD; + } + +ioctl_done: + + if (err < 0) { + esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err, + cmd); + + switch (err) { + case -ENOMEM: + case -EBUSY: + ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES; + break; + + case -ENOSYS: + case -EINVAL: + ioctl->header.return_code = IOCTL_INVALID_PARAM; + break; + } + + ioctl->header.return_code = IOCTL_GENERAL_ERROR; + } + + /* Always copy the buffer back, if only to pick up the status */ + err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl)); + if (err != 0) { + esas2r_log(ESAS2R_LOG_WARN, + "ioctl_handler copy_to_user didn't copy " + "everything (err %d, cmd %d)", err, + cmd); + kfree(ioctl); + + return -EFAULT; + } + + kfree(ioctl); + + return 0; +} + +int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg) +{ + return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg); +} + +static void free_fw_buffers(struct esas2r_adapter *a) +{ + if (a->firmware.data) { + dma_free_coherent(&a->pcid->dev, + (size_t)a->firmware.orig_len, + a->firmware.data, + (dma_addr_t)a->firmware.phys); + + 
a->firmware.data = NULL; + } +} + +static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length) +{ + free_fw_buffers(a); + + a->firmware.orig_len = length; + + a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev, + (size_t)length, + (dma_addr_t *)&a->firmware. + phys, + GFP_KERNEL); + + if (!a->firmware.data) { + esas2r_debug("buffer alloc failed!"); + return 0; + } + + return 1; +} + +/* Handle a call to read firmware. */ +int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count) +{ + esas2r_trace_enter(); + /* if the cached header is a status, simply copy it over and return. */ + if (a->firmware.state == FW_STATUS_ST) { + int size = min_t(int, count, sizeof(a->firmware.header)); + esas2r_trace_exit(); + memcpy(buf, &a->firmware.header, size); + esas2r_debug("esas2r_read_fw: STATUS size %d", size); + return size; + } + + /* + * if the cached header is a command, do it if at + * offset 0, otherwise copy the pieces. + */ + + if (a->firmware.state == FW_COMMAND_ST) { + u32 length = a->firmware.header.length; + esas2r_trace_exit(); + + esas2r_debug("esas2r_read_fw: COMMAND length %d off %d", + length, + off); + + if (off == 0) { + if (a->firmware.header.action == FI_ACT_UP) { + if (!allocate_fw_buffers(a, length)) + return -ENOMEM; + + + /* copy header over */ + + memcpy(a->firmware.data, + &a->firmware.header, + sizeof(a->firmware.header)); + + do_fm_api(a, + (struct esas2r_flash_img *)a->firmware.data); + } else if (a->firmware.header.action == FI_ACT_UPSZ) { + int size = + min((int)count, + (int)sizeof(a->firmware.header)); + do_fm_api(a, &a->firmware.header); + memcpy(buf, &a->firmware.header, size); + esas2r_debug("FI_ACT_UPSZ size %d", size); + return size; + } else { + esas2r_debug("invalid action %d", + a->firmware.header.action); + return -ENOSYS; + } + } + + if (count + off > length) + count = length - off; + + if (count < 0) + return 0; + + if (!a->firmware.data) { + esas2r_debug( + "read: nonzero offset but no buffer available!"); + return -ENOMEM; + } + + esas2r_debug("esas2r_read_fw: off %d count %d length %d ", off, + count, + length); + + memcpy(buf, &a->firmware.data[off], count); + + /* when done, release the buffer */ + + if (length <= off + count) { + esas2r_debug("esas2r_read_fw: freeing buffer!"); + + free_fw_buffers(a); + } + + return count; + } + + esas2r_trace_exit(); + esas2r_debug("esas2r_read_fw: invalid firmware state %d", + a->firmware.state); + + return -EINVAL; +} + +/* Handle a call to write firmware. */ +int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off, + int count) +{ + u32 length; + + if (off == 0) { + struct esas2r_flash_img *header = + (struct esas2r_flash_img *)buf; + + /* assume version 0 flash image */ + + int min_size = sizeof(struct esas2r_flash_img_v0); + + a->firmware.state = FW_INVALID_ST; + + /* validate the version field first */ + + if (count < 4 + || header->fi_version > FI_VERSION_1) { + esas2r_debug( + "esas2r_write_fw: short header or invalid version"); + return -EINVAL; + } + + /* See if its a version 1 flash image */ + + if (header->fi_version == FI_VERSION_1) + min_size = sizeof(struct esas2r_flash_img); + + /* If this is the start, the header must be full and valid. */ + if (count < min_size) { + esas2r_debug("esas2r_write_fw: short header, aborting"); + return -EINVAL; + } + + /* Make sure the size is reasonable. 
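
This offset-zero branch stages the transfer: the first write must deliver a complete header with a known version before its length field is trusted. The checks condensed into one helper, with an invented header struct but the same limits as the code around it (version 1, 1 MiB image):

#include <stdint.h>
#include <stddef.h>

struct demo_fw_header {               /* invented; models esas2r_flash_img */
        uint32_t version;
        uint32_t length;
        /* ... image header continues ... */
};

#define DEMO_MAX_VERSION 1u
#define DEMO_MAX_IMAGE   (1024u * 1024u)

/* Return 0 if the first chunk holds a plausible header, else -1. */
static int demo_check_first_chunk(const void *buf, size_t count)
{
        const struct demo_fw_header *h = buf;

        if (count < sizeof(*h))
                return -1;            /* short header */
        if (h->version > DEMO_MAX_VERSION)
                return -1;            /* unknown layout */
        if (h->length > DEMO_MAX_IMAGE)
                return -1;            /* implausible size */
        return 0;
}
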
*/ + length = header->length; + + if (length > 1024 * 1024) { + esas2r_debug( + "esas2r_write_fw: hosed, length %d fi_version %d", + length, header->fi_version); + return -EINVAL; + } + + /* + * If this is a write command, allocate memory because + * we have to cache everything. otherwise, just cache + * the header, because the read op will do the command. + */ + + if (header->action == FI_ACT_DOWN) { + if (!allocate_fw_buffers(a, length)) + return -ENOMEM; + + /* + * Store the command, so there is context on subsequent + * calls. + */ + memcpy(&a->firmware.header, + buf, + sizeof(*header)); + } else if (header->action == FI_ACT_UP + || header->action == FI_ACT_UPSZ) { + /* Save the command, result will be picked up on read */ + memcpy(&a->firmware.header, + buf, + sizeof(*header)); + + a->firmware.state = FW_COMMAND_ST; + + esas2r_debug( + "esas2r_write_fw: COMMAND, count %d, action %d ", + count, header->action); + + /* + * Pretend we took the whole buffer, + * so we don't get bothered again. + */ + + return count; + } else { + esas2r_debug("esas2r_write_fw: invalid action %d ", + a->firmware.header.action); + return -ENOSYS; + } + } else { + length = a->firmware.header.length; + } + + /* + * We only get here on a download command, regardless of offset. + * the chunks written by the system need to be cached, and when + * the final one arrives, issue the fmapi command. + */ + + if (off + count > length) + count = length - off; + + if (count > 0) { + esas2r_debug("esas2r_write_fw: off %d count %d length %d", off, + count, + length); + + /* + * On a full upload, the system tries sending the whole buffer. + * there's nothing to do with it, so just drop it here, before + * trying to copy over into unallocated memory! + */ + if (a->firmware.header.action == FI_ACT_UP) + return count; + + if (!a->firmware.data) { + esas2r_debug( + "write: nonzero offset but no buffer available!"); + return -ENOMEM; + } + + memcpy(&a->firmware.data[off], buf, count); + + if (length == off + count) { + do_fm_api(a, + (struct esas2r_flash_img *)a->firmware.data); + + /* + * Now copy the header result to be picked up by the + * next read + */ + memcpy(&a->firmware.header, + a->firmware.data, + sizeof(a->firmware.header)); + + a->firmware.state = FW_STATUS_ST; + + esas2r_debug("write completed"); + + /* + * Since the system has the data buffered, the only way + * this can leak is if a root user writes a program + * that writes a shorter buffer than it claims, and the + * copyin fails. + */ + free_fw_buffers(a); + } + } + + return count; +} + +/* Callback for the completion of a VDA request. */ +static void vda_complete_req(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + a->vda_command_done = 1; + wake_up_interruptible(&a->vda_waiter); +} + +/* Scatter/gather callback for VDA requests */ +static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; + int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer; + + (*addr) = a->ppvda_buffer + offset; + return VDA_MAX_BUFFER_SIZE - offset; +} + +/* Handle a call to read a VDA command. 
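
The VDA read and write entry points below treat the DMA buffer like a tiny file: clamp the requested window to the buffer, then memcpy. The clamping rules on their own look like this sketch (buffer size arbitrary; the sketch rejects off == size with >=, while the driver uses > and lets the copy degenerate to zero bytes):

#include <string.h>
#include <stddef.h>

#define DEMO_BUF_SIZE 4096u

/* Copy up to @count bytes at @off out of a fixed-size buffer. */
static size_t demo_clamped_read(char *dst, const char *buf,
                                size_t off, size_t count)
{
        if (off >= DEMO_BUF_SIZE)
                return 0;                    /* window starts past the end */
        if (count > DEMO_BUF_SIZE - off)
                count = DEMO_BUF_SIZE - off; /* trim the tail */
        memcpy(dst, buf + off, count);
        return count;
}
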
*/
+int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+        if (!a->vda_buffer)
+                return -ENOMEM;
+
+        if (off == 0) {
+                struct esas2r_request *rq;
+                struct atto_ioctl_vda *vi =
+                        (struct atto_ioctl_vda *)a->vda_buffer;
+                struct esas2r_sg_context sgc;
+                bool wait_for_completion;
+
+                /*
+                 * Presumably, someone has already written to the vda_buffer,
+                 * and now they are reading back the response, so now we
+                 * will actually issue the request to the chip and reply.
+                 */
+
+                /* allocate a request */
+                rq = esas2r_alloc_request(a);
+                if (rq == NULL) {
+                        esas2r_debug("esas2r_read_vda: out of requests");
+                        return -EBUSY;
+                }
+
+                rq->comp_cb = vda_complete_req;
+
+                sgc.first_req = rq;
+                sgc.adapter = a;
+                sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
+                sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;
+
+                a->vda_command_done = 0;
+
+                wait_for_completion =
+                        esas2r_process_vda_ioctl(a, vi, rq, &sgc);
+
+                if (wait_for_completion) {
+                        /* now wait around for it to complete. */
+
+                        while (!a->vda_command_done)
+                                wait_event_interruptible(a->vda_waiter,
+                                                         a->vda_command_done);
+                }
+
+                esas2r_free_request(a, (struct esas2r_request *)rq);
+        }
+
+        if (off > VDA_MAX_BUFFER_SIZE)
+                return 0;
+
+        if (count + off > VDA_MAX_BUFFER_SIZE)
+                count = VDA_MAX_BUFFER_SIZE - off;
+
+        if (count < 0)
+                return 0;
+
+        memcpy(buf, a->vda_buffer + off, count);
+
+        return count;
+}
+
+/* Handle a call to write a VDA command. */
+int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
+                     int count)
+{
+        /*
+         * Allocate memory for it, if not already done.  Once allocated,
+         * we will keep it around until the driver is unloaded.
+         */
+
+        if (!a->vda_buffer) {
+                dma_addr_t dma_addr;
+
+                a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
+                                                         VDA_MAX_BUFFER_SIZE,
+                                                         &dma_addr,
+                                                         GFP_KERNEL);
+
+                a->ppvda_buffer = dma_addr;
+        }
+
+        if (!a->vda_buffer)
+                return -ENOMEM;
+
+        if (off > VDA_MAX_BUFFER_SIZE)
+                return 0;
+
+        if (count + off > VDA_MAX_BUFFER_SIZE)
+                count = VDA_MAX_BUFFER_SIZE - off;
+
+        if (count < 1)
+                return 0;
+
+        memcpy(a->vda_buffer + off, buf, count);
+
+        return count;
+}
+
+/* Callback for the completion of an FS_API request. */
+static void fs_api_complete_req(struct esas2r_adapter *a,
+                                struct esas2r_request *rq)
+{
+        a->fs_api_command_done = 1;
+
+        wake_up_interruptible(&a->fs_api_waiter);
+}
+
+/* Scatter/gather callback for FS_API requests */
+static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
+{
+        struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+        struct esas2r_ioctl_fs *fs =
+                (struct esas2r_ioctl_fs *)a->fs_api_buffer;
+        u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;
+
+        (*addr) = a->ppfs_api_buffer + offset;
+
+        return a->fs_api_buffer_size - offset;
+}
+
+/* Handle a call to read firmware via FS_API. */
+int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+        if (!a->fs_api_buffer)
+                return -ENOMEM;
+
+        if (off == 0) {
+                struct esas2r_request *rq;
+                struct esas2r_sg_context sgc;
+                struct esas2r_ioctl_fs *fs =
+                        (struct esas2r_ioctl_fs *)a->fs_api_buffer;
+
+                /* If another flash request is already in progress, return. */
+                if (down_interruptible(&a->fs_api_semaphore)) {
+busy:
+                        fs->status = ATTO_STS_OUT_OF_RSRC;
+                        return -EBUSY;
+                }
+
+                /*
+                 * Presumably, someone has already written to the
+                 * fs_api_buffer, and now they are reading back the
+                 * response, so now we will actually issue the request to the
+                 * chip and reply.
Allocate a request + */ + + rq = esas2r_alloc_request(a); + if (rq == NULL) { + esas2r_debug("esas2r_read_fs: out of requests"); + up(&a->fs_api_semaphore); + goto busy; + } + + rq->comp_cb = fs_api_complete_req; + + /* Set up the SGCONTEXT for to build the s/g table */ + + sgc.cur_offset = fs->data; + sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api; + + a->fs_api_command_done = 0; + + if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) { + if (fs->status == ATTO_STS_OUT_OF_RSRC) + count = -EBUSY; + + goto dont_wait; + } + + /* Now wait around for it to complete. */ + + while (!a->fs_api_command_done) + wait_event_interruptible(a->fs_api_waiter, + a->fs_api_command_done); + ; +dont_wait: + /* Free the request and keep going */ + up(&a->fs_api_semaphore); + esas2r_free_request(a, (struct esas2r_request *)rq); + + /* Pick up possible error code from above */ + if (count < 0) + return count; + } + + if (off > a->fs_api_buffer_size) + return 0; + + if (count + off > a->fs_api_buffer_size) + count = a->fs_api_buffer_size - off; + + if (count < 0) + return 0; + + memcpy(buf, a->fs_api_buffer + off, count); + + return count; +} + +/* Handle a call to write firmware via FS_API. */ +int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off, + int count) +{ + if (off == 0) { + struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf; + u32 length = fs->command.length + offsetof( + struct esas2r_ioctl_fs, + data); + + /* + * Special case, for BEGIN commands, the length field + * is lying to us, so just get enough for the header. + */ + + if (fs->command.command == ESAS2R_FS_CMD_BEGINW) + length = offsetof(struct esas2r_ioctl_fs, data); + + /* + * Beginning a command. We assume we'll get at least + * enough in the first write so we can look at the + * header and see how much we need to alloc. + */ + + if (count < offsetof(struct esas2r_ioctl_fs, data)) + return -EINVAL; + + /* Allocate a buffer or use the existing buffer. */ + if (a->fs_api_buffer) { + if (a->fs_api_buffer_size < length) { + /* Free too-small buffer and get a new one */ + dma_free_coherent(&a->pcid->dev, + (size_t)a->fs_api_buffer_size, + a->fs_api_buffer, + (dma_addr_t)a->ppfs_api_buffer); + + goto re_allocate_buffer; + } + } else { +re_allocate_buffer: + a->fs_api_buffer_size = length; + + a->fs_api_buffer = (u8 *)dma_alloc_coherent( + &a->pcid->dev, + (size_t)a->fs_api_buffer_size, + (dma_addr_t *)&a->ppfs_api_buffer, + GFP_KERNEL); + } + } + + if (!a->fs_api_buffer) + return -ENOMEM; + + if (off > a->fs_api_buffer_size) + return 0; + + if (count + off > a->fs_api_buffer_size) + count = a->fs_api_buffer_size - off; + + if (count < 1) + return 0; + + memcpy(a->fs_api_buffer + off, buf, count); + + return count; +} diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c new file mode 100644 index 00000000000..a82030aa857 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_log.c @@ -0,0 +1,250 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_log.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +/* + * this module within the driver is tasked with providing logging functionality. + * the event_log_level module parameter controls the level of messages that are + * written to the system log. the default level of messages that are written + * are critical and warning messages. if other types of messages are desired, + * one simply needs to load the module with the correct value for the + * event_log_level module parameter. for example: + * + * insmod <module> event_log_level=1 + * + * will load the module and only critical events will be written by this module + * to the system log. if critical, warning, and information-level messages are + * desired, the correct value for the event_log_level module parameter + * would be as follows: + * + * insmod <module> event_log_level=3 + */ + +#define EVENT_LOG_BUFF_SIZE 1024 + +static long event_log_level = ESAS2R_LOG_DFLT; + +module_param(event_log_level, long, S_IRUGO | S_IRUSR); +MODULE_PARM_DESC(event_log_level, + "Specifies the level of events to report to the system log. Critical and warning level events are logged by default."); + +/* A shared buffer to use for formatting messages. */ +static char event_buffer[EVENT_LOG_BUFF_SIZE]; + +/* A lock to protect the shared buffer used for formatting messages. */ +static DEFINE_SPINLOCK(event_buffer_lock); + +/** + * translates an esas2r-defined logging event level to a kernel logging level. + * + * @param [in] level the esas2r-defined logging event level to translate + * + * @return the corresponding kernel logging level. 
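+ *
+ * For example, ESAS2R_LOG_WARN translates to KERN_WARNING; the debug and
+ * trace levels (and anything unrecognized) fall back to KERN_DEBUG.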
+ */
+static const char *translate_esas2r_event_level_to_kernel(const long level)
+{
+	switch (level) {
+	case ESAS2R_LOG_CRIT:
+		return KERN_CRIT;
+
+	case ESAS2R_LOG_WARN:
+		return KERN_WARNING;
+
+	case ESAS2R_LOG_INFO:
+		return KERN_INFO;
+
+	case ESAS2R_LOG_DEBG:
+	case ESAS2R_LOG_TRCE:
+	default:
+		return KERN_DEBUG;
+	}
+}
+
+/**
+ * the master logging function.  this function will format the message as
+ * outlined by the formatting string, the input device information and the
+ * substitution arguments and output the resulting string to the system log.
+ *
+ * @param [in] level  the event log level of the message
+ * @param [in] dev    the device information
+ * @param [in] format the formatting string for the message
+ * @param [in] args   the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+static int esas2r_log_master(const long level,
+			     const struct device *dev,
+			     const char *format,
+			     va_list args)
+{
+	if (level <= event_log_level) {
+		unsigned long flags = 0;
+		int retval = 0;
+		char *buffer = event_buffer;
+		size_t buflen = EVENT_LOG_BUFF_SIZE;
+		const char *fmt_nodev = "%s%s: ";
+		const char *fmt_dev = "%s%s [%s, %s, %s]";
+		const char *slevel =
+			translate_esas2r_event_level_to_kernel(level);
+
+		spin_lock_irqsave(&event_buffer_lock, flags);
+
+		if (buffer == NULL) {
+			spin_unlock_irqrestore(&event_buffer_lock, flags);
+			return -1;
+		}
+
+		memset(buffer, 0, buflen);
+
+		/*
+		 * format the level onto the beginning of the string and do
+		 * some pointer arithmetic to move the pointer to the point
+		 * where the actual message can be inserted.
+		 */
+
+		if (dev == NULL) {
+			snprintf(buffer, buflen, fmt_nodev, slevel,
+				 ESAS2R_DRVR_NAME);
+		} else {
+			snprintf(buffer, buflen, fmt_dev, slevel,
+				 ESAS2R_DRVR_NAME,
+				 (dev->driver ? dev->driver->name : "unknown"),
+				 (dev->bus ? dev->bus->name : "unknown"),
+				 dev_name(dev));
+		}
+
+		buffer += strlen(event_buffer);
+		buflen -= strlen(event_buffer);
+
+		retval = vsnprintf(buffer, buflen, format, args);
+		if (retval < 0) {
+			spin_unlock_irqrestore(&event_buffer_lock, flags);
+			return -1;
+		}
+
+		/*
+		 * Put a line break at the end of the formatted string so that
+		 * we don't wind up with run-on messages.
+		 */
+		printk("%s\n", event_buffer);
+
+		spin_unlock_irqrestore(&event_buffer_lock, flags);
+	}
+
+	return 0;
+}
+
+/**
+ * formats and logs a message to the system log.
+ *
+ * @param [in] level  the event level of the message
+ * @param [in] format the formatting string for the message
+ * @param [in] ...    the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+int esas2r_log(const long level, const char *format, ...)
+{
+	int retval = 0;
+	va_list args;
+
+	va_start(args, format);
+
+	retval = esas2r_log_master(level, NULL, format, args);
+
+	va_end(args);
+
+	return retval;
+}
+
+/**
+ * formats and logs a message to the system log.  this message will include
+ * device information.
+ *
+ * @param [in] level  the event level of the message
+ * @param [in] dev    the device information
+ * @param [in] format the formatting string for the message
+ * @param [in] ...    the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+int esas2r_log_dev(const long level,
+		   const struct device *dev,
+		   const char *format,
+		   ...)
+{
+	int retval = 0;
+	va_list args;
+
+	va_start(args, format);
+
+	retval = esas2r_log_master(level, dev, format, args);
+
+	va_end(args);
+
+	return retval;
+}
+
+/**
+ * logs a hexadecimal dump of a buffer to the system log at the given
+ * event level.
+ *
+ * @param [in] level the event level of the dump
+ * @param [in] buf   the data to dump
+ * @param [in] len   the number of bytes to dump
+ *
+ * @return 1 (the return value carries no error information).
+ */
+int esas2r_log_hexdump(const long level,
+		       const void *buf,
+		       size_t len)
+{
+	if (level <= event_log_level) {
+		print_hex_dump(translate_esas2r_event_level_to_kernel(level),
+			       "", DUMP_PREFIX_OFFSET, 16, 1, buf,
+			       len, true);
+	}
+
+	return 1;
+}
diff --git a/drivers/scsi/esas2r/esas2r_log.h b/drivers/scsi/esas2r/esas2r_log.h
new file mode 100644
index 00000000000..7b6397bb5b9
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_log.h
@@ -0,0 +1,118 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_log.h
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ *  (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
+ * USA.
+ */ + +#ifndef __esas2r_log_h__ +#define __esas2r_log_h__ + +struct device; + +enum { + ESAS2R_LOG_NONE = 0, /* no events logged */ + ESAS2R_LOG_CRIT = 1, /* critical events */ + ESAS2R_LOG_WARN = 2, /* warning events */ + ESAS2R_LOG_INFO = 3, /* info events */ + ESAS2R_LOG_DEBG = 4, /* debugging events */ + ESAS2R_LOG_TRCE = 5, /* tracing events */ + +#ifdef ESAS2R_TRACE + ESAS2R_LOG_DFLT = ESAS2R_LOG_TRCE +#else + ESAS2R_LOG_DFLT = ESAS2R_LOG_WARN +#endif +}; + +int esas2r_log(const long level, const char *format, ...); +int esas2r_log_dev(const long level, + const struct device *dev, + const char *format, + ...); +int esas2r_log_hexdump(const long level, + const void *buf, + size_t len); + +/* + * the following macros are provided specifically for debugging and tracing + * messages. esas2r_debug() is provided for generic non-hardware layer + * debugging and tracing events. esas2r_hdebug is provided specifically for + * hardware layer debugging and tracing events. + */ + +#ifdef ESAS2R_DEBUG +#define esas2r_debug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args) +#define esas2r_hdebug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args) +#else +#define esas2r_debug(f, args ...) +#define esas2r_hdebug(f, args ...) +#endif /* ESAS2R_DEBUG */ + +/* + * the following macros are provided in order to trace the driver and catch + * some more serious bugs. be warned, enabling these macros may *severely* + * impact performance. + */ + +#ifdef ESAS2R_TRACE +#define esas2r_bugon() \ + do { \ + esas2r_log(ESAS2R_LOG_TRCE, "esas2r_bugon() called in %s:%d" \ + " - dumping stack and stopping kernel", __func__, \ + __LINE__); \ + dump_stack(); \ + BUG(); \ + } while (0) + +#define esas2r_trace_enter() esas2r_log(ESAS2R_LOG_TRCE, "entered %s (%s:%d)", \ + __func__, __FILE__, __LINE__) +#define esas2r_trace_exit() esas2r_log(ESAS2R_LOG_TRCE, "exited %s (%s:%d)", \ + __func__, __FILE__, __LINE__) +#define esas2r_trace(f, args ...) esas2r_log(ESAS2R_LOG_TRCE, "(%s:%s:%d): " \ + f, __func__, __FILE__, __LINE__, \ + ## args) +#else +#define esas2r_bugon() +#define esas2r_trace_enter() +#define esas2r_trace_exit() +#define esas2r_trace(f, args ...) +#endif /* ESAS2R_TRACE */ + +#endif /* __esas2r_log_h__ */ diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c new file mode 100644 index 00000000000..6504a195c87 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -0,0 +1,2032 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_main.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include "esas2r.h" + +MODULE_DESCRIPTION(ESAS2R_DRVR_NAME ": " ESAS2R_LONGNAME " driver"); +MODULE_AUTHOR("ATTO Technology, Inc."); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ESAS2R_VERSION_STR); + +/* global definitions */ + +static int found_adapters; +struct esas2r_adapter *esas2r_adapters[MAX_ADAPTERS]; + +#define ESAS2R_VDA_EVENT_PORT1 54414 +#define ESAS2R_VDA_EVENT_PORT2 54415 +#define ESAS2R_VDA_EVENT_SOCK_COUNT 2 + +static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct Scsi_Host *host = class_to_shost(dev); + + return (struct esas2r_adapter *)host->hostdata; +} + +static ssize_t read_fw(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_read_fw(a, buf, off, count); +} + +static ssize_t write_fw(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_write_fw(a, buf, off, count); +} + +static ssize_t read_fs(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_read_fs(a, buf, off, count); +} + +static ssize_t write_fs(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + int length = min(sizeof(struct esas2r_ioctl_fs), count); + int result = 0; + + result = esas2r_write_fs(a, buf, off, count); + + if (result < 0) + result = 0; + + return length; +} + +static ssize_t read_vda(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_read_vda(a, buf, off, count); +} + +static ssize_t write_vda(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj); + + return esas2r_write_vda(a, buf, off, count); +} + +static ssize_t read_live_nvram(struct file *file, struct kobject *kobj, + 
			       struct bin_attribute *attr,
+			       char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+	int length = min_t(size_t, sizeof(struct esas2r_sas_nvram), PAGE_SIZE);
+
+	memcpy(buf, a->nvram, length);
+	return length;
+}
+
+static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
+				struct bin_attribute *attr,
+				char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+	struct esas2r_request *rq;
+	int result = -EFAULT;
+
+	rq = esas2r_alloc_request(a);
+	if (rq == NULL)
+		return -ENOMEM;
+
+	if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
+		result = count;
+
+	esas2r_free_request(a, rq);
+
+	return result;
+}
+
+static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
+				  struct bin_attribute *attr,
+				  char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+	esas2r_nvram_get_defaults(a, (struct esas2r_sas_nvram *)buf);
+
+	return sizeof(struct esas2r_sas_nvram);
+}
+
+static ssize_t read_hw(struct file *file, struct kobject *kobj,
+		       struct bin_attribute *attr,
+		       char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+	int length = min_t(size_t, sizeof(struct atto_ioctl), PAGE_SIZE);
+
+	if (!a->local_atto_ioctl)
+		return -ENOMEM;
+
+	if (handle_hba_ioctl(a, a->local_atto_ioctl) != IOCTL_SUCCESS)
+		return -ENOMEM;
+
+	memcpy(buf, a->local_atto_ioctl, length);
+
+	return length;
+}
+
+static ssize_t write_hw(struct file *file, struct kobject *kobj,
+			struct bin_attribute *attr,
+			char *buf, loff_t off, size_t count)
+{
+	struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+	int length = min(sizeof(struct atto_ioctl), count);
+
+	if (!a->local_atto_ioctl) {
+		a->local_atto_ioctl = kzalloc(sizeof(struct atto_ioctl),
+					      GFP_KERNEL);
+		if (a->local_atto_ioctl == NULL) {
+			esas2r_log(ESAS2R_LOG_WARN,
+				   "write_hw kzalloc failed for %zu bytes",
+				   sizeof(struct atto_ioctl));
+			return -ENOMEM;
+		}
+	}
+
+	memset(a->local_atto_ioctl, 0, sizeof(struct atto_ioctl));
+	memcpy(a->local_atto_ioctl, buf, length);
+
+	return length;
+}
+
+#define ESAS2R_RW_BIN_ATTR(_name) \
+	struct bin_attribute bin_attr_ ## _name = { \
+		.attr	= \
+		{ .name = __stringify(_name), .mode  = S_IRUSR | S_IWUSR }, \
+		.size	= 0, \
+		.read	= read_ ## _name, \
+		.write	= write_ ## _name }
+
+ESAS2R_RW_BIN_ATTR(fw);
+ESAS2R_RW_BIN_ATTR(fs);
+ESAS2R_RW_BIN_ATTR(vda);
+ESAS2R_RW_BIN_ATTR(hw);
+ESAS2R_RW_BIN_ATTR(live_nvram);
+
+struct bin_attribute bin_attr_default_nvram = {
+	.attr	= { .name = "default_nvram", .mode = S_IRUGO },
+	.size	= 0,
+	.read	= read_default_nvram,
+	.write	= NULL
+};
+
+static struct scsi_host_template driver_template = {
+	.module				= THIS_MODULE,
+	.show_info			= esas2r_show_info,
+	.name				= ESAS2R_LONGNAME,
+	.release			= esas2r_release,
+	.info				= esas2r_info,
+	.ioctl				= esas2r_ioctl,
+	.queuecommand			= esas2r_queuecommand,
+	.eh_abort_handler		= esas2r_eh_abort,
+	.eh_device_reset_handler	= esas2r_device_reset,
+	.eh_bus_reset_handler		= esas2r_bus_reset,
+	.eh_host_reset_handler		= esas2r_host_reset,
+	.eh_target_reset_handler	= esas2r_target_reset,
+	.can_queue			= 128,
+	.this_id			= -1,
+	.sg_tablesize			= SCSI_MAX_SG_SEGMENTS,
+	.cmd_per_lun			= ESAS2R_DEFAULT_CMD_PER_LUN,
+	.present			= 0,
+	.unchecked_isa_dma		= 0,
+	.use_clustering			= ENABLE_CLUSTERING,
+	.emulated			= 0,
+	.proc_name			= ESAS2R_DRVR_NAME,
+	.slave_configure		= esas2r_slave_configure,
+	.slave_alloc			= esas2r_slave_alloc,
+	.slave_destroy			= esas2r_slave_destroy,
+	.change_queue_depth		= esas2r_change_queue_depth,
+	.change_queue_type		= esas2r_change_queue_type,
+	.max_sectors			= 0xFFFF,
+};
+
+int sgl_page_size = 512;
+module_param(sgl_page_size, int, 0);
+MODULE_PARM_DESC(sgl_page_size,
+		 "Scatter/gather list (SGL) page size in number of S/G "
+		 "entries.  If your application is doing a lot of very large "
+		 "transfers, you may want to increase the SGL page size.  "
+		 "Default 512.");
+
+int num_sg_lists = 1024;
+module_param(num_sg_lists, int, 0);
+MODULE_PARM_DESC(num_sg_lists,
+		 "Number of scatter/gather lists.  Default 1024.");
+
+int sg_tablesize = SCSI_MAX_SG_SEGMENTS;
+module_param(sg_tablesize, int, 0);
+MODULE_PARM_DESC(sg_tablesize,
+		 "Maximum number of entries in a scatter/gather table.");
+
+int num_requests = 256;
+module_param(num_requests, int, 0);
+MODULE_PARM_DESC(num_requests,
+		 "Number of requests.  Default 256.");
+
+int num_ae_requests = 4;
+module_param(num_ae_requests, int, 0);
+MODULE_PARM_DESC(num_ae_requests,
+		 "Number of VDA asynchronous event requests.  Default 4.");
+
+int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN;
+module_param(cmd_per_lun, int, 0);
+MODULE_PARM_DESC(cmd_per_lun,
+		 "Maximum number of commands per LUN.  Default "
+		 DEFINED_NUM_TO_STR(ESAS2R_DEFAULT_CMD_PER_LUN) ".");
+
+int can_queue = 128;
+module_param(can_queue, int, 0);
+MODULE_PARM_DESC(can_queue,
+		 "Maximum number of commands per adapter.  Default 128.");
+
+int esas2r_max_sectors = 0xFFFF;
+module_param(esas2r_max_sectors, int, 0);
+MODULE_PARM_DESC(esas2r_max_sectors,
+		 "Maximum number of disk sectors in a single data transfer.  "
+		 "Default 65535 (largest possible setting).");
+
+int interrupt_mode = 1;
+module_param(interrupt_mode, int, 0);
+MODULE_PARM_DESC(interrupt_mode,
+		 "Defines the interrupt mode to use.  0 for legacy, 1 for MSI."
+		 "  Default is MSI (1).");
+
+static struct pci_device_id esas2r_pci_table[] = {
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x0049, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004A, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004B, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004C, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004D, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004E, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, esas2r_pci_table);
+
+static int
+esas2r_probe(struct pci_dev *pcid, const struct pci_device_id *id);
+
+static void
+esas2r_remove(struct pci_dev *pcid);
+
+static struct pci_driver esas2r_pci_driver = {
+	.name		= ESAS2R_DRVR_NAME,
+	.id_table	= esas2r_pci_table,
+	.probe		= esas2r_probe,
+	.remove		= esas2r_remove,
+	.suspend	= esas2r_suspend,
+	.resume		= esas2r_resume,
+};
+
+static int esas2r_probe(struct pci_dev *pcid,
+			const struct pci_device_id *id)
+{
+	struct Scsi_Host *host = NULL;
+	struct esas2r_adapter *a;
+	int err;
+
+	size_t host_alloc_size = sizeof(struct esas2r_adapter)
+				 + ((num_requests) +
+				    1) * sizeof(struct esas2r_request);
+
+	esas2r_log_dev(ESAS2R_LOG_DEBG, &(pcid->dev),
+		       "esas2r_probe() 0x%02x 0x%02x 0x%02x 0x%02x",
+		       pcid->vendor,
+		       pcid->device,
+		       pcid->subsystem_vendor,
+		       pcid->subsystem_device);
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+		       "before pci_enable_device() "
+		       "enable_cnt: %d",
+		       pcid->enable_cnt.counter);
+
+	err = pci_enable_device(pcid);
+	if (err != 0) {
+		esas2r_log_dev(ESAS2R_LOG_CRIT, &(pcid->dev),
+			       "pci_enable_device() FAIL (%d)",
+			       err);
+		return -ENODEV;
+	}
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+		       "pci_enable_device() OK");
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+		       "after pci_enable_device() enable_cnt: %d",
+		       pcid->enable_cnt.counter);
+
+	host = scsi_host_alloc(&driver_template, host_alloc_size);
+	if (host == NULL) {
+		esas2r_log(ESAS2R_LOG_CRIT, "scsi_host_alloc() FAIL");
+		return -ENODEV;
+	}
+
+	memset(host->hostdata, 0, host_alloc_size);
+
+	a = (struct esas2r_adapter *)host->hostdata;
+
+	esas2r_log(ESAS2R_LOG_INFO, "scsi_host_alloc() OK host: %p", host);
+
+	/* override max LUN and max target id */
+
+	host->max_id = ESAS2R_MAX_ID + 1;
+	host->max_lun = 255;
+
+	/* we can handle 16-byte CDBs */
+
+	host->max_cmd_len = 16;
+
+	host->can_queue = can_queue;
+	host->cmd_per_lun = cmd_per_lun;
+	host->this_id = host->max_id + 1;
+	host->max_channel = 0;
+	host->unique_id = found_adapters;
+	host->sg_tablesize = sg_tablesize;
+	host->max_sectors = esas2r_max_sectors;
+
+	/* set to bus master for BIOSes that don't do it for us */
+
+	esas2r_log(ESAS2R_LOG_INFO, "pci_set_master() called");
+
+	pci_set_master(pcid);
+
+	if (!esas2r_init_adapter(host, pcid, found_adapters)) {
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "unable to initialize device at PCI bus %x:%x",
+			   pcid->bus->number,
+			   pcid->devfn);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+			       "scsi_host_put() called");
+
+		scsi_host_put(host);
+
+		return -ENODEV;
+	}
+
+	esas2r_log(ESAS2R_LOG_INFO, "pci_set_drvdata(%p, %p) called", pcid,
+		   host->hostdata);
+
+	pci_set_drvdata(pcid, host);
+
+	esas2r_log(ESAS2R_LOG_INFO, "scsi_add_host() called");
+
+	err = scsi_add_host(host, &pcid->dev);
+
+	if (err) {
+		esas2r_log(ESAS2R_LOG_CRIT, "scsi_add_host returned %d", err);
+		esas2r_log_dev(ESAS2R_LOG_CRIT, &(host->shost_gendev),
+			       "scsi_add_host() FAIL");
+
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+
"scsi_host_put() called"); + + scsi_host_put(host); + + esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev), + "pci_set_drvdata(%p, NULL) called", + pcid); + + pci_set_drvdata(pcid, NULL); + + return -ENODEV; + } + + + esas2r_fw_event_on(a); + + esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev), + "scsi_scan_host() called"); + + scsi_scan_host(host); + + /* Add sysfs binary files */ + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fw)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: fw"); + else + a->sysfs_fw_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fs)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: fs"); + else + a->sysfs_fs_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_vda)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: vda"); + else + a->sysfs_vda_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_hw)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: hw"); + else + a->sysfs_hw_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_live_nvram)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: live_nvram"); + else + a->sysfs_live_nvram_created = 1; + + if (sysfs_create_bin_file(&host->shost_dev.kobj, + &bin_attr_default_nvram)) + esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev), + "Failed to create sysfs binary file: default_nvram"); + else + a->sysfs_default_nvram_created = 1; + + found_adapters++; + + return 0; +} + +static void esas2r_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host; + int index; + + if (pdev == NULL) { + esas2r_log(ESAS2R_LOG_WARN, "esas2r_remove pdev==NULL"); + return; + } + + host = pci_get_drvdata(pdev); + + if (host == NULL) { + /* + * this can happen if pci_set_drvdata was already called + * to clear the host pointer. if this is the case, we + * are okay; this channel has already been cleaned up. 
+	 */
+
+		return;
+	}
+
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+		       "esas2r_remove(%p) called; "
+		       "host:%p", pdev,
+		       host);
+
+	index = esas2r_cleanup(host);
+
+	if (index < 0)
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(pdev->dev),
+			       "unknown host in %s",
+			       __func__);
+
+	found_adapters--;
+
+	/* if this was the last adapter, clean up the rest of the driver */
+
+	if (found_adapters == 0)
+		esas2r_cleanup(NULL);
+}
+
+static int __init esas2r_init(void)
+{
+	int i;
+
+	esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
+
+	/* verify valid parameters */
+
+	if (can_queue < 1) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: can_queue must be at least 1, value "
+			   "forced.");
+		can_queue = 1;
+	} else if (can_queue > 2048) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: can_queue must be no larger than 2048, "
+			   "value forced.");
+		can_queue = 2048;
+	}
+
+	if (cmd_per_lun < 1) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: cmd_per_lun must be at least 1, value "
+			   "forced.");
+		cmd_per_lun = 1;
+	} else if (cmd_per_lun > 2048) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: cmd_per_lun must be no larger than "
+			   "2048, value forced.");
+		cmd_per_lun = 2048;
+	}
+
+	if (sg_tablesize < 32) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: sg_tablesize must be at least 32, "
+			   "value forced.");
+		sg_tablesize = 32;
+	}
+
+	if (esas2r_max_sectors < 1) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: esas2r_max_sectors must be at least "
+			   "1, value forced.");
+		esas2r_max_sectors = 1;
+	} else if (esas2r_max_sectors > 0xffff) {
+		esas2r_log(ESAS2R_LOG_WARN,
+			   "warning: esas2r_max_sectors must be no larger "
+			   "than 0xffff, value forced.");
+		esas2r_max_sectors = 0xffff;
+	}
+
+	sgl_page_size &= ~(ESAS2R_SGL_ALIGN - 1);
+
+	if (sgl_page_size < SGL_PG_SZ_MIN)
+		sgl_page_size = SGL_PG_SZ_MIN;
+	else if (sgl_page_size > SGL_PG_SZ_MAX)
+		sgl_page_size = SGL_PG_SZ_MAX;
+
+	if (num_sg_lists < NUM_SGL_MIN)
+		num_sg_lists = NUM_SGL_MIN;
+	else if (num_sg_lists > NUM_SGL_MAX)
+		num_sg_lists = NUM_SGL_MAX;
+
+	if (num_requests < NUM_REQ_MIN)
+		num_requests = NUM_REQ_MIN;
+	else if (num_requests > NUM_REQ_MAX)
+		num_requests = NUM_REQ_MAX;
+
+	if (num_ae_requests < NUM_AE_MIN)
+		num_ae_requests = NUM_AE_MIN;
+	else if (num_ae_requests > NUM_AE_MAX)
+		num_ae_requests = NUM_AE_MAX;
+
+	/* set up other globals */
+
+	for (i = 0; i < MAX_ADAPTERS; i++)
+		esas2r_adapters[i] = NULL;
+
+	/* initialize */
+
+	driver_template.module = THIS_MODULE;
+
+	if (pci_register_driver(&esas2r_pci_driver) != 0)
+		esas2r_log(ESAS2R_LOG_CRIT, "pci_register_driver FAILED");
+	else
+		esas2r_log(ESAS2R_LOG_INFO, "pci_register_driver() OK");
+
+	if (!found_adapters) {
+		pci_unregister_driver(&esas2r_pci_driver);
+		esas2r_cleanup(NULL);
+
+		esas2r_log(ESAS2R_LOG_CRIT,
+			   "driver will not be loaded because no ATTO "
+			   "%s devices were found",
+			   ESAS2R_DRVR_NAME);
+		return -ENODEV;
+	} else {
+		esas2r_log(ESAS2R_LOG_INFO, "found %d adapters",
+			   found_adapters);
+	}
+
+	return 0;
+}
+
+/* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */
+static const struct file_operations esas2r_proc_fops = {
+	.compat_ioctl	= esas2r_proc_ioctl,
+	.unlocked_ioctl	= esas2r_proc_ioctl,
+};
+
+static struct Scsi_Host *esas2r_proc_host;
+static int esas2r_proc_major;
+
+long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	return esas2r_ioctl_handler(esas2r_proc_host->hostdata,
+				    (int)cmd, (void __user *)arg);
+}
+
+static void __exit esas2r_exit(void)
+{
+	esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
+
+	if (esas2r_proc_major > 0) {
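+		/* tear down the ATTOnode char device and proc entry that
+		 * esas2r_info() registered on first use
+		 */
+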
esas2r_log(ESAS2R_LOG_INFO, "unregister proc"); + + remove_proc_entry(ATTONODE_NAME, + esas2r_proc_host->hostt->proc_dir); + unregister_chrdev(esas2r_proc_major, ESAS2R_DRVR_NAME); + + esas2r_proc_major = 0; + } + + esas2r_log(ESAS2R_LOG_INFO, "pci_unregister_driver() called"); + + pci_unregister_driver(&esas2r_pci_driver); +} + +int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata; + + struct esas2r_target *t; + int dev_count = 0; + + esas2r_log(ESAS2R_LOG_DEBG, "esas2r_show_info (%p,%d)", m, sh->host_no); + + seq_printf(m, ESAS2R_LONGNAME "\n" + "Driver version: "ESAS2R_VERSION_STR "\n" + "Flash version: %s\n" + "Firmware version: %s\n" + "Copyright "ESAS2R_COPYRIGHT_YEARS "\n" + "http://www.attotech.com\n" + "\n", + a->flash_rev, + a->fw_rev[0] ? a->fw_rev : "(none)"); + + + seq_printf(m, "Adapter information:\n" + "--------------------\n" + "Model: %s\n" + "SAS address: %02X%02X%02X%02X:%02X%02X%02X%02X\n", + esas2r_get_model_name(a), + a->nvram->sas_addr[0], + a->nvram->sas_addr[1], + a->nvram->sas_addr[2], + a->nvram->sas_addr[3], + a->nvram->sas_addr[4], + a->nvram->sas_addr[5], + a->nvram->sas_addr[6], + a->nvram->sas_addr[7]); + + seq_puts(m, "\n" + "Discovered devices:\n" + "\n" + " # Target ID\n" + "---------------\n"); + + for (t = a->targetdb; t < a->targetdb_end; t++) + if (t->buffered_target_state == TS_PRESENT) { + seq_printf(m, " %3d %3d\n", + ++dev_count, + (u16)(uintptr_t)(t - a->targetdb)); + } + + if (dev_count == 0) + seq_puts(m, "none\n"); + + seq_puts(m, "\n"); + return 0; + +} + +int esas2r_release(struct Scsi_Host *sh) +{ + esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev), + "esas2r_release() called"); + + esas2r_cleanup(sh); + if (sh->irq) + free_irq(sh->irq, NULL); + scsi_unregister(sh); + return 0; +} + +const char *esas2r_info(struct Scsi_Host *sh) +{ + struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata; + static char esas2r_info_str[512]; + + esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev), + "esas2r_info() called"); + + /* + * if we haven't done so already, register as a char driver + * and stick a node under "/proc/scsi/esas2r/ATTOnode" + */ + + if (esas2r_proc_major <= 0) { + esas2r_proc_host = sh; + + esas2r_proc_major = register_chrdev(0, ESAS2R_DRVR_NAME, + &esas2r_proc_fops); + + esas2r_log_dev(ESAS2R_LOG_DEBG, &(sh->shost_gendev), + "register_chrdev (major %d)", + esas2r_proc_major); + + if (esas2r_proc_major > 0) { + struct proc_dir_entry *pde; + + pde = proc_create(ATTONODE_NAME, 0, + sh->hostt->proc_dir, + &esas2r_proc_fops); + + if (!pde) { + esas2r_log_dev(ESAS2R_LOG_WARN, + &(sh->shost_gendev), + "failed to create_proc_entry"); + esas2r_proc_major = -1; + } + } + } + + sprintf(esas2r_info_str, + ESAS2R_LONGNAME " (bus 0x%02X, device 0x%02X, IRQ 0x%02X)" + " driver version: "ESAS2R_VERSION_STR " firmware version: " + "%s\n", + a->pcid->bus->number, a->pcid->devfn, a->pcid->irq, + a->fw_rev[0] ? a->fw_rev : "(none)"); + + return esas2r_info_str; +} + +/* Callback for building a request scatter/gather list */ +static u32 get_physaddr_from_sgc(struct esas2r_sg_context *sgc, u64 *addr) +{ + u32 len; + + if (likely(sgc->cur_offset == sgc->exp_offset)) { + /* + * the normal case: caller used all bytes from previous call, so + * expected offset is the same as the current offset. 
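+		 *
+		 * Worked example (sizes illustrative): if the previous call
+		 * handed back one whole 4 KiB segment and the caller consumed
+		 * all of it, cur_offset == exp_offset, so we simply advance
+		 * to the next scatterlist entry and return its address and
+		 * length.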
+		 */
+
+		if (sgc->sgel_count < sgc->num_sgel) {
+			/* retrieve next segment, except for first time */
+			if (sgc->exp_offset > (u8 *)0) {
+				/* advance current segment */
+				sgc->cur_sgel = sg_next(sgc->cur_sgel);
+				++(sgc->sgel_count);
+			}
+
+			len = sg_dma_len(sgc->cur_sgel);
+			(*addr) = sg_dma_address(sgc->cur_sgel);
+
+			/* save the total # bytes returned to caller so far */
+			sgc->exp_offset += len;
+
+		} else {
+			len = 0;
+		}
+	} else if (sgc->cur_offset < sgc->exp_offset) {
+		/*
+		 * caller did not use all bytes from previous call. need to
+		 * compute the address based on current segment.
+		 */
+
+		len = sg_dma_len(sgc->cur_sgel);
+		(*addr) = sg_dma_address(sgc->cur_sgel);
+
+		sgc->exp_offset -= len;
+
+		/* calculate PA based on prev segment address and offsets */
+		*addr = *addr +
+			(sgc->cur_offset - sgc->exp_offset);
+
+		sgc->exp_offset += len;
+
+		/* re-calculate length based on offset */
+		len = lower_32_bits(
+			sgc->exp_offset - sgc->cur_offset);
+	} else {   /* if ( sgc->cur_offset > sgc->exp_offset ) */
+		/*
+		 * we don't expect the caller to skip ahead.
+		 * cur_offset will never exceed the len we return
+		 */
+		len = 0;
+	}
+
+	return len;
+}
+
+int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+	struct esas2r_adapter *a =
+		(struct esas2r_adapter *)cmd->device->host->hostdata;
+	struct esas2r_request *rq;
+	struct esas2r_sg_context sgc;
+	unsigned bufflen;
+
+	/* Assume success, if it fails we will fix the result later. */
+	cmd->result = DID_OK << 16;
+
+	if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) {
+		cmd->result = DID_NO_CONNECT << 16;
+		cmd->scsi_done(cmd);
+		return 0;
+	}
+
+	rq = esas2r_alloc_request(a);
+	if (unlikely(rq == NULL)) {
+		esas2r_debug("esas2r_alloc_request failed");
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	rq->cmd = cmd;
+	bufflen = scsi_bufflen(cmd);
+
+	if (likely(bufflen != 0)) {
+		if (cmd->sc_data_direction == DMA_TO_DEVICE)
+			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
+		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
+	}
+
+	memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len);
+	rq->vrq->scsi.length = cpu_to_le32(bufflen);
+	rq->target_id = cmd->device->id;
+	rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
+	rq->sense_buf = cmd->sense_buffer;
+	rq->sense_len = SCSI_SENSE_BUFFERSIZE;
+
+	esas2r_sgc_init(&sgc, a, rq, NULL);
+
+	sgc.length = bufflen;
+	sgc.cur_offset = NULL;
+
+	sgc.cur_sgel = scsi_sglist(cmd);
+	sgc.exp_offset = NULL;
+	sgc.num_sgel = scsi_dma_map(cmd);
+	sgc.sgel_count = 0;
+
+	if (unlikely(sgc.num_sgel < 0)) {
+		esas2r_free_request(a, rq);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_from_sgc;
+
+	if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) {
+		scsi_dma_unmap(cmd);
+		esas2r_free_request(a, rq);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id,
+		     (int)cmd->device->lun);
+
+	esas2r_start_request(a, rq);
+
+	return 0;
+}
+
+static void complete_task_management_request(struct esas2r_adapter *a,
+					     struct esas2r_request *rq)
+{
+	(*rq->task_management_status_ptr) = rq->req_stat;
+	esas2r_free_request(a, rq);
+}
+
+/**
+ * Searches the specified queue for the command to abort.
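+ * A request still sitting on the defer list has not been started in
+ * firmware and can simply be completed and freed; one found on the
+ * active list requires a firmware abort request (see below).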
+ *
+ * @param [in] a             the adapter containing the queues
+ * @param [in] abort_request output: receives the firmware abort request,
+ *                           if one had to be allocated
+ * @param [in] cmd           the SCSI mid-layer command to search for
+ * @param [in] queue         the queue (defer or active list) to search
+ *
+ * @return 0 on failure, 1 if command was not found, 2 if command was found
+ */
+static int esas2r_check_active_queue(struct esas2r_adapter *a,
+				     struct esas2r_request **abort_request,
+				     struct scsi_cmnd *cmd,
+				     struct list_head *queue)
+{
+	bool found = false;
+	struct esas2r_request *ar = *abort_request;
+	struct esas2r_request *rq;
+	struct list_head *element, *next;
+
+	list_for_each_safe(element, next, queue) {
+
+		rq = list_entry(element, struct esas2r_request, req_list);
+
+		if (rq->cmd == cmd) {
+
+			/* Found the request.  See what to do with it. */
+			if (queue == &a->active_list) {
+				/*
+				 * We are searching the active queue, which
+				 * means that we need to send an abort request
+				 * to the firmware.
+				 */
+				ar = esas2r_alloc_request(a);
+				if (ar == NULL) {
+					esas2r_log_dev(ESAS2R_LOG_WARN,
+						       &(a->host->shost_gendev),
+						       "unable to allocate an abort request for cmd %p",
+						       cmd);
+					return 0; /* Failure */
+				}
+
+				/*
+				 * Task management request must be formatted
+				 * with a lock held.
+				 */
+				ar->sense_len = 0;
+				ar->vrq->scsi.length = 0;
+				ar->target_id = rq->target_id;
+				ar->vrq->scsi.flags |= cpu_to_le32(
+					(u8)le32_to_cpu(rq->vrq->scsi.flags));
+
+				memset(ar->vrq->scsi.cdb, 0,
+				       sizeof(ar->vrq->scsi.cdb));
+
+				ar->vrq->scsi.flags |= cpu_to_le32(
+					FCP_CMND_TRM);
+				ar->vrq->scsi.u.abort_handle =
+					rq->vrq->scsi.handle;
+			} else {
+				/*
+				 * The request is pending but not active on
+				 * the firmware.  Just free it now and we'll
+				 * report the successful abort below.
+				 */
+				list_del_init(&rq->req_list);
+				esas2r_free_request(a, rq);
+			}
+
+			found = true;
+			break;
+		}
+
+	}
+
+	if (!found)
+		return 1;       /* Not found */
+
+	/* hand any allocated abort request back to the caller */
+	*abort_request = ar;
+
+	return 2;       /* found */
+}
+
+int esas2r_eh_abort(struct scsi_cmnd *cmd)
+{
+	struct esas2r_adapter *a =
+		(struct esas2r_adapter *)cmd->device->host->hostdata;
+	struct esas2r_request *abort_request = NULL;
+	unsigned long flags;
+	struct list_head *queue;
+	int result;
+
+	esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd);
+
+	if (test_bit(AF_DEGRADED_MODE, &a->flags)) {
+		cmd->result = DID_ABORT << 16;
+
+		scsi_set_resid(cmd, 0);
+
+		cmd->scsi_done(cmd);
+
+		return SUCCESS;
+	}
+
+	spin_lock_irqsave(&a->queue_lock, flags);
+
+	/*
+	 * Run through the defer and active queues looking for the request
+	 * to abort.
+	 */
+
+	queue = &a->defer_list;
+
+check_active_queue:
+
+	result = esas2r_check_active_queue(a, &abort_request, cmd, queue);
+
+	if (!result) {
+		spin_unlock_irqrestore(&a->queue_lock, flags);
+		return FAILED;
+	} else if (result == 2 && (queue == &a->defer_list)) {
+		queue = &a->active_list;
+		goto check_active_queue;
+	}
+
+	spin_unlock_irqrestore(&a->queue_lock, flags);
+
+	if (abort_request) {
+		u8 task_management_status = RS_PENDING;
+
+		/*
+		 * the request is already active, so we need to tell
+		 * the firmware to abort it and wait for the response.
+		 */
+
+		abort_request->comp_cb = complete_task_management_request;
+		abort_request->task_management_status_ptr =
+			&task_management_status;
+
+		esas2r_start_request(a, abort_request);
+
+		if (atomic_read(&a->disable_cnt) == 0)
+			esas2r_do_deferred_processes(a);
+
+		while (task_management_status == RS_PENDING)
+			msleep(10);
+
+		/*
+		 * Once we get here, the original request will have been
+		 * completed by the firmware and the abort request will have
+		 * been cleaned up.  we're done!
+		 */
+
+		return SUCCESS;
+	}
+
+	/*
+	 * If we get here, either we found the inactive request and
+	 * freed it, or we didn't find it at all.  Either way, success!
+ */ + + cmd->result = DID_ABORT << 16; + + scsi_set_resid(cmd, 0); + + cmd->scsi_done(cmd); + + return SUCCESS; +} + +static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset) +{ + struct esas2r_adapter *a = + (struct esas2r_adapter *)cmd->device->host->hostdata; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return FAILED; + + if (host_reset) + esas2r_reset_adapter(a); + else + esas2r_reset_bus(a); + + /* above call sets the AF_OS_RESET flag. wait for it to clear. */ + + while (test_bit(AF_OS_RESET, &a->flags)) { + msleep(10); + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return FAILED; + } + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return FAILED; + + return SUCCESS; +} + +int esas2r_host_reset(struct scsi_cmnd *cmd) +{ + esas2r_log(ESAS2R_LOG_INFO, "host_reset (%p)", cmd); + + return esas2r_host_bus_reset(cmd, true); +} + +int esas2r_bus_reset(struct scsi_cmnd *cmd) +{ + esas2r_log(ESAS2R_LOG_INFO, "bus_reset (%p)", cmd); + + return esas2r_host_bus_reset(cmd, false); +} + +static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset) +{ + struct esas2r_adapter *a = + (struct esas2r_adapter *)cmd->device->host->hostdata; + struct esas2r_request *rq; + u8 task_management_status = RS_PENDING; + bool completed; + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return FAILED; + +retry: + rq = esas2r_alloc_request(a); + if (rq == NULL) { + if (target_reset) { + esas2r_log(ESAS2R_LOG_CRIT, + "unable to allocate a request for a " + "target reset (%d)!", + cmd->device->id); + } else { + esas2r_log(ESAS2R_LOG_CRIT, + "unable to allocate a request for a " + "device reset (%d:%d)!", + cmd->device->id, + cmd->device->lun); + } + + + return FAILED; + } + + rq->target_id = cmd->device->id; + rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun); + rq->req_stat = RS_PENDING; + + rq->comp_cb = complete_task_management_request; + rq->task_management_status_ptr = &task_management_status; + + if (target_reset) { + esas2r_debug("issuing target reset (%p) to id %d", rq, + cmd->device->id); + completed = esas2r_send_task_mgmt(a, rq, 0x20); + } else { + esas2r_debug("issuing device reset (%p) to id %d lun %d", rq, + cmd->device->id, cmd->device->lun); + completed = esas2r_send_task_mgmt(a, rq, 0x10); + } + + if (completed) { + /* Task management cmd completed right away, need to free it. */ + + esas2r_free_request(a, rq); + } else { + /* + * Wait for firmware to complete the request. Completion + * callback will free it. + */ + while (task_management_status == RS_PENDING) + msleep(10); + } + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) + return FAILED; + + if (task_management_status == RS_BUSY) { + /* + * Busy, probably because we are flashing. Wait a bit and + * try again. 
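+		 * (RS_BUSY is delivered through task_management_status by
+		 * complete_task_management_request() above.)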
+		 */
+		msleep(100);
+		goto retry;
+	}
+
+	return SUCCESS;
+}
+
+int esas2r_device_reset(struct scsi_cmnd *cmd)
+{
+	esas2r_log(ESAS2R_LOG_INFO, "device_reset (%p)", cmd);
+
+	return esas2r_dev_targ_reset(cmd, false);
+}
+
+int esas2r_target_reset(struct scsi_cmnd *cmd)
+{
+	esas2r_log(ESAS2R_LOG_INFO, "target_reset (%p)", cmd);
+
+	return esas2r_dev_targ_reset(cmd, true);
+}
+
+int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason)
+{
+	esas2r_log(ESAS2R_LOG_INFO, "change_queue_depth %p, %d", dev, depth);
+
+	scsi_adjust_queue_depth(dev, scsi_get_tag_type(dev), depth);
+
+	return dev->queue_depth;
+}
+
+int esas2r_change_queue_type(struct scsi_device *dev, int type)
+{
+	esas2r_log(ESAS2R_LOG_INFO, "change_queue_type %p, %d", dev, type);
+
+	if (dev->tagged_supported) {
+		scsi_set_tag_type(dev, type);
+
+		if (type)
+			scsi_activate_tcq(dev, dev->queue_depth);
+		else
+			scsi_deactivate_tcq(dev, dev->queue_depth);
+	} else {
+		type = 0;
+	}
+
+	return type;
+}
+
+int esas2r_slave_alloc(struct scsi_device *dev)
+{
+	return 0;
+}
+
+int esas2r_slave_configure(struct scsi_device *dev)
+{
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
+		       "esas2r_slave_configure()");
+
+	if (dev->tagged_supported) {
+		scsi_set_tag_type(dev, MSG_SIMPLE_TAG);
+		scsi_activate_tcq(dev, cmd_per_lun);
+	} else {
+		scsi_set_tag_type(dev, 0);
+		scsi_deactivate_tcq(dev, cmd_per_lun);
+	}
+
+	return 0;
+}
+
+void esas2r_slave_destroy(struct scsi_device *dev)
+{
+	esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
+		       "esas2r_slave_destroy()");
+}
+
+void esas2r_log_request_failure(struct esas2r_adapter *a,
+				struct esas2r_request *rq)
+{
+	u8 reqstatus = rq->req_stat;
+
+	if (reqstatus == RS_SUCCESS)
+		return;
+
+	if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+		if (reqstatus == RS_SCSI_ERROR) {
+			if (rq->func_rsp.scsi_rsp.sense_len >= 14) {
+				esas2r_log(ESAS2R_LOG_WARN,
+					   "request failure - SCSI error %x ASC:%x ASCQ:%x CDB:%x",
+					   rq->sense_buf[2], rq->sense_buf[12],
+					   rq->sense_buf[13],
+					   rq->vrq->scsi.cdb[0]);
+			} else {
+				esas2r_log(ESAS2R_LOG_WARN,
+					   "request failure - SCSI error CDB:%x",
+					   rq->vrq->scsi.cdb[0]);
+			}
+		} else if ((rq->vrq->scsi.cdb[0] != INQUIRY
+			    && rq->vrq->scsi.cdb[0] != REPORT_LUNS)
+			   || (reqstatus != RS_SEL
+			       && reqstatus != RS_SEL2)) {
+			if ((reqstatus == RS_UNDERRUN) &&
+			    (rq->vrq->scsi.cdb[0] == INQUIRY)) {
+				/* Don't log inquiry underruns */
+			} else {
+				esas2r_log(ESAS2R_LOG_WARN,
+					   "request failure - cdb:%x reqstatus:%d target:%d",
+					   rq->vrq->scsi.cdb[0], reqstatus,
+					   rq->target_id);
+			}
+		}
+	}
+}
+
+void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+	u32 starttime;
+	u32 timeout;
+
+	starttime = jiffies_to_msecs(jiffies);
+	timeout = rq->timeout ?
rq->timeout : 5000; + + while (true) { + esas2r_polled_interrupt(a); + + if (rq->req_stat != RS_STARTED) + break; + + schedule_timeout_interruptible(msecs_to_jiffies(100)); + + if ((jiffies_to_msecs(jiffies) - starttime) > timeout) { + esas2r_hdebug("request TMO"); + esas2r_bugon(); + + rq->req_stat = RS_TIMEOUT; + + esas2r_local_reset_adapter(a); + return; + } + } +} + +u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo) +{ + u32 offset = addr_lo & (MW_DATA_WINDOW_SIZE - 1); + u32 base = addr_lo & -(signed int)MW_DATA_WINDOW_SIZE; + + if (a->window_base != base) { + esas2r_write_register_dword(a, MVR_PCI_WIN1_REMAP, + base | MVRPW1R_ENABLE); + esas2r_flush_register_dword(a, MVR_PCI_WIN1_REMAP); + a->window_base = base; + } + + return offset; +} + +/* Read a block of data from chip memory */ +bool esas2r_read_mem_block(struct esas2r_adapter *a, + void *to, + u32 from, + u32 size) +{ + u8 *end = (u8 *)to; + + while (size) { + u32 len; + u32 offset; + u32 iatvr; + + iatvr = (from & -(signed int)MW_DATA_WINDOW_SIZE); + + esas2r_map_data_window(a, iatvr); + + offset = from & (MW_DATA_WINDOW_SIZE - 1); + len = size; + + if (len > MW_DATA_WINDOW_SIZE - offset) + len = MW_DATA_WINDOW_SIZE - offset; + + from += len; + size -= len; + + while (len--) { + *end++ = esas2r_read_data_byte(a, offset); + offset++; + } + } + + return true; +} + +void esas2r_nuxi_mgt_data(u8 function, void *data) +{ + struct atto_vda_grp_info *g; + struct atto_vda_devinfo *d; + struct atto_vdapart_info *p; + struct atto_vda_dh_info *h; + struct atto_vda_metrics_info *m; + struct atto_vda_schedule_info *s; + struct atto_vda_buzzer_info *b; + u8 i; + + switch (function) { + case VDAMGT_BUZZER_INFO: + case VDAMGT_BUZZER_SET: + + b = (struct atto_vda_buzzer_info *)data; + + b->duration = le32_to_cpu(b->duration); + break; + + case VDAMGT_SCHEDULE_INFO: + case VDAMGT_SCHEDULE_EVENT: + + s = (struct atto_vda_schedule_info *)data; + + s->id = le32_to_cpu(s->id); + + break; + + case VDAMGT_DEV_INFO: + case VDAMGT_DEV_CLEAN: + case VDAMGT_DEV_PT_INFO: + case VDAMGT_DEV_FEATURES: + case VDAMGT_DEV_PT_FEATURES: + case VDAMGT_DEV_OPERATION: + + d = (struct atto_vda_devinfo *)data; + + d->capacity = le64_to_cpu(d->capacity); + d->block_size = le32_to_cpu(d->block_size); + d->ses_dev_index = le16_to_cpu(d->ses_dev_index); + d->target_id = le16_to_cpu(d->target_id); + d->lun = le16_to_cpu(d->lun); + d->features = le16_to_cpu(d->features); + break; + + case VDAMGT_GRP_INFO: + case VDAMGT_GRP_CREATE: + case VDAMGT_GRP_DELETE: + case VDAMGT_ADD_STORAGE: + case VDAMGT_MEMBER_ADD: + case VDAMGT_GRP_COMMIT: + case VDAMGT_GRP_REBUILD: + case VDAMGT_GRP_COMMIT_INIT: + case VDAMGT_QUICK_RAID: + case VDAMGT_GRP_FEATURES: + case VDAMGT_GRP_COMMIT_INIT_AUTOMAP: + case VDAMGT_QUICK_RAID_INIT_AUTOMAP: + case VDAMGT_SPARE_LIST: + case VDAMGT_SPARE_ADD: + case VDAMGT_SPARE_REMOVE: + case VDAMGT_LOCAL_SPARE_ADD: + case VDAMGT_GRP_OPERATION: + + g = (struct atto_vda_grp_info *)data; + + g->capacity = le64_to_cpu(g->capacity); + g->block_size = le32_to_cpu(g->block_size); + g->interleave = le32_to_cpu(g->interleave); + g->features = le16_to_cpu(g->features); + + for (i = 0; i < 32; i++) + g->members[i] = le16_to_cpu(g->members[i]); + + break; + + case VDAMGT_PART_INFO: + case VDAMGT_PART_MAP: + case VDAMGT_PART_UNMAP: + case VDAMGT_PART_AUTOMAP: + case VDAMGT_PART_SPLIT: + case VDAMGT_PART_MERGE: + + p = (struct atto_vdapart_info *)data; + + p->part_size = le64_to_cpu(p->part_size); + p->start_lba = le32_to_cpu(p->start_lba); + p->block_size = 
			le32_to_cpu(p->block_size);
+		p->target_id = le16_to_cpu(p->target_id);
+		break;
+
+	case VDAMGT_DEV_HEALTH_REQ:
+
+		h = (struct atto_vda_dh_info *)data;
+
+		h->med_defect_cnt = le32_to_cpu(h->med_defect_cnt);
+		h->info_exc_cnt = le32_to_cpu(h->info_exc_cnt);
+		break;
+
+	case VDAMGT_DEV_METRICS:
+
+		m = (struct atto_vda_metrics_info *)data;
+
+		for (i = 0; i < 32; i++)
+			m->dev_indexes[i] = le16_to_cpu(m->dev_indexes[i]);
+
+		break;
+
+	default:
+		break;
+	}
+}
+
+void esas2r_nuxi_cfg_data(u8 function, void *data)
+{
+	struct atto_vda_cfg_init *ci;
+
+	switch (function) {
+	case VDA_CFG_INIT:
+	case VDA_CFG_GET_INIT:
+	case VDA_CFG_GET_INIT2:
+
+		ci = (struct atto_vda_cfg_init *)data;
+
+		ci->date_time.year = le16_to_cpu(ci->date_time.year);
+		ci->sgl_page_size = le32_to_cpu(ci->sgl_page_size);
+		ci->vda_version = le32_to_cpu(ci->vda_version);
+		ci->epoch_time = le32_to_cpu(ci->epoch_time);
+		ci->ioctl_tunnel = le32_to_cpu(ci->ioctl_tunnel);
+		ci->num_targets_backend = le32_to_cpu(ci->num_targets_backend);
+		break;
+
+	default:
+		break;
+	}
+}
+
+void esas2r_nuxi_ae_data(union atto_vda_ae *ae)
+{
+	struct atto_vda_ae_raid *r = &ae->raid;
+	struct atto_vda_ae_lu *l = &ae->lu;
+
+	switch (ae->hdr.bytype) {
+	case VDAAE_HDR_TYPE_RAID:
+
+		r->dwflags = le32_to_cpu(r->dwflags);
+		break;
+
+	case VDAAE_HDR_TYPE_LU:
+
+		l->dwevent = le32_to_cpu(l->dwevent);
+		l->wphys_target_id = le16_to_cpu(l->wphys_target_id);
+		l->id.tgtlun.wtarget_id = le16_to_cpu(l->id.tgtlun.wtarget_id);
+
+		if (l->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
+		    + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) {
+			l->id.tgtlun_raid.dwinterleave
+				= le32_to_cpu(l->id.tgtlun_raid.dwinterleave);
+			l->id.tgtlun_raid.dwblock_size
+				= le32_to_cpu(l->id.tgtlun_raid.dwblock_size);
+		}
+
+		break;
+
+	case VDAAE_HDR_TYPE_DISK:
+	default:
+		break;
+	}
+}
+
+void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+	unsigned long flags;
+
+	esas2r_rq_destroy_request(rq, a);
+	spin_lock_irqsave(&a->request_lock, flags);
+	list_add(&rq->comp_list, &a->avail_request);
+	spin_unlock_irqrestore(&a->request_lock, flags);
+}
+
+struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a)
+{
+	struct esas2r_request *rq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&a->request_lock, flags);
+
+	if (unlikely(list_empty(&a->avail_request))) {
+		spin_unlock_irqrestore(&a->request_lock, flags);
+		return NULL;
+	}
+
+	rq = list_first_entry(&a->avail_request, struct esas2r_request,
+			      comp_list);
+	list_del(&rq->comp_list);
+	spin_unlock_irqrestore(&a->request_lock, flags);
+	esas2r_rq_init_request(rq, a);
+
+	return rq;
+}
+
+void esas2r_complete_request_cb(struct esas2r_adapter *a,
+				struct esas2r_request *rq)
+{
+	esas2r_debug("completing request %p\n", rq);
+
+	scsi_dma_unmap(rq->cmd);
+
+	if (unlikely(rq->req_stat != RS_SUCCESS)) {
+		esas2r_debug("[%x STATUS %x:%x (%p)]", rq->target_id,
+			     rq->req_stat,
+			     rq->func_rsp.scsi_rsp.scsi_stat,
+			     rq->cmd);
+
+		rq->cmd->result =
+			((esas2r_req_status_to_error(rq->req_stat) << 16)
+			 | (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK));
+
+		if (rq->req_stat == RS_UNDERRUN)
+			scsi_set_resid(rq->cmd,
+				       le32_to_cpu(rq->func_rsp.scsi_rsp.
+						   residual_length));
+		else
+			scsi_set_resid(rq->cmd, 0);
+	}
+
+	rq->cmd->scsi_done(rq->cmd);
+
+	esas2r_free_request(a, rq);
+}
+
+/* Run tasklet to handle stuff outside of interrupt context.
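+ * The tasklet runs timer ticks (AF2_TIMER_TICK) and deferred interrupt
+ * work (AF2_INT_PENDING), and reschedules itself while work remains.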
 */
+void esas2r_adapter_tasklet(unsigned long context)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+
+	if (unlikely(test_bit(AF2_TIMER_TICK, &a->flags2))) {
+		clear_bit(AF2_TIMER_TICK, &a->flags2);
+		esas2r_timer_tick(a);
+	}
+
+	if (likely(test_bit(AF2_INT_PENDING, &a->flags2))) {
+		clear_bit(AF2_INT_PENDING, &a->flags2);
+		esas2r_adapter_interrupt(a);
+	}
+
+	if (esas2r_is_tasklet_pending(a))
+		esas2r_do_tasklet_tasks(a);
+
+	if (esas2r_is_tasklet_pending(a)
+	    || (test_bit(AF2_INT_PENDING, &a->flags2))
+	    || (test_bit(AF2_TIMER_TICK, &a->flags2))) {
+		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
+		esas2r_schedule_tasklet(a);
+	} else {
+		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);
+	}
+}
+
+static void esas2r_timer_callback(unsigned long context);
+
+void esas2r_kickoff_timer(struct esas2r_adapter *a)
+{
+	init_timer(&a->timer);
+
+	a->timer.function = esas2r_timer_callback;
+	a->timer.data = (unsigned long)a;
+	a->timer.expires = jiffies + msecs_to_jiffies(100);
+
+	add_timer(&a->timer);
+}
+
+static void esas2r_timer_callback(unsigned long context)
+{
+	struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+
+	set_bit(AF2_TIMER_TICK, &a->flags2);
+
+	esas2r_schedule_tasklet(a);
+
+	esas2r_kickoff_timer(a);
+}
+
+/*
+ * Firmware events need to be handled outside of interrupt context
+ * so we schedule a delayed_work to handle them.
+ */
+
+static void
+esas2r_free_fw_event(struct esas2r_fw_event_work *fw_event)
+{
+	unsigned long flags;
+	struct esas2r_adapter *a = fw_event->a;
+
+	spin_lock_irqsave(&a->fw_event_lock, flags);
+	list_del(&fw_event->list);
+	kfree(fw_event);
+	spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void
+esas2r_fw_event_off(struct esas2r_adapter *a)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&a->fw_event_lock, flags);
+	a->fw_events_off = 1;
+	spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void
+esas2r_fw_event_on(struct esas2r_adapter *a)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&a->fw_event_lock, flags);
+	a->fw_events_off = 0;
+	spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+static void esas2r_add_device(struct esas2r_adapter *a, u16 target_id)
+{
+	int ret;
+	struct scsi_device *scsi_dev;
+
+	scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
+
+	if (scsi_dev) {
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(scsi_dev->sdev_gendev),
+			       "scsi device already exists at id %d",
+			       target_id);
+
+		scsi_device_put(scsi_dev);
+	} else {
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(a->host->shost_gendev),
+			       "scsi_add_device() called for 0:%d:0",
+			       target_id);
+
+		ret = scsi_add_device(a->host, 0, target_id, 0);
+		if (ret) {
+			esas2r_log_dev(ESAS2R_LOG_CRIT,
+				       &(a->host->shost_gendev),
+				       "scsi_add_device failed with %d for id %d",
+				       ret, target_id);
+		}
+	}
+}
+
+static void esas2r_remove_device(struct esas2r_adapter *a, u16 target_id)
+{
+	struct scsi_device *scsi_dev;
+
+	scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
+
+	if (scsi_dev) {
+		scsi_device_set_state(scsi_dev, SDEV_OFFLINE);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(scsi_dev->sdev_gendev),
+			       "scsi_remove_device() called for 0:%d:0",
+			       target_id);
+
+		scsi_remove_device(scsi_dev);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(scsi_dev->sdev_gendev),
+			       "scsi_device_put() called");
+
+		scsi_device_put(scsi_dev);
+	} else {
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(a->host->shost_gendev),
+			       "no target found at id %d",
+			       target_id);
+	}
+}
+
+/*
+ * Sends a firmware asynchronous event to anyone who happens to be
+ * listening on
the defined ATTO VDA event ports. + */ +static void esas2r_send_ae_event(struct esas2r_fw_event_work *fw_event) +{ + struct esas2r_vda_ae *ae = (struct esas2r_vda_ae *)fw_event->data; + char *type; + + switch (ae->vda_ae.hdr.bytype) { + case VDAAE_HDR_TYPE_RAID: + type = "RAID group state change"; + break; + + case VDAAE_HDR_TYPE_LU: + type = "Mapped destination LU change"; + break; + + case VDAAE_HDR_TYPE_DISK: + type = "Physical disk inventory change"; + break; + + case VDAAE_HDR_TYPE_RESET: + type = "Firmware reset"; + break; + + case VDAAE_HDR_TYPE_LOG_INFO: + type = "Event Log message (INFO level)"; + break; + + case VDAAE_HDR_TYPE_LOG_WARN: + type = "Event Log message (WARN level)"; + break; + + case VDAAE_HDR_TYPE_LOG_CRIT: + type = "Event Log message (CRIT level)"; + break; + + case VDAAE_HDR_TYPE_LOG_FAIL: + type = "Event Log message (FAIL level)"; + break; + + case VDAAE_HDR_TYPE_NVC: + type = "NVCache change"; + break; + + case VDAAE_HDR_TYPE_TLG_INFO: + type = "Time stamped log message (INFO level)"; + break; + + case VDAAE_HDR_TYPE_TLG_WARN: + type = "Time stamped log message (WARN level)"; + break; + + case VDAAE_HDR_TYPE_TLG_CRIT: + type = "Time stamped log message (CRIT level)"; + break; + + case VDAAE_HDR_TYPE_PWRMGT: + type = "Power management"; + break; + + case VDAAE_HDR_TYPE_MUTE: + type = "Mute button pressed"; + break; + + case VDAAE_HDR_TYPE_DEV: + type = "Device attribute change"; + break; + + default: + type = "Unknown"; + break; + } + + esas2r_log(ESAS2R_LOG_WARN, + "An async event of type \"%s\" was received from the firmware. The event contents are:", + type); + esas2r_log_hexdump(ESAS2R_LOG_WARN, &ae->vda_ae, + ae->vda_ae.hdr.bylength); + +} + +static void +esas2r_firmware_event_work(struct work_struct *work) +{ + struct esas2r_fw_event_work *fw_event = + container_of(work, struct esas2r_fw_event_work, work.work); + + struct esas2r_adapter *a = fw_event->a; + + u16 target_id = *(u16 *)&fw_event->data[0]; + + if (a->fw_events_off) + goto done; + + switch (fw_event->type) { + case fw_event_null: + break; /* do nothing */ + + case fw_event_lun_change: + esas2r_remove_device(a, target_id); + esas2r_add_device(a, target_id); + break; + + case fw_event_present: + esas2r_add_device(a, target_id); + break; + + case fw_event_not_present: + esas2r_remove_device(a, target_id); + break; + + case fw_event_vda_ae: + esas2r_send_ae_event(fw_event); + break; + } + +done: + esas2r_free_fw_event(fw_event); +} + +void esas2r_queue_fw_event(struct esas2r_adapter *a, + enum fw_event_type type, + void *data, + int data_sz) +{ + struct esas2r_fw_event_work *fw_event; + unsigned long flags; + + fw_event = kzalloc(sizeof(struct esas2r_fw_event_work), GFP_ATOMIC); + if (!fw_event) { + esas2r_log(ESAS2R_LOG_WARN, + "esas2r_queue_fw_event failed to alloc"); + return; + } + + if (type == fw_event_vda_ae) { + struct esas2r_vda_ae *ae = + (struct esas2r_vda_ae *)fw_event->data; + + ae->signature = ESAS2R_VDA_EVENT_SIG; + ae->bus_number = a->pcid->bus->number; + ae->devfn = a->pcid->devfn; + memcpy(&ae->vda_ae, data, sizeof(ae->vda_ae)); + } else { + memcpy(fw_event->data, data, data_sz); + } + + fw_event->type = type; + fw_event->a = a; + + spin_lock_irqsave(&a->fw_event_lock, flags); + list_add_tail(&fw_event->list, &a->fw_event_list); + INIT_DELAYED_WORK(&fw_event->work, esas2r_firmware_event_work); + queue_delayed_work_on( + smp_processor_id(), a->fw_event_q, &fw_event->work, + msecs_to_jiffies(1)); + spin_unlock_irqrestore(&a->fw_event_lock, flags); +} + +void 
esas2r_target_state_changed(struct esas2r_adapter *a, u16 targ_id, + u8 state) +{ + if (state == TS_LUN_CHANGE) + esas2r_queue_fw_event(a, fw_event_lun_change, &targ_id, + sizeof(targ_id)); + else if (state == TS_PRESENT) + esas2r_queue_fw_event(a, fw_event_present, &targ_id, + sizeof(targ_id)); + else if (state == TS_NOT_PRESENT) + esas2r_queue_fw_event(a, fw_event_not_present, &targ_id, + sizeof(targ_id)); +} + +/* Translate status to a Linux SCSI mid-layer error code */ +int esas2r_req_status_to_error(u8 req_stat) +{ + switch (req_stat) { + case RS_OVERRUN: + case RS_UNDERRUN: + case RS_SUCCESS: + /* + * NOTE: SCSI mid-layer wants a good status for a SCSI error, because + * it will check the scsi_stat value in the completion anyway. + */ + case RS_SCSI_ERROR: + return DID_OK; + + case RS_SEL: + case RS_SEL2: + return DID_NO_CONNECT; + + case RS_RESET: + return DID_RESET; + + case RS_ABORTED: + return DID_ABORT; + + case RS_BUSY: + return DID_BUS_BUSY; + } + + /* everything else is just an error. */ + + return DID_ERROR; +} + +module_init(esas2r_init); +module_exit(esas2r_exit); diff --git a/drivers/scsi/esas2r/esas2r_targdb.c b/drivers/scsi/esas2r/esas2r_targdb.c new file mode 100644 index 00000000000..bf45beaad43 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_targdb.c @@ -0,0 +1,306 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_targdb.c + * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#include "esas2r.h" + +void esas2r_targ_db_initialize(struct esas2r_adapter *a) +{ + struct esas2r_target *t; + + for (t = a->targetdb; t < a->targetdb_end; t++) { + memset(t, 0, sizeof(struct esas2r_target)); + + t->target_state = TS_NOT_PRESENT; + t->buffered_target_state = TS_NOT_PRESENT; + t->new_target_state = TS_INVALID; + } +} + +void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify) +{ + struct esas2r_target *t; + unsigned long flags; + + for (t = a->targetdb; t < a->targetdb_end; t++) { + if (t->target_state != TS_PRESENT) + continue; + + spin_lock_irqsave(&a->mem_lock, flags); + esas2r_targ_db_remove(a, t); + spin_unlock_irqrestore(&a->mem_lock, flags); + + if (notify) { + esas2r_trace("remove id:%d", esas2r_targ_get_id(t, + a)); + esas2r_target_state_changed(a, esas2r_targ_get_id(t, + a), + TS_NOT_PRESENT); + } + } +} + +void esas2r_targ_db_report_changes(struct esas2r_adapter *a) +{ + struct esas2r_target *t; + unsigned long flags; + + esas2r_trace_enter(); + + if (test_bit(AF_DISC_PENDING, &a->flags)) { + esas2r_trace_exit(); + return; + } + + for (t = a->targetdb; t < a->targetdb_end; t++) { + u8 state = TS_INVALID; + + spin_lock_irqsave(&a->mem_lock, flags); + if (t->buffered_target_state != t->target_state) + state = t->buffered_target_state = t->target_state; + + spin_unlock_irqrestore(&a->mem_lock, flags); + if (state != TS_INVALID) { + esas2r_trace("targ_db_report_changes:%d", + esas2r_targ_get_id( + t, + a)); + esas2r_trace("state:%d", state); + + esas2r_target_state_changed(a, + esas2r_targ_get_id(t, + a), + state); + } + } + + esas2r_trace_exit(); +} + +struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a, + struct esas2r_disc_context * + dc) +{ + struct esas2r_target *t; + + esas2r_trace_enter(); + + if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) { + esas2r_bugon(); + esas2r_trace_exit(); + return NULL; + } + + t = a->targetdb + dc->curr_virt_id; + + if (t->target_state == TS_PRESENT) { + esas2r_trace_exit(); + return NULL; + } + + esas2r_hdebug("add RAID %s, T:%d", dc->raid_grp_name, + esas2r_targ_get_id( + t, + a)); + + if (dc->interleave == 0 + || dc->block_size == 0) { + /* these are invalid values, don't create the target entry. */ + + esas2r_hdebug("invalid RAID group dimensions"); + + esas2r_trace_exit(); + + return NULL; + } + + t->block_size = dc->block_size; + t->inter_byte = dc->interleave; + t->inter_block = dc->interleave / dc->block_size; + t->virt_targ_id = dc->curr_virt_id; + t->phys_targ_id = ESAS2R_TARG_ID_INV; + + t->flags &= ~TF_PASS_THRU; + t->flags |= TF_USED; + + t->identifier_len = 0; + + t->target_state = TS_PRESENT; + + return t; +} + +struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a, + struct esas2r_disc_context *dc, + u8 *ident, + u8 ident_len) +{ + struct esas2r_target *t; + + esas2r_trace_enter(); + + if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) { + esas2r_bugon(); + esas2r_trace_exit(); + return NULL; + } + + /* see if we found this device before. 
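+	 * If a matching identifier is found, that existing entry is reused
+	 * so the device keeps its slot across rediscovery; otherwise the
+	 * slot for the current virtual ID is claimed.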
*/ + + t = esas2r_targ_db_find_by_ident(a, ident, ident_len); + + if (t == NULL) { + t = a->targetdb + dc->curr_virt_id; + + if (ident_len > sizeof(t->identifier) + || t->target_state == TS_PRESENT) { + esas2r_trace_exit(); + return NULL; + } + } + + esas2r_hdebug("add PT; T:%d, V:%d, P:%d", esas2r_targ_get_id(t, a), + dc->curr_virt_id, + dc->curr_phys_id); + + t->block_size = 0; + t->inter_byte = 0; + t->inter_block = 0; + t->virt_targ_id = dc->curr_virt_id; + t->phys_targ_id = dc->curr_phys_id; + t->identifier_len = ident_len; + + memcpy(t->identifier, ident, ident_len); + + t->flags |= TF_PASS_THRU | TF_USED; + + t->target_state = TS_PRESENT; + + return t; +} + +void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t) +{ + esas2r_trace_enter(); + + t->target_state = TS_NOT_PRESENT; + + esas2r_trace("remove id:%d", esas2r_targ_get_id(t, a)); + + esas2r_trace_exit(); +} + +struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a, + u64 *sas_addr) +{ + struct esas2r_target *t; + + for (t = a->targetdb; t < a->targetdb_end; t++) + if (t->sas_addr == *sas_addr) + return t; + + return NULL; +} + +struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a, + void *identifier, + u8 ident_len) +{ + struct esas2r_target *t; + + for (t = a->targetdb; t < a->targetdb_end; t++) { + if (ident_len == t->identifier_len + && memcmp(&t->identifier[0], identifier, + ident_len) == 0) + return t; + } + + return NULL; +} + +u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id) +{ + u16 id = target_id + 1; + + while (id < ESAS2R_MAX_TARGETS) { + struct esas2r_target *t = a->targetdb + id; + + if (t->target_state == TS_PRESENT) + break; + + id++; + } + + return id; +} + +struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a, + u16 virt_id) +{ + struct esas2r_target *t; + + for (t = a->targetdb; t < a->targetdb_end; t++) { + if (t->target_state != TS_PRESENT) + continue; + + if (t->virt_targ_id == virt_id) + return t; + } + + return NULL; +} + +u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a) +{ + u16 devcnt = 0; + struct esas2r_target *t; + unsigned long flags; + + spin_lock_irqsave(&a->mem_lock, flags); + for (t = a->targetdb; t < a->targetdb_end; t++) + if (t->target_state == TS_PRESENT) + devcnt++; + + spin_unlock_irqrestore(&a->mem_lock, flags); + + return devcnt; +} diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c new file mode 100644 index 00000000000..30028e56df6 --- /dev/null +++ b/drivers/scsi/esas2r/esas2r_vda.c @@ -0,0 +1,524 @@ +/* + * linux/drivers/scsi/esas2r/esas2r_vda.c + * esas2r driver VDA firmware interface functions + * + * Copyright (c) 2001-2013 ATTO Technology, Inc. + * (mailto:linuxdrivers@attotech.com) + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + * + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ + +#include "esas2r.h" + +static u8 esas2r_vdaioctl_versions[] = { + ATTO_VDA_VER_UNSUPPORTED, + ATTO_VDA_FLASH_VER, + ATTO_VDA_VER_UNSUPPORTED, + ATTO_VDA_VER_UNSUPPORTED, + ATTO_VDA_CLI_VER, + ATTO_VDA_VER_UNSUPPORTED, + ATTO_VDA_CFG_VER, + ATTO_VDA_MGT_VER, + ATTO_VDA_GSV_VER +}; + +static void clear_vda_request(struct esas2r_request *rq); + +static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a, + struct esas2r_request *rq); + +/* Prepare a VDA IOCTL request to be sent to the firmware. 
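+ * Returns false with vi->status set if the request fails validation;
+ * otherwise the needed SG lists are built, the request is handed to
+ * esas2r_start_request() and true is returned, with
+ * esas2r_complete_vda_ioctl() invoked when the firmware completes it.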
*/ +bool esas2r_process_vda_ioctl(struct esas2r_adapter *a, + struct atto_ioctl_vda *vi, + struct esas2r_request *rq, + struct esas2r_sg_context *sgc) +{ + u32 datalen = 0; + struct atto_vda_sge *firstsg = NULL; + u8 vercnt = (u8)ARRAY_SIZE(esas2r_vdaioctl_versions); + + vi->status = ATTO_STS_SUCCESS; + vi->vda_status = RS_PENDING; + + if (vi->function >= vercnt) { + vi->status = ATTO_STS_INV_FUNC; + return false; + } + + if (vi->version > esas2r_vdaioctl_versions[vi->function]) { + vi->status = ATTO_STS_INV_VERSION; + return false; + } + + if (test_bit(AF_DEGRADED_MODE, &a->flags)) { + vi->status = ATTO_STS_DEGRADED; + return false; + } + + if (vi->function != VDA_FUNC_SCSI) + clear_vda_request(rq); + + rq->vrq->scsi.function = vi->function; + rq->interrupt_cb = esas2r_complete_vda_ioctl; + rq->interrupt_cx = vi; + + switch (vi->function) { + case VDA_FUNC_FLASH: + + if (vi->cmd.flash.sub_func != VDA_FLASH_FREAD + && vi->cmd.flash.sub_func != VDA_FLASH_FWRITE + && vi->cmd.flash.sub_func != VDA_FLASH_FINFO) { + vi->status = ATTO_STS_INV_FUNC; + return false; + } + + if (vi->cmd.flash.sub_func != VDA_FLASH_FINFO) + datalen = vi->data_length; + + rq->vrq->flash.length = cpu_to_le32(datalen); + rq->vrq->flash.sub_func = vi->cmd.flash.sub_func; + + memcpy(rq->vrq->flash.data.file.file_name, + vi->cmd.flash.data.file.file_name, + sizeof(vi->cmd.flash.data.file.file_name)); + + firstsg = rq->vrq->flash.data.file.sge; + break; + + case VDA_FUNC_CLI: + + datalen = vi->data_length; + + rq->vrq->cli.cmd_rsp_len = + cpu_to_le32(vi->cmd.cli.cmd_rsp_len); + rq->vrq->cli.length = cpu_to_le32(datalen); + + firstsg = rq->vrq->cli.sge; + break; + + case VDA_FUNC_MGT: + { + u8 *cmdcurr_offset = sgc->cur_offset + - offsetof(struct atto_ioctl_vda, data) + + offsetof(struct atto_ioctl_vda, cmd) + + offsetof(struct atto_ioctl_vda_mgt_cmd, + data); + /* + * build the data payload SGL here first since + * esas2r_sgc_init() will modify the S/G list offset for the + * management SGL (which is built below where the data SGL is + * usually built). + */ + + if (vi->data_length) { + u32 payldlen = 0; + + if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_HEALTH_REQ + || vi->cmd.mgt.mgt_func == VDAMGT_DEV_METRICS) { + rq->vrq->mgt.payld_sglst_offset = + (u8)offsetof(struct atto_vda_mgmt_req, + payld_sge); + + payldlen = vi->data_length; + datalen = vi->cmd.mgt.data_length; + } else if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_INFO2 + || vi->cmd.mgt.mgt_func == + VDAMGT_DEV_INFO2_BYADDR) { + datalen = vi->data_length; + cmdcurr_offset = sgc->cur_offset; + } else { + vi->status = ATTO_STS_INV_PARAM; + return false; + } + + /* Setup the length so building the payload SGL works */ + rq->vrq->mgt.length = cpu_to_le32(datalen); + + if (payldlen) { + rq->vrq->mgt.payld_length = + cpu_to_le32(payldlen); + + esas2r_sgc_init(sgc, a, rq, + rq->vrq->mgt.payld_sge); + sgc->length = payldlen; + + if (!esas2r_build_sg_list(a, rq, sgc)) { + vi->status = ATTO_STS_OUT_OF_RSRC; + return false; + } + } + } else { + datalen = vi->cmd.mgt.data_length; + + rq->vrq->mgt.length = cpu_to_le32(datalen); + } + + /* + * Now that the payload SGL is built, if any, setup to build + * the management SGL. + */ + firstsg = rq->vrq->mgt.sge; + sgc->cur_offset = cmdcurr_offset; + + /* Finish initializing the management request. 
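+		 * The management SGL itself is built from 'firstsg' after
+		 * the switch statement, using the sgc->cur_offset adjusted
+		 * above.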
*/ + rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func; + rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation; + rq->vrq->mgt.dev_index = + cpu_to_le32(vi->cmd.mgt.dev_index); + + esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data); + break; + } + + case VDA_FUNC_CFG: + + if (vi->data_length + || vi->cmd.cfg.data_length == 0) { + vi->status = ATTO_STS_INV_PARAM; + return false; + } + + if (vi->cmd.cfg.cfg_func == VDA_CFG_INIT) { + vi->status = ATTO_STS_INV_FUNC; + return false; + } + + rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func; + rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length); + + if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) { + memcpy(&rq->vrq->cfg.data, + &vi->cmd.cfg.data, + vi->cmd.cfg.data_length); + + esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func, + &rq->vrq->cfg.data); + } else { + vi->status = ATTO_STS_INV_FUNC; + + return false; + } + + break; + + case VDA_FUNC_GSV: + + vi->cmd.gsv.rsp_len = vercnt; + + memcpy(vi->cmd.gsv.version_info, esas2r_vdaioctl_versions, + vercnt); + + vi->vda_status = RS_SUCCESS; + break; + + default: + + vi->status = ATTO_STS_INV_FUNC; + return false; + } + + if (datalen) { + esas2r_sgc_init(sgc, a, rq, firstsg); + sgc->length = datalen; + + if (!esas2r_build_sg_list(a, rq, sgc)) { + vi->status = ATTO_STS_OUT_OF_RSRC; + return false; + } + } + + esas2r_start_request(a, rq); + + return true; +} + +static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a, + struct esas2r_request *rq) +{ + struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx; + + vi->vda_status = rq->req_stat; + + switch (vi->function) { + case VDA_FUNC_FLASH: + + if (vi->cmd.flash.sub_func == VDA_FLASH_FINFO + || vi->cmd.flash.sub_func == VDA_FLASH_FREAD) + vi->cmd.flash.data.file.file_size = + le32_to_cpu(rq->func_rsp.flash_rsp.file_size); + + break; + + case VDA_FUNC_MGT: + + vi->cmd.mgt.scan_generation = + rq->func_rsp.mgt_rsp.scan_generation; + vi->cmd.mgt.dev_index = le16_to_cpu( + rq->func_rsp.mgt_rsp.dev_index); + + if (vi->data_length == 0) + vi->cmd.mgt.data_length = + le32_to_cpu(rq->func_rsp.mgt_rsp.length); + + esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data); + break; + + case VDA_FUNC_CFG: + + if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) { + struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg; + struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp; + char buf[sizeof(cfg->data.init.fw_release) + 1]; + + cfg->data_length = + cpu_to_le32(sizeof(struct atto_vda_cfg_init)); + cfg->data.init.vda_version = + le32_to_cpu(rsp->vda_version); + cfg->data.init.fw_build = rsp->fw_build; + + snprintf(buf, sizeof(buf), "%1.1u.%2.2u", + (int)LOBYTE(le16_to_cpu(rsp->fw_release)), + (int)HIBYTE(le16_to_cpu(rsp->fw_release))); + + memcpy(&cfg->data.init.fw_release, buf, + sizeof(cfg->data.init.fw_release)); + + if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A') + cfg->data.init.fw_version = + cfg->data.init.fw_build; + else + cfg->data.init.fw_version = + cfg->data.init.fw_release; + } else { + esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func, + &vi->cmd.cfg.data); + } + + break; + + case VDA_FUNC_CLI: + + vi->cmd.cli.cmd_rsp_len = + le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len); + break; + + default: + + break; + } +} + +/* Build a flash VDA request. 
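+ * Only the BEGINW, WRITE and READ sub-functions carry a data payload,
+ * so an SG list offset is set up only for those; the remaining
+ * sub-functions take no SG list.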
+ */
+void esas2r_build_flash_req(struct esas2r_adapter *a,
+			    struct esas2r_request *rq,
+			    u8 sub_func,
+			    u8 cksum,
+			    u32 addr,
+			    u32 length)
+{
+	struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+	clear_vda_request(rq);
+
+	rq->vrq->scsi.function = VDA_FUNC_FLASH;
+
+	if (sub_func == VDA_FLASH_BEGINW
+	    || sub_func == VDA_FLASH_WRITE
+	    || sub_func == VDA_FLASH_READ)
+		vrq->sg_list_offset = (u8)offsetof(struct atto_vda_flash_req,
+						   data.sge);
+
+	vrq->length = cpu_to_le32(length);
+	vrq->flash_addr = cpu_to_le32(addr);
+	vrq->checksum = cksum;
+	vrq->sub_func = sub_func;
+}
+
+/* Build a VDA management request. */
+void esas2r_build_mgt_req(struct esas2r_adapter *a,
+			  struct esas2r_request *rq,
+			  u8 sub_func,
+			  u8 scan_gen,
+			  u16 dev_index,
+			  u32 length,
+			  void *data)
+{
+	struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt;
+
+	clear_vda_request(rq);
+
+	rq->vrq->scsi.function = VDA_FUNC_MGT;
+
+	vrq->mgt_func = sub_func;
+	vrq->scan_generation = scan_gen;
+	vrq->dev_index = cpu_to_le16(dev_index);
+	vrq->length = cpu_to_le32(length);
+
+	if (vrq->length) {
+		if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
+			vrq->sg_list_offset = (u8)offsetof(
+				struct atto_vda_mgmt_req, sge);
+
+			vrq->sge[0].length = cpu_to_le32(SGE_LAST | length);
+			vrq->sge[0].address = cpu_to_le64(
+				rq->vrq_md->phys_addr +
+				sizeof(union atto_vda_req));
+		} else {
+			vrq->sg_list_offset = (u8)offsetof(
+				struct atto_vda_mgmt_req, prde);
+
+			vrq->prde[0].ctl_len = cpu_to_le32(length);
+			vrq->prde[0].address = cpu_to_le64(
+				rq->vrq_md->phys_addr +
+				sizeof(union atto_vda_req));
+		}
+	}
+
+	if (data) {
+		esas2r_nuxi_mgt_data(sub_func, data);
+
+		memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data,
+		       length);
+	}
+}
+
+/* Build a VDA asynchronous event (AE) request. */
+void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+	struct atto_vda_ae_req *vrq = &rq->vrq->ae;
+
+	clear_vda_request(rq);
+
+	rq->vrq->scsi.function = VDA_FUNC_AE;
+
+	vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data));
+
+	if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {
+		vrq->sg_list_offset =
+			(u8)offsetof(struct atto_vda_ae_req, sge);
+		vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length);
+		vrq->sge[0].address = cpu_to_le64(
+			rq->vrq_md->phys_addr +
+			sizeof(union atto_vda_req));
+	} else {
+		vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req,
+						   prde);
+		vrq->prde[0].ctl_len = cpu_to_le32(vrq->length);
+		vrq->prde[0].address = cpu_to_le64(
+			rq->vrq_md->phys_addr +
+			sizeof(union atto_vda_req));
+	}
+}
+
+/* Build a VDA CLI request. */
+void esas2r_build_cli_req(struct esas2r_adapter *a,
+			  struct esas2r_request *rq,
+			  u32 length,
+			  u32 cmd_rsp_len)
+{
+	struct atto_vda_cli_req *vrq = &rq->vrq->cli;
+
+	clear_vda_request(rq);
+
+	rq->vrq->scsi.function = VDA_FUNC_CLI;
+
+	vrq->length = cpu_to_le32(length);
+	vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len);
+	vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge);
+}
+
+/* Build a VDA IOCTL request. */
+void esas2r_build_ioctl_req(struct esas2r_adapter *a,
+			    struct esas2r_request *rq,
+			    u32 length,
+			    u8 sub_func)
+{
+	struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl;
+
+	clear_vda_request(rq);
+
+	rq->vrq->scsi.function = VDA_FUNC_IOCTL;
+
+	vrq->length = cpu_to_le32(length);
+	vrq->sub_func = sub_func;
+	vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ioctl_req, sge);
+}
+
+/* Build a VDA configuration request.
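+ * If 'data' is supplied it is byte-swapped for the firmware as needed
+ * (esas2r_nuxi_cfg_data) and copied into the request; the caller must
+ * keep 'length' within the request's embedded data area.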
*/ +void esas2r_build_cfg_req(struct esas2r_adapter *a, + struct esas2r_request *rq, + u8 sub_func, + u32 length, + void *data) +{ + struct atto_vda_cfg_req *vrq = &rq->vrq->cfg; + + clear_vda_request(rq); + + rq->vrq->scsi.function = VDA_FUNC_CFG; + + vrq->sub_func = sub_func; + vrq->length = cpu_to_le32(length); + + if (data) { + esas2r_nuxi_cfg_data(sub_func, data); + + memcpy(&vrq->data, data, length); + } +} + +static void clear_vda_request(struct esas2r_request *rq) +{ + u32 handle = rq->vrq->scsi.handle; + + memset(rq->vrq, 0, sizeof(*rq->vrq)); + + rq->vrq->scsi.handle = handle; + + rq->req_stat = RS_PENDING; + + /* since the data buffer is separate clear that too */ + + memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN); + + /* + * Setup next and prev pointer in case the request is not going through + * esas2r_start_request(). + */ + + INIT_LIST_HEAD(&rq->req_list); +} diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index 34552bf1c02..55548dc5cec 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c @@ -530,7 +530,7 @@ static int esp_need_to_nego_sync(struct esp_target_data *tp) static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, struct esp_lun_data *lp) { - if (!ent->tag[0]) { + if (!ent->orig_tag[0]) { /* Non-tagged, slot already taken? */ if (lp->non_tagged_cmd) return -EBUSY; @@ -564,9 +564,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, return -EBUSY; } - BUG_ON(lp->tagged_cmds[ent->tag[1]]); + BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]); - lp->tagged_cmds[ent->tag[1]] = ent; + lp->tagged_cmds[ent->orig_tag[1]] = ent; lp->num_tagged++; return 0; @@ -575,9 +575,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, static void esp_free_lun_tag(struct esp_cmd_entry *ent, struct esp_lun_data *lp) { - if (ent->tag[0]) { - BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent); - lp->tagged_cmds[ent->tag[1]] = NULL; + if (ent->orig_tag[0]) { + BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent); + lp->tagged_cmds[ent->orig_tag[1]] = NULL; lp->num_tagged--; } else { BUG_ON(lp->non_tagged_cmd != ent); @@ -667,6 +667,8 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) ent->tag[0] = 0; ent->tag[1] = 0; } + ent->orig_tag[0] = ent->tag[0]; + ent->orig_tag[1] = ent->tag[1]; if (esp_alloc_lun_tag(ent, lp) < 0) continue; diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h index 28e22acf87e..cd68805e8d7 100644 --- a/drivers/scsi/esp_scsi.h +++ b/drivers/scsi/esp_scsi.h @@ -271,6 +271,7 @@ struct esp_cmd_entry { #define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */ u8 tag[2]; + u8 orig_tag[2]; u8 status; u8 message; diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index b5d92fc93c7..00ee0ed642a 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -408,6 +408,7 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, } ctlr = fcoe_ctlr_device_priv(ctlr_dev); + ctlr->cdev = ctlr_dev; fcoe = fcoe_ctlr_priv(ctlr); dev_hold(netdev); @@ -490,7 +491,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) { struct net_device *netdev = fcoe->netdev; struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); - struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); rtnl_lock(); if (!fcoe->removed) @@ -501,7 +501,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) /* tear-down the FCoE controller */ fcoe_ctlr_destroy(fip); scsi_host_put(fip->lp->host); - fcoe_ctlr_device_delete(ctlr_dev); dev_put(netdev); module_put(THIS_MODULE); } @@ 
-776,7 +775,6 @@ static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev) struct fcoe_port *port; struct net_device *realdev; int rc; - struct netdev_fcoe_hbainfo fdmi; port = lport_priv(lport); fcoe = port->priv; @@ -790,9 +788,13 @@ static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev) return; if (realdev->netdev_ops->ndo_fcoe_get_hbainfo) { - memset(&fdmi, 0, sizeof(fdmi)); + struct netdev_fcoe_hbainfo *fdmi; + fdmi = kzalloc(sizeof(*fdmi), GFP_KERNEL); + if (!fdmi) + return; + rc = realdev->netdev_ops->ndo_fcoe_get_hbainfo(realdev, - &fdmi); + fdmi); if (rc) { printk(KERN_INFO "fcoe: Failed to retrieve FDMI " "information from netdev.\n"); @@ -802,38 +804,39 @@ static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev) snprintf(fc_host_serial_number(lport->host), FC_SERIAL_NUMBER_SIZE, "%s", - fdmi.serial_number); + fdmi->serial_number); snprintf(fc_host_manufacturer(lport->host), FC_SERIAL_NUMBER_SIZE, "%s", - fdmi.manufacturer); + fdmi->manufacturer); snprintf(fc_host_model(lport->host), FC_SYMBOLIC_NAME_SIZE, "%s", - fdmi.model); + fdmi->model); snprintf(fc_host_model_description(lport->host), FC_SYMBOLIC_NAME_SIZE, "%s", - fdmi.model_description); + fdmi->model_description); snprintf(fc_host_hardware_version(lport->host), FC_VERSION_STRING_SIZE, "%s", - fdmi.hardware_version); + fdmi->hardware_version); snprintf(fc_host_driver_version(lport->host), FC_VERSION_STRING_SIZE, "%s", - fdmi.driver_version); + fdmi->driver_version); snprintf(fc_host_optionrom_version(lport->host), FC_VERSION_STRING_SIZE, "%s", - fdmi.optionrom_version); + fdmi->optionrom_version); snprintf(fc_host_firmware_version(lport->host), FC_VERSION_STRING_SIZE, "%s", - fdmi.firmware_version); + fdmi->firmware_version); /* Enable FDMI lport states */ lport->fdmi_enabled = 1; + kfree(fdmi); } else { lport->fdmi_enabled = 0; printk(KERN_INFO "fcoe: No FDMI support.\n"); @@ -1438,22 +1441,28 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, ctlr = fcoe_to_ctlr(fcoe); lport = ctlr->lp; if (unlikely(!lport)) { - FCOE_NETDEV_DBG(netdev, "Cannot find hba structure"); + FCOE_NETDEV_DBG(netdev, "Cannot find hba structure\n"); goto err2; } if (!lport->link_up) goto err2; - FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p " - "data:%p tail:%p end:%p sum:%d dev:%s", + FCOE_NETDEV_DBG(netdev, + "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n", skb->len, skb->data_len, skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), skb->csum, skb->dev ? 
skb->dev->name : "<NULL>"); + + skb = skb_share_check(skb, GFP_ATOMIC); + + if (skb == NULL) + return NET_RX_DROP; + eh = eth_hdr(skb); if (is_fip_mode(ctlr) && - compare_ether_addr(eh->h_source, ctlr->dest_addr)) { + !ether_addr_equal(eh->h_source, ctlr->dest_addr)) { FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n", eh->h_source); goto err; @@ -1538,13 +1547,13 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, wake_up_process(fps->thread); spin_unlock(&fps->fcoe_rx_list.lock); - return 0; + return NET_RX_SUCCESS; err: per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++; put_cpu(); err2: kfree_skb(skb); - return -1; + return NET_RX_DROP; } /** @@ -1657,10 +1666,13 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) skb->priority = fcoe->priority; if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && - fcoe->realdev->features & NETIF_F_HW_VLAN_TX) { - skb->vlan_tci = VLAN_TAG_PRESENT | - vlan_dev_vlan_id(fcoe->netdev); + fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) { + /* must set skb->dev before calling vlan_put_tag */ skb->dev = fcoe->realdev; + skb = __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vlan_dev_vlan_id(fcoe->netdev)); + if (!skb) + return -ENOMEM; } else skb->dev = fcoe->netdev; @@ -1783,13 +1795,13 @@ static void fcoe_recv_frame(struct sk_buff *skb) lport = fr->fr_dev; if (unlikely(!lport)) { if (skb->destructor != fcoe_percpu_flush_done) - FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb"); + FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n"); kfree_skb(skb); return; } - FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " - "head:%p data:%p tail:%p end:%p sum:%d dev:%s", + FCOE_NETDEV_DBG(skb->dev, + "skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n", skb->len, skb->data_len, skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), skb->csum, @@ -1860,7 +1872,7 @@ static int fcoe_percpu_receive_thread(void *arg) skb_queue_head_init(&tmp); - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); retry: while (!kthread_should_stop()) { @@ -1977,7 +1989,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, { struct fcoe_ctlr_device *cdev; struct fc_lport *lport = NULL; - struct net_device *netdev = ptr; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct fcoe_ctlr *ctlr; struct fcoe_interface *fcoe; struct fcoe_port *port; @@ -2194,6 +2206,8 @@ out_nodev: */ static void fcoe_destroy_work(struct work_struct *work) { + struct fcoe_ctlr_device *cdev; + struct fcoe_ctlr *ctlr; struct fcoe_port *port; struct fcoe_interface *fcoe; struct Scsi_Host *shost; @@ -2224,10 +2238,15 @@ static void fcoe_destroy_work(struct work_struct *work) mutex_lock(&fcoe_config_mutex); fcoe = port->priv; + ctlr = fcoe_to_ctlr(fcoe); + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); + fcoe_if_destroy(port->lport); fcoe_interface_cleanup(fcoe); mutex_unlock(&fcoe_config_mutex); + + fcoe_ctlr_device_delete(cdev); } /** @@ -2335,7 +2354,9 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, rc = -EIO; rtnl_unlock(); fcoe_interface_cleanup(fcoe); - goto out_nortnl; + mutex_unlock(&fcoe_config_mutex); + fcoe_ctlr_device_delete(ctlr_dev); + goto out; } /* Make this the "master" N_Port */ @@ -2375,8 +2396,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, out_nodev: rtnl_unlock(); -out_nortnl: mutex_unlock(&fcoe_config_mutex); +out: return rc; } @@ -2612,14 +2633,18 @@ static int __init fcoe_init(void) 
 		skb_queue_head_init(&p->fcoe_rx_list);
 	}
 
+	cpu_notifier_register_begin();
+
 	for_each_online_cpu(cpu)
 		fcoe_percpu_thread_create(cpu);
 
 	/* Initialize per CPU interrupt thread */
-	rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
+	rc = __register_hotcpu_notifier(&fcoe_cpu_notifier);
 	if (rc)
 		goto out_free;
 
+	cpu_notifier_register_done();
+
 	/* Setup link change notification */
 	fcoe_dev_setup();
@@ -2634,6 +2659,9 @@ out_free:
 	for_each_online_cpu(cpu) {
 		fcoe_percpu_thread_destroy(cpu);
 	}
+
+	cpu_notifier_register_done();
+
 	mutex_unlock(&fcoe_config_mutex);
 	destroy_workqueue(fcoe_wq);
 	return rc;
@@ -2666,11 +2694,15 @@ static void __exit fcoe_exit(void)
 	}
 	rtnl_unlock();
 
-	unregister_hotcpu_notifier(&fcoe_cpu_notifier);
+	cpu_notifier_register_begin();
 
 	for_each_online_cpu(cpu)
 		fcoe_percpu_thread_destroy(cpu);
 
+	__unregister_hotcpu_notifier(&fcoe_cpu_notifier);
+
+	cpu_notifier_register_done();
+
 	mutex_unlock(&fcoe_config_mutex);
 
 	/*
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 08c3bc398da..34a1b1f333b 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -160,74 +160,113 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
 }
 EXPORT_SYMBOL(fcoe_ctlr_init);
 
+/**
+ * fcoe_sysfs_fcf_add() - Add a fcoe_fcf{,_device} to a fcoe_ctlr{,_device}
+ * @new: The newly discovered FCF
+ *
+ * Called with fip->ctlr_mutex held
+ */
 static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
 {
 	struct fcoe_ctlr *fip = new->fip;
-	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
-	struct fcoe_fcf_device temp, *fcf_dev;
-	int rc = 0;
+	struct fcoe_ctlr_device *ctlr_dev;
+	struct fcoe_fcf_device *temp, *fcf_dev;
+	int rc = -ENOMEM;
 
 	LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
 			new->fabric_name, new->fcf_mac);
 
-	mutex_lock(&ctlr_dev->lock);
-
-	temp.fabric_name = new->fabric_name;
-	temp.switch_name = new->switch_name;
-	temp.fc_map = new->fc_map;
-	temp.vfid = new->vfid;
-	memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
-	temp.priority = new->pri;
-	temp.fka_period = new->fka_period;
-	temp.selected = 0; /* default to unselected */
-
-	fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
-	if (unlikely(!fcf_dev)) {
-		rc = -ENOMEM;
+	temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+	if (!temp)
 		goto out;
-	}
+
+	temp->fabric_name = new->fabric_name;
+	temp->switch_name = new->switch_name;
+	temp->fc_map = new->fc_map;
+	temp->vfid = new->vfid;
+	memcpy(temp->mac, new->fcf_mac, ETH_ALEN);
+	temp->priority = new->pri;
+	temp->fka_period = new->fka_period;
+	temp->selected = 0; /* default to unselected */
 
 	/*
-	 * The fcoe_sysfs layer can return a CONNECTED fcf that
-	 * has a priv (fcf was never deleted) or a CONNECTED fcf
-	 * that doesn't have a priv (fcf was deleted). However,
-	 * libfcoe will always delete FCFs before trying to add
-	 * them. This is ensured because both recv_adv and
-	 * age_fcfs are protected by the the fcoe_ctlr's mutex.
-	 * This means that we should never get a FCF with a
-	 * non-NULL priv pointer.
+	 * If ctlr_dev doesn't exist then it means we're a libfcoe user
+	 * who doesn't use fcoe_sysfs and didn't allocate a fcoe_ctlr_device.
+	 * fnic would be an example of a driver with this behavior. In this
+	 * case we want to add the fcoe_fcf to the fcoe_ctlr list, but we
+	 * don't want to make sysfs changes.
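+	 * A NULL ctlr_dev is therefore not treated as an error here: the
+	 * FCF is still added to fip->fcfs and counted below.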
 	 */
-	BUG_ON(fcf_dev->priv);
-	fcf_dev->priv = new;
-	new->fcf_dev = fcf_dev;
+	ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+	if (ctlr_dev) {
+		mutex_lock(&ctlr_dev->lock);
+		fcf_dev = fcoe_fcf_device_add(ctlr_dev, temp);
+		if (unlikely(!fcf_dev)) {
+			rc = -ENOMEM;
+			mutex_unlock(&ctlr_dev->lock);
+			goto out;
+		}
+
+		/*
+		 * The fcoe_sysfs layer can return a CONNECTED fcf that
+		 * has a priv (fcf was never deleted) or a CONNECTED fcf
+		 * that doesn't have a priv (fcf was deleted). However,
+		 * libfcoe will always delete FCFs before trying to add
+		 * them. This is ensured because both recv_adv and
+		 * age_fcfs are protected by the fcoe_ctlr's mutex.
+		 * This means that we should never get a FCF with a
+		 * non-NULL priv pointer.
+		 */
+		BUG_ON(fcf_dev->priv);
+
+		fcf_dev->priv = new;
+		new->fcf_dev = fcf_dev;
+		mutex_unlock(&ctlr_dev->lock);
+	}
 
 	list_add(&new->list, &fip->fcfs);
 	fip->fcf_count++;
+	rc = 0;
 out:
-	mutex_unlock(&ctlr_dev->lock);
+	kfree(temp);
 	return rc;
 }
 
+/**
+ * fcoe_sysfs_fcf_del() - Remove a fcoe_fcf{,_device} from a fcoe_ctlr{,_device}
+ * @new: The FCF to be removed
+ *
+ * Called with fip->ctlr_mutex held
+ */
 static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
 {
 	struct fcoe_ctlr *fip = new->fip;
-	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+	struct fcoe_ctlr_device *cdev;
 	struct fcoe_fcf_device *fcf_dev;
 
 	list_del(&new->list);
 	fip->fcf_count--;
 
-	mutex_lock(&ctlr_dev->lock);
-
-	fcf_dev = fcoe_fcf_to_fcf_dev(new);
-	WARN_ON(!fcf_dev);
-	new->fcf_dev = NULL;
-	fcoe_fcf_device_delete(fcf_dev);
-	kfree(new);
-
-	mutex_unlock(&ctlr_dev->lock);
+	/*
+	 * If ctlr_dev doesn't exist then it means we're a libfcoe user
+	 * who doesn't use fcoe_sysfs and didn't allocate a fcoe_ctlr_device
+	 * or a fcoe_fcf_device.
+	 *
+	 * fnic would be an example of a driver with this behavior. In this
+	 * case we want to remove the fcoe_fcf from the fcoe_ctlr list (above),
+	 * but we don't want to make sysfs changes.
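+	 * Without a ctlr_dev there is no fcoe_fcf_device to delete, so
+	 * only the list bookkeeping above applies in that case.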
+ */ + cdev = fcoe_ctlr_to_ctlr_dev(fip); + if (cdev) { + mutex_lock(&cdev->lock); + fcf_dev = fcoe_fcf_to_fcf_dev(new); + WARN_ON(!fcf_dev); + new->fcf_dev = NULL; + fcoe_fcf_device_delete(fcf_dev); + kfree(new); + mutex_unlock(&cdev->lock); + } } /** @@ -300,7 +339,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip) spin_unlock_bh(&fip->ctlr_lock); sel = fip->sel_fcf; - if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr)) + if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr)) goto unlock; if (!is_zero_ether_addr(fip->dest_addr)) { printk(KERN_NOTICE "libfcoe: host%d: " @@ -1000,7 +1039,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) if (fcf->switch_name == new.switch_name && fcf->fabric_name == new.fabric_name && fcf->fc_map == new.fc_map && - compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) { + ether_addr_equal(fcf->fcf_mac, new.fcf_mac)) { found = 1; break; } @@ -1340,7 +1379,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, mp = (struct fip_mac_desc *)desc; if (dlen < sizeof(*mp)) goto err; - if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac)) + if (!ether_addr_equal(mp->fd_mac, fcf->fcf_mac)) goto err; desc_mask &= ~BIT(FIP_DT_MAC); break; @@ -1418,8 +1457,8 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, * 'port_id' is already validated, check MAC address and * wwpn */ - if (compare_ether_addr(fip->get_src_addr(vn_port), - vp->fd_mac) != 0 || + if (!ether_addr_equal(fip->get_src_addr(vn_port), + vp->fd_mac) || get_unaligned_be64(&vp->fd_wwpn) != vn_port->wwpn) continue; @@ -1453,6 +1492,9 @@ err: */ void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) { + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return; skb_queue_tail(&fip->fip_recv_list, skb); schedule_work(&fip->recv_work); } @@ -1479,12 +1521,12 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb) goto drop; eh = eth_hdr(skb); if (fip->mode == FIP_MODE_VN2VN) { - if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && - compare_ether_addr(eh->h_dest, fcoe_all_vn2vn) && - compare_ether_addr(eh->h_dest, fcoe_all_p2p)) + if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) && + !ether_addr_equal(eh->h_dest, fcoe_all_vn2vn) && + !ether_addr_equal(eh->h_dest, fcoe_all_p2p)) goto drop; - } else if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && - compare_ether_addr(eh->h_dest, fcoe_all_enode)) + } else if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) && + !ether_addr_equal(eh->h_dest, fcoe_all_enode)) goto drop; fiph = (struct fip_header *)skb->data; op = ntohs(fiph->fip_op); @@ -1548,9 +1590,6 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip) { struct fcoe_fcf *fcf; struct fcoe_fcf *best = fip->sel_fcf; - struct fcoe_fcf *first; - - first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list); list_for_each_entry(fcf, &fip->fcfs, list) { LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx " @@ -1568,17 +1607,15 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip) "" : "un"); continue; } - if (fcf->fabric_name != first->fabric_name || - fcf->vfid != first->vfid || - fcf->fc_map != first->fc_map) { + if (!best || fcf->pri < best->pri || best->flogi_sent) + best = fcf; + if (fcf->fabric_name != best->fabric_name || + fcf->vfid != best->vfid || + fcf->fc_map != best->fc_map) { LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " "or FC-MAP\n"); return NULL; } - if (fcf->flogi_sent) - continue; - if (!best || fcf->pri < best->pri || best->flogi_sent) - best = fcf; } 
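+	/* 'best' now points at the usable FCF with the lowest priority
+	 * value (an FCF whose FLOGI was already sent is superseded by the
+	 * next candidate), or is NULL if none was found. */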
 	fip->sel_fcf = best;
 	if (best) {
@@ -1861,7 +1898,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
 	 * address_mode flag to use FC_OUI-based Ethernet DA.
 	 * Otherwise we use the FCoE gateway addr
 	 */
-	if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
+	if (ether_addr_equal(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
 		fcoe_ctlr_map_dest(fip);
 	} else {
 		memcpy(fip->dest_addr, sa, ETH_ALEN);
@@ -2095,7 +2132,11 @@ static struct fc_rport_operations fcoe_ctlr_vn_rport_ops = {
  */
 static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
 {
+	struct fc_rport_priv *rdata;
+
 	mutex_lock(&lport->disc.disc_mutex);
+	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers)
+		lport->tt.rport_logoff(rdata);
 	lport->disc.disc_callback = NULL;
 	mutex_unlock(&lport->disc.disc_mutex);
 }
@@ -2161,7 +2202,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
 
 	if (fip->probe_tries < FIP_VN_RLIM_COUNT) {
 		fip->probe_tries++;
-		wait = random32() % FIP_VN_PROBE_WAIT;
+		wait = prandom_u32() % FIP_VN_PROBE_WAIT;
 	} else
 		wait = FIP_VN_RLIM_INT;
 	mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait));
@@ -2794,7 +2835,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
 			  fcoe_all_vn2vn, 0);
 		fip->port_ka_time = jiffies +
 			msecs_to_jiffies(FIP_VN_BEACON_INT +
-					 (random32() % FIP_VN_BEACON_FUZZ));
+					 (prandom_u32() % FIP_VN_BEACON_FUZZ));
 	}
 	if (time_before(fip->port_ka_time, next_time))
 		next_time = fip->port_ka_time;
@@ -2815,6 +2856,47 @@ unlock:
 }
 
 /**
+ * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode
+ * @lport: The local port to be (re)configured
+ * @fip: The FCoE controller whose mode is changing
+ * @fip_mode: The new fip mode
+ *
+ * Note that we shouldn't be changing the libfc discovery settings
+ * (fc_disc_config) while an lport is going through the libfc state
+ * machine. The mode can only be changed when a fcoe_ctlr device is
+ * disabled, so that should ensure that this routine is only called
+ * when nothing is happening.
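+ *
+ * In VN2VN mode discovery is handed to the FIP controller itself
+ * (fcoe_ctlr_disc_*) and the lport runs point-to-multipoint; in fabric
+ * mode those hooks are cleared. Both paths finish by re-seating the
+ * discovery private data via fc_disc_config().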
+ */ +static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, + enum fip_state fip_mode) +{ + void *priv; + + WARN_ON(lport->state != LPORT_ST_RESET && + lport->state != LPORT_ST_DISABLED); + + if (fip_mode == FIP_MODE_VN2VN) { + lport->rport_priv_size = sizeof(struct fcoe_rport); + lport->point_to_multipoint = 1; + lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; + lport->tt.disc_start = fcoe_ctlr_disc_start; + lport->tt.disc_stop = fcoe_ctlr_disc_stop; + lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; + priv = fip; + } else { + lport->rport_priv_size = 0; + lport->point_to_multipoint = 0; + lport->tt.disc_recv_req = NULL; + lport->tt.disc_start = NULL; + lport->tt.disc_stop = NULL; + lport->tt.disc_stop_final = NULL; + priv = lport; + } + + fc_disc_config(lport, priv); +} + +/** * fcoe_libfc_config() - Sets up libfc related properties for local port * @lport: The local port to configure libfc for * @fip: The FCoE controller in use by the local port @@ -2833,21 +2915,9 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip, fc_exch_init(lport); fc_elsct_init(lport); fc_lport_init(lport); - if (fip->mode == FIP_MODE_VN2VN) - lport->rport_priv_size = sizeof(struct fcoe_rport); fc_rport_init(lport); - if (fip->mode == FIP_MODE_VN2VN) { - lport->point_to_multipoint = 1; - lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; - lport->tt.disc_start = fcoe_ctlr_disc_start; - lport->tt.disc_stop = fcoe_ctlr_disc_stop; - lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; - mutex_init(&lport->disc.disc_mutex); - INIT_LIST_HEAD(&lport->disc.rports); - lport->disc.priv = fip; - } else { - fc_disc_init(lport); - } + fc_disc_init(lport); + fcoe_ctlr_mode_set(lport, fip, fip->mode); return 0; } EXPORT_SYMBOL_GPL(fcoe_libfc_config); @@ -2875,6 +2945,7 @@ EXPORT_SYMBOL(fcoe_fcf_get_selected); void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) { struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + struct fc_lport *lport = ctlr->lp; mutex_lock(&ctlr->ctlr_mutex); switch (ctlr_dev->mode) { @@ -2888,5 +2959,7 @@ void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) } mutex_unlock(&ctlr->ctlr_mutex); + + fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode); } EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode); diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index 8c05ae017f5..045c4e11ee5 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -300,29 +300,29 @@ static ssize_t store_ctlr_mode(struct device *dev, switch (ctlr->enabled) { case FCOE_CTLR_ENABLED: - LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled."); + LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n"); return -EBUSY; case FCOE_CTLR_DISABLED: if (!ctlr->f->set_fcoe_ctlr_mode) { LIBFCOE_SYSFS_DBG(ctlr, - "Mode change not supported by LLD."); + "Mode change not supported by LLD.\n"); return -ENOTSUPP; } ctlr->mode = fcoe_parse_mode(mode); if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) { - LIBFCOE_SYSFS_DBG(ctlr, - "Unknown mode %s provided.", buf); + LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n", + buf); return -EINVAL; } ctlr->f->set_fcoe_ctlr_mode(ctlr); - LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf); + LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf); return count; case FCOE_CTLR_UNUSED: default: - LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported."); + LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n"); return -ENOTSUPP; }; } @@ -507,7 +507,7 @@ static const struct attribute_group *fcoe_fcf_attr_groups[] 
= { NULL, }; -struct bus_type fcoe_bus_type; +static struct bus_type fcoe_bus_type; static int fcoe_bus_match(struct device *dev, struct device_driver *drv) @@ -541,35 +541,39 @@ static void fcoe_fcf_device_release(struct device *dev) kfree(fcf); } -struct device_type fcoe_ctlr_device_type = { +static struct device_type fcoe_ctlr_device_type = { .name = "fcoe_ctlr", .groups = fcoe_ctlr_attr_groups, .release = fcoe_ctlr_device_release, }; -struct device_type fcoe_fcf_device_type = { +static struct device_type fcoe_fcf_device_type = { .name = "fcoe_fcf", .groups = fcoe_fcf_attr_groups, .release = fcoe_fcf_device_release, }; -struct bus_attribute fcoe_bus_attr_group[] = { - __ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store), - __ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store), - __ATTR_NULL +static BUS_ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store); +static BUS_ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store); + +static struct attribute *fcoe_bus_attrs[] = { + &bus_attr_ctlr_create.attr, + &bus_attr_ctlr_destroy.attr, + NULL, }; +ATTRIBUTE_GROUPS(fcoe_bus); -struct bus_type fcoe_bus_type = { +static struct bus_type fcoe_bus_type = { .name = "fcoe", .match = &fcoe_bus_match, - .bus_attrs = fcoe_bus_attr_group, + .bus_groups = fcoe_bus_groups, }; /** * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed */ -void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr) +static void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr) { if (!fcoe_ctlr_work_q(ctlr)) { printk(KERN_ERR @@ -590,8 +594,8 @@ void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr) * Return value: * 1 on success / 0 already queued / < 0 for error */ -int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr, - struct work_struct *work) +static int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr, + struct work_struct *work) { if (unlikely(!fcoe_ctlr_work_q(ctlr))) { printk(KERN_ERR @@ -609,7 +613,7 @@ int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr, * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed */ -void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr) +static void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr) { if (!fcoe_ctlr_devloss_work_q(ctlr)) { printk(KERN_ERR @@ -631,9 +635,9 @@ void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr) * Return value: * 1 on success / 0 already queued / < 0 for error */ -int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr, - struct delayed_work *work, - unsigned long delay) +static int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr, + struct delayed_work *work, + unsigned long delay) { if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) { printk(KERN_ERR @@ -653,7 +657,7 @@ static int fcoe_fcf_device_match(struct fcoe_fcf_device *new, if (new->switch_name == old->switch_name && new->fabric_name == old->fabric_name && new->fc_map == old->fc_map && - compare_ether_addr(new->mac, old->mac) == 0) + ether_addr_equal(new->mac, old->mac)) return 1; return 0; } diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index f3a5a53e863..74277c20f6a 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -180,24 +180,10 @@ void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev) { struct fcoe_ctlr *fip = 
fcoe_ctlr_device_priv(ctlr_dev); struct net_device *netdev = fcoe_get_netdev(fip->lp); - struct fcoe_fc_els_lesb *fcoe_lesb; - struct fc_els_lesb fc_lesb; - - __fcoe_get_lesb(fip->lp, &fc_lesb, netdev); - fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb); - - ctlr_dev->lesb.lesb_link_fail = - ntohl(fcoe_lesb->lesb_link_fail); - ctlr_dev->lesb.lesb_vlink_fail = - ntohl(fcoe_lesb->lesb_vlink_fail); - ctlr_dev->lesb.lesb_miss_fka = - ntohl(fcoe_lesb->lesb_miss_fka); - ctlr_dev->lesb.lesb_symb_err = - ntohl(fcoe_lesb->lesb_symb_err); - ctlr_dev->lesb.lesb_err_block = - ntohl(fcoe_lesb->lesb_err_block); - ctlr_dev->lesb.lesb_fcs_error = - ntohl(fcoe_lesb->lesb_fcs_error); + struct fc_els_lesb *fc_lesb; + + fc_lesb = (struct fc_els_lesb *)(&ctlr_dev->lesb); + __fcoe_get_lesb(fip->lp, fc_lesb, netdev); } EXPORT_SYMBOL_GPL(fcoe_ctlr_get_lesb); @@ -704,7 +690,7 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer) static int libfcoe_device_notification(struct notifier_block *notifier, ulong event, void *ptr) { - struct net_device *netdev = ptr; + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_UNREGISTER: @@ -721,7 +707,6 @@ ssize_t fcoe_ctlr_create_store(struct bus_type *bus, { struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; - struct fcoe_ctlr_device *ctlr_dev = NULL; int rc = 0; int err; @@ -768,9 +753,8 @@ ssize_t fcoe_ctlr_create_store(struct bus_type *bus, goto out_putdev; } - LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n", - ft->name, (ctlr_dev) ? "succeeded" : "failed", - netdev->name); + LIBFCOE_TRANSPORT_DBG("transport %s succeeded to create fcoe on %s.\n", + ft->name, netdev->name); out_putdev: dev_put(netdev); diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 98436c36303..1d3521e13d7 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -27,6 +27,7 @@ #include "fnic_io.h" #include "fnic_res.h" #include "fnic_trace.h" +#include "fnic_stats.h" #include "vnic_dev.h" #include "vnic_wq.h" #include "vnic_rq.h" @@ -38,12 +39,15 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.5.0.2" +#define DRV_VERSION "1.6.0.10" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " #define DESC_CLEAN_LOW_WATERMARK 8 -#define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */ +#define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ +#define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ +#define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */ +#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */ #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ #define FNIC_DFLT_QUEUE_DEPTH 32 #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ @@ -154,6 +158,9 @@ do { \ FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \ shost_printk(kern_level, host, fmt, ##args);) +#define FNIC_MAIN_NOTE(kern_level, host, fmt, args...) 
\ + shost_printk(kern_level, host, fmt, ##args) + extern const char *fnic_state_str[]; enum fnic_intx_intr_index { @@ -192,6 +199,18 @@ enum fnic_state { struct mempool; +enum fnic_evt { + FNIC_EVT_START_VLAN_DISC = 1, + FNIC_EVT_START_FCF_DISC = 2, + FNIC_EVT_MAX, +}; + +struct fnic_event { + struct list_head list; + struct fnic *fnic; + enum fnic_evt event; +}; + /* Per-instance private data structure */ struct fnic { struct fc_lport *lport; @@ -203,16 +222,25 @@ struct fnic { struct vnic_stats *stats; unsigned long stats_time; /* time of stats update */ + unsigned long stats_reset_time; /* time of stats reset */ struct vnic_nic_cfg *nic_cfg; char name[IFNAMSIZ]; struct timer_list notify_timer; /* used for MSI interrupts */ + unsigned int fnic_max_tag_id; unsigned int err_intr_offset; unsigned int link_intr_offset; unsigned int wq_count; unsigned int cq_count; + struct dentry *fnic_stats_debugfs_host; + struct dentry *fnic_stats_debugfs_file; + struct dentry *fnic_reset_debugfs_file; + unsigned int reset_stats; + atomic64_t io_cmpl_skip; + struct fnic_stats fnic_stats; + u32 vlan_hw_insert:1; /* let hw insert the tag */ u32 in_remove:1; /* fnic device in removal */ u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ @@ -254,6 +282,18 @@ struct fnic { struct sk_buff_head frame_queue; struct sk_buff_head tx_queue; + /*** FIP related data members -- start ***/ + void (*set_vlan)(struct fnic *, u16 vlan); + struct work_struct fip_frame_work; + struct sk_buff_head fip_frame_queue; + struct timer_list fip_timer; + struct list_head vlans; + spinlock_t vlans_lock; + + struct work_struct event_work; + struct list_head evlist; + /*** FIP related data members -- end ***/ + /* copy work queue cache line section */ ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; /* completion queue cache line section */ @@ -278,6 +318,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip) } extern struct workqueue_struct *fnic_event_queue; +extern struct workqueue_struct *fnic_fip_queue; extern struct device_attribute *fnic_attrs[]; void fnic_clear_intr_mode(struct fnic *fnic); @@ -289,6 +330,7 @@ int fnic_send(struct fc_lport *, struct fc_frame *); void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); void fnic_handle_frame(struct work_struct *work); void fnic_handle_link(struct work_struct *work); +void fnic_handle_event(struct work_struct *work); int fnic_rq_cmpl_handler(struct fnic *fnic, int); int fnic_alloc_rq_frame(struct vnic_rq *rq); void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); @@ -321,10 +363,17 @@ void fnic_handle_link_event(struct fnic *fnic); int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); +void fnic_handle_fip_frame(struct work_struct *work); +void fnic_handle_fip_event(struct fnic *fnic); +void fnic_fcoe_reset_vlans(struct fnic *fnic); +void fnic_fcoe_evlist_free(struct fnic *fnic); +extern void fnic_handle_fip_timer(struct fnic *fnic); + static inline int fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) { return ((fnic->state_flags & st_flags) == st_flags); } void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); +void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); #endif /* _FNIC_H_ */ diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index adc1f7f471f..2c613bdea78 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -23,14 +23,97 @@ static struct dentry 
*fnic_trace_debugfs_root; static struct dentry *fnic_trace_debugfs_file; static struct dentry *fnic_trace_enable; +static struct dentry *fnic_stats_debugfs_root; + +static struct dentry *fnic_fc_trace_debugfs_file; +static struct dentry *fnic_fc_rdata_trace_debugfs_file; +static struct dentry *fnic_fc_trace_enable; +static struct dentry *fnic_fc_trace_clear; + +struct fc_trace_flag_type { + u8 fc_row_file; + u8 fc_normal_file; + u8 fnic_trace; + u8 fc_trace; + u8 fc_clear; +}; + +static struct fc_trace_flag_type *fc_trc_flag; + +/* + * fnic_debugfs_init - Initialize debugfs for fnic debug logging + * + * Description: + * When Debugfs is configured this routine sets up the fnic debugfs + * file system. If not already created, this routine will create the + * fnic directory and statistics directory for trace buffer and + * stats logging. + */ +int fnic_debugfs_init(void) +{ + int rc = -1; + fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL); + if (!fnic_trace_debugfs_root) { + printk(KERN_DEBUG "Cannot create debugfs root\n"); + return rc; + } + + fnic_stats_debugfs_root = debugfs_create_dir("statistics", + fnic_trace_debugfs_root); + if (!fnic_stats_debugfs_root) { + printk(KERN_DEBUG "Cannot create Statistics directory\n"); + return rc; + } + + /* Allocate memory for the trace flag structure */ + fc_trc_flag = (struct fc_trace_flag_type *) + vmalloc(sizeof(struct fc_trace_flag_type)); + + if (fc_trc_flag) { + fc_trc_flag->fc_row_file = 0; + fc_trc_flag->fc_normal_file = 1; + fc_trc_flag->fnic_trace = 2; + fc_trc_flag->fc_trace = 3; + fc_trc_flag->fc_clear = 4; + } + + rc = 0; + return rc; +} + +/* + * fnic_debugfs_terminate - Tear down debugfs infrastructure + * + * Description: + * When Debugfs is configured this routine removes debugfs file system + * elements that are specific to fnic. + */ +void fnic_debugfs_terminate(void) +{ + debugfs_remove(fnic_stats_debugfs_root); + fnic_stats_debugfs_root = NULL; + + debugfs_remove(fnic_trace_debugfs_root); + fnic_trace_debugfs_root = NULL; + + if (fc_trc_flag) + vfree(fc_trc_flag); +} /* - * fnic_trace_ctrl_open - Open the trace_enable file + * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace, + * or the fc_trace_enable file for fc_trace * @inode: The inode pointer. * @file: The file pointer to attach the trace enable/disable flag. * * Description: - * This routine opens a debugsfs file trace_enable. + * This routine opens the debugfs file trace_enable or fc_trace_enable. * * Returns: * This function returns zero if successful. @@ -42,15 +125,19 @@ static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp) } /* - * fnic_trace_ctrl_read - Read a trace_enable debugfs file + * fnic_trace_ctrl_read - + * Read the trace_enable, fc_trace_enable + * or fc_trace_clear debugfs file * @filp: The file pointer to read from. * @ubuf: The buffer to copy the data to. * @cnt: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: - * This routine reads value of variable fnic_tracing_enabled - * and stores into local @buf. It will start reading file at @ppos and + * This routine reads the value of fnic_tracing_enabled, + * fnic_fc_tracing_enabled or fnic_fc_trace_cleared + * and stores it into the local @buf. + * It will start reading the file at @ppos and + * copy up to @cnt of data to @ubuf from @buf.
* * Returns: @@ -62,13 +149,25 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp, { char buf[64]; int len; - len = sprintf(buf, "%u\n", fnic_tracing_enabled); + u8 *trace_type; + len = 0; + trace_type = (u8 *)filp->private_data; + if (*trace_type == fc_trc_flag->fnic_trace) + len = sprintf(buf, "%u\n", fnic_tracing_enabled); + else if (*trace_type == fc_trc_flag->fc_trace) + len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled); + else if (*trace_type == fc_trc_flag->fc_clear) + len = sprintf(buf, "%u\n", fnic_fc_trace_cleared); + else + pr_err("fnic: Unknown trace type for debugfs read\n"); return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); } /* - * fnic_trace_ctrl_write - Write to trace_enable debugfs file + * fnic_trace_ctrl_write - + * Write to trace_enable, fc_trace_enable or + * fc_trace_clear debugfs file * @filp: The file pointer to write from. * @ubuf: The buffer to copy the data from. * @cnt: The number of bytes to write. @@ -76,7 +175,8 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp, * * Description: * This routine writes data from user buffer @ubuf to buffer @buf and - * sets fnic_tracing_enabled value as per user input. + * sets fnic_tracing_enabled, fnic_fc_tracing_enabled or + * fnic_fc_trace_cleared as per user input. * * Returns: * This function returns the amount of data that was written. @@ -88,6 +188,8 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp, char buf[64]; unsigned long val; int ret; + u8 *trace_type; + trace_type = (u8 *)filp->private_data; if (cnt >= sizeof(buf)) return -EINVAL; @@ -101,12 +203,27 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp, if (ret < 0) return ret; - fnic_tracing_enabled = val; + if (*trace_type == fc_trc_flag->fnic_trace) + fnic_tracing_enabled = val; + else if (*trace_type == fc_trc_flag->fc_trace) + fnic_fc_tracing_enabled = val; + else if (*trace_type == fc_trc_flag->fc_clear) + fnic_fc_trace_cleared = val; + else + pr_err("fnic: Unknown trace type for debugfs write\n"); + (*ppos)++; return cnt; } +static const struct file_operations fnic_trace_ctrl_fops = { + .owner = THIS_MODULE, + .open = fnic_trace_ctrl_open, + .read = fnic_trace_ctrl_read, + .write = fnic_trace_ctrl_write, +}; + /* * fnic_trace_debugfs_open - Open the fnic trace log * @inode: The inode pointer @@ -126,19 +243,36 @@ static int fnic_trace_debugfs_open(struct inode *inode, struct file *file) { fnic_dbgfs_t *fnic_dbg_prt; + u8 *rdata_ptr; + rdata_ptr = (u8 *)inode->i_private; fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL); if (!fnic_dbg_prt) return -ENOMEM; - fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE))); - if (!fnic_dbg_prt->buffer) { - kfree(fnic_dbg_prt); - return -ENOMEM; + if (*rdata_ptr == fc_trc_flag->fnic_trace) { + fnic_dbg_prt->buffer = vmalloc(3 * + (trace_max_pages * PAGE_SIZE)); + if (!fnic_dbg_prt->buffer) { + kfree(fnic_dbg_prt); + return -ENOMEM; + } + memset((void *)fnic_dbg_prt->buffer, 0, + 3 * (trace_max_pages * PAGE_SIZE)); + fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt); + } else { + fnic_dbg_prt->buffer = + vmalloc(3 * (fnic_fc_trace_max_pages * PAGE_SIZE)); + if (!fnic_dbg_prt->buffer) { + kfree(fnic_dbg_prt); + return -ENOMEM; + } + memset((void *)fnic_dbg_prt->buffer, 0, + 3 * (fnic_fc_trace_max_pages * PAGE_SIZE)); + fnic_dbg_prt->buffer_len = + fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr); } - memset((void *)fnic_dbg_prt->buffer, 0, - (3*(trace_max_pages * PAGE_SIZE))); - fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt); file->private_data = fnic_dbg_prt; + return 0; }
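The three control files (tracing_enable, fc_trace_enable and fc_trace_clear) share the single fnic_trace_ctrl_fops defined above: each debugfs_create_file() call passes a different member of fc_trc_flag as the inode's i_private, and the read/write handlers then branch on that selector. A minimal sketch of the pattern, with hypothetical names (illustrative only, not part of the patch; value_for() stands in for the per-selector lookup):

static int ctrl_open(struct inode *inode, struct file *filp)
{
	/* publish the selector stored by debugfs_create_file() */
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t ctrl_read(struct file *filp, char __user *ubuf,
			 size_t cnt, loff_t *ppos)
{
	u8 *sel = filp->private_data;
	char buf[16];
	int len;

	len = scnprintf(buf, sizeof(buf), "%u\n", value_for(*sel));
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}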
@@ -164,20 +298,8 @@ static loff_t fnic_trace_debugfs_lseek(struct file *file, int howto) { fnic_dbgfs_t *fnic_dbg_prt = file->private_data; - loff_t pos = -1; - - switch (howto) { - case 0: - pos = offset; - break; - case 1: - pos = file->f_pos + offset; - break; - case 2: - pos = fnic_dbg_prt->buffer_len - offset; - } - return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ? - -EINVAL : (file->f_pos = pos); + return fixed_size_llseek(file, offset, howto, + fnic_dbg_prt->buffer_len); } /* @@ -232,13 +354,6 @@ static int fnic_trace_debugfs_release(struct inode *inode, return 0; } -static const struct file_operations fnic_trace_ctrl_fops = { - .owner = THIS_MODULE, - .open = fnic_trace_ctrl_open, - .read = fnic_trace_ctrl_read, - .write = fnic_trace_ctrl_write, -}; - static const struct file_operations fnic_trace_debugfs_fops = { .owner = THIS_MODULE, .open = fnic_trace_debugfs_open, @@ -253,37 +368,39 @@ static const struct file_operations fnic_trace_debugfs_fops = { * Description: * When Debugfs is configured this routine sets up the fnic debugfs * file system. If not already created, this routine will create the - fnic directory. It will create file trace to log fnic trace buffer - output into debugfs and it will also create file trace_enable to - control enable/disable of trace logging into trace buffer. + file trace to log fnic trace buffer output into debugfs and + it will also create the file trace_enable to control enable/disable + of trace logging into the trace buffer. */ int fnic_trace_debugfs_init(void) { int rc = -1; - fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL); if (!fnic_trace_debugfs_root) { - printk(KERN_DEBUG "Cannot create debugfs root\n"); + printk(KERN_DEBUG + "FNIC Debugfs root directory doesn't exist\n"); return rc; } fnic_trace_enable = debugfs_create_file("tracing_enable", - S_IFREG|S_IRUGO|S_IWUSR, - fnic_trace_debugfs_root, - NULL, &fnic_trace_ctrl_fops); + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fnic_trace), + &fnic_trace_ctrl_fops); if (!fnic_trace_enable) { - printk(KERN_DEBUG "Cannot create trace_enable file" - " under debugfs"); + printk(KERN_DEBUG + "Cannot create trace_enable file under debugfs\n"); return rc; } fnic_trace_debugfs_file = debugfs_create_file("trace", - S_IFREG|S_IRUGO|S_IWUSR, - fnic_trace_debugfs_root, - NULL, - &fnic_trace_debugfs_fops); + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fnic_trace), + &fnic_trace_debugfs_fops); if (!fnic_trace_debugfs_file) { - printk(KERN_DEBUG "Cannot create trace file under debugfs"); + printk(KERN_DEBUG + "Cannot create trace file under debugfs\n"); return rc; } rc = 0; @@ -299,16 +416,421 @@ int fnic_trace_debugfs_init(void) */ void fnic_trace_debugfs_terminate(void) { - if (fnic_trace_debugfs_file) { - debugfs_remove(fnic_trace_debugfs_file); - fnic_trace_debugfs_file = NULL; + debugfs_remove(fnic_trace_debugfs_file); + fnic_trace_debugfs_file = NULL; + + debugfs_remove(fnic_trace_enable); + fnic_trace_enable = NULL; +}
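Note that the rewritten fnic_trace_debugfs_terminate() drops the NULL guards that the old version wrapped around each removal: debugfs_remove() accepts a NULL (or error-valued) dentry and simply does nothing, so the unconditional remove-then-clear form is the idiomatic one. The shape of the change, reduced to a sketch:

	/* before: redundant guard around every teardown */
	if (dentry) {
		debugfs_remove(dentry);
		dentry = NULL;
	}

	/* after: debugfs_remove(NULL) is a harmless no-op */
	debugfs_remove(dentry);
	dentry = NULL;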
+ +/* + * fnic_fc_trace_debugfs_init - + * Initialize debugfs for fnic control frame trace logging + * + * Description: + * When Debugfs is configured this routine sets up the fnic_fc debugfs + * file system. If not already created, this routine will create the + * files fc_trace and fc_trace_rdata to log fnic fc trace buffer output + * into debugfs, and it will also create the files fc_trace_enable and + * fc_trace_clear to control trace logging into the trace buffer. + */ + +int fnic_fc_trace_debugfs_init(void) +{ + int rc = -1; + + if (!fnic_trace_debugfs_root) { + pr_err("fnic: Debugfs root directory doesn't exist\n"); + return rc; } - if (fnic_trace_enable) { - debugfs_remove(fnic_trace_enable); - fnic_trace_enable = NULL; + + fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fc_trace), + &fnic_trace_ctrl_fops); + + if (!fnic_fc_trace_enable) { + pr_err("fnic: Failed to create fc_trace_enable file\n"); + return rc; } + + fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fc_clear), + &fnic_trace_ctrl_fops); + + if (!fnic_fc_trace_clear) { + pr_err("fnic: Failed to create fc_trace_clear file\n"); + return rc; } - if (fnic_trace_debugfs_root) { - debugfs_remove(fnic_trace_debugfs_root); - fnic_trace_debugfs_root = NULL; + + fnic_fc_rdata_trace_debugfs_file = + debugfs_create_file("fc_trace_rdata", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fc_normal_file), + &fnic_trace_debugfs_fops); + + if (!fnic_fc_rdata_trace_debugfs_file) { + pr_err("fnic: Failed to create fc_trace_rdata file\n"); + return rc; + } + + fnic_fc_trace_debugfs_file = + debugfs_create_file("fc_trace", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fc_row_file), + &fnic_trace_debugfs_fops); + + if (!fnic_fc_trace_debugfs_file) { + pr_err("fnic: Failed to create fc_trace file\n"); + return rc; + } + rc = 0; + return rc; +} + +/* + * fnic_fc_trace_debugfs_terminate - Tear down debugfs infrastructure + * + * Description: + * When Debugfs is configured this routine removes debugfs file system + * elements that are specific to fnic_fc trace logging. + */ + +void fnic_fc_trace_debugfs_terminate(void) +{ + debugfs_remove(fnic_fc_trace_debugfs_file); + fnic_fc_trace_debugfs_file = NULL; + + debugfs_remove(fnic_fc_rdata_trace_debugfs_file); + fnic_fc_rdata_trace_debugfs_file = NULL; + + debugfs_remove(fnic_fc_trace_enable); + fnic_fc_trace_enable = NULL; + + debugfs_remove(fnic_fc_trace_clear); + fnic_fc_trace_clear = NULL; +} + +/* + * fnic_reset_stats_open - Open the reset_stats file + * @inode: The inode pointer. + * @file: The file pointer to attach the stats reset flag. + * + * Description: + * This routine opens the debugfs file reset_stats and stores the + * i_private data in the debug structure so it can be retrieved later + * while performing other file operations. + * + * Returns: + * This function returns zero if successful. + */ +static int fnic_reset_stats_open(struct inode *inode, struct file *file) +{ + struct stats_debug_info *debug; + + debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL); + if (!debug) + return -ENOMEM; + + debug->i_private = inode->i_private; + + file->private_data = debug; + + return 0; +}
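fnic_reset_stats_write() further below uses the stock recipe for accepting a small integer from user space: bound the write against an on-stack buffer, copy_from_user(), NUL-terminate, then parse with kstrtoul(). Condensed to a sketch (illustrative only; `flag` is a hypothetical target variable):

static unsigned long flag;

static ssize_t flag_write(struct file *file, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';	/* kstrtoul() expects a terminated string */

	ret = kstrtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	flag = val;
	(*ppos)++;
	return cnt;
}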
+ +/* + * fnic_reset_stats_read - Read a reset_stats debugfs file + * @filp: The file pointer to read from. + * @ubuf: The buffer to copy the data to. + * @cnt: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads the value of reset_stats and stores it into the + * local @buf. It will start reading the file at @ppos and + * copy up to @cnt of data to @ubuf from @buf. + * + * Returns: + * This function returns the amount of data that was read. + */ +static ssize_t fnic_reset_stats_read(struct file *file, + char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct stats_debug_info *debug = file->private_data; + struct fnic *fnic = (struct fnic *)debug->i_private; + char buf[64]; + int len; + + len = sprintf(buf, "%u\n", fnic->reset_stats); + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); +} + +/* + * fnic_reset_stats_write - Write to reset_stats debugfs file + * @filp: The file pointer to write from. + * @ubuf: The buffer to copy the data from. + * @cnt: The number of bytes to write. + * @ppos: The position in the file to start writing to. + * + * Description: + * This routine writes data from user buffer @ubuf to buffer @buf and + * resets the cumulative stats of the fnic. + * + * Returns: + * This function returns the amount of data that was written. + */ +static ssize_t fnic_reset_stats_write(struct file *file, + const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct stats_debug_info *debug = file->private_data; + struct fnic *fnic = (struct fnic *)debug->i_private; + struct fnic_stats *stats = &fnic->fnic_stats; + u64 *io_stats_p = (u64 *)&stats->io_stats; + u64 *fw_stats_p = (u64 *)&stats->fw_stats; + char buf[64]; + unsigned long val; + int ret; + + if (cnt >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, cnt)) + return -EFAULT; + + buf[cnt] = 0; + + ret = kstrtoul(buf, 10, &val); + if (ret < 0) + return ret; + + fnic->reset_stats = val; + + if (fnic->reset_stats) { + /* The skip count avoids discrepancies between the Num IOs + * and IO Completions stats: completions for IOs that were + * already active when the stats were reset are not counted. + */ + atomic64_set(&fnic->io_cmpl_skip, + atomic64_read(&stats->io_stats.active_ios)); + memset(&stats->abts_stats, 0, sizeof(struct abort_stats)); + memset(&stats->term_stats, 0, + sizeof(struct terminate_stats)); + memset(&stats->reset_stats, 0, sizeof(struct reset_stats)); + memset(&stats->misc_stats, 0, sizeof(struct misc_stats)); + memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats)); + memset(io_stats_p+1, 0, + sizeof(struct io_path_stats) - sizeof(u64)); + memset(fw_stats_p+1, 0, + sizeof(struct fw_stats) - sizeof(u64)); + } + + (*ppos)++; + return cnt; +} + +/* + * fnic_reset_stats_release - Release the buffer used to store + * debugfs file data + * @inode: The inode pointer + * @file: The file pointer that contains the buffer to release + * + * Description: + * This routine frees the buffer that was allocated when the debugfs + * file was opened. + * + * Returns: + * This function returns zero. + */ +static int fnic_reset_stats_release(struct inode *inode, + struct file *file) +{ + struct stats_debug_info *debug = file->private_data; + kfree(debug); + return 0; +}
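One detail in the write handler above deserves a note: the IO and firmware stat blocks are cleared with memset(p + 1, ...), which deliberately leaves the first u64 of each structure alone. That works only if the in-flight counter (active_ios, and its firmware counterpart) is the first member, which the layout in fnic_stats.h has to guarantee; io_cmpl_skip then swallows the completions belonging to IOs that were already active at reset time. The idea, sketched under that layout assumption:

struct io_path_stats {
	atomic64_t active_ios;	/* must stay first: survives a stats reset */
	atomic64_t num_ios;
	/* ... remaining counters ... */
};

u64 *p = (u64 *)&stats->io_stats;
/* zero everything except the leading active_ios counter */
memset(p + 1, 0, sizeof(struct io_path_stats) - sizeof(u64));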
+ +/* + * fnic_stats_debugfs_open - Open the stats file for a specific host + * and get the fnic stats. + * @inode: The inode pointer. + * @file: The file pointer to attach the specific host statistics. + * + * Description: + * This routine opens the debugfs stats file of a specific host and + * prints the fnic stats. + * + * Returns: + * This function returns zero if successful. + */ +static int fnic_stats_debugfs_open(struct inode *inode, + struct file *file) +{ + struct fnic *fnic = inode->i_private; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + struct stats_debug_info *debug; + int buf_size = 2 * PAGE_SIZE; + + debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL); + if (!debug) + return -ENOMEM; + + debug->debug_buffer = vmalloc(buf_size); + if (!debug->debug_buffer) { + kfree(debug); + return -ENOMEM; + } + + debug->buf_size = buf_size; + memset((void *)debug->debug_buffer, 0, buf_size); + debug->buffer_len = fnic_get_stats_data(debug, fnic_stats); + + file->private_data = debug; + + return 0; +} + +/* + * fnic_stats_debugfs_read - Read a debugfs file + * @file: The file pointer to read from. + * @ubuf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @pos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the buffer indicated in the private_data + * field of @file. It will start reading at @pos and copy up to @nbytes of + * data to @ubuf. + * + * Returns: + * This function returns the amount of data that was read (this could be + * less than @nbytes if the end of the file was reached). + */ +static ssize_t fnic_stats_debugfs_read(struct file *file, + char __user *ubuf, + size_t nbytes, + loff_t *pos) +{ + struct stats_debug_info *debug = file->private_data; + int rc = 0; + rc = simple_read_from_buffer(ubuf, nbytes, pos, + debug->debug_buffer, + debug->buffer_len); + return rc; +} + +/* + * fnic_stats_debugfs_release - Release the buffer used to store + * debugfs file data + * @inode: The inode pointer + * @file: The file pointer that contains the buffer to release + * + * Description: + * This routine frees the buffer that was allocated when the debugfs + * file was opened. + * + * Returns: + * This function returns zero. + */ +static int fnic_stats_debugfs_release(struct inode *inode, + struct file *file) +{ + struct stats_debug_info *debug = file->private_data; + vfree(debug->debug_buffer); + kfree(debug); + return 0; +} + +static const struct file_operations fnic_stats_debugfs_fops = { + .owner = THIS_MODULE, + .open = fnic_stats_debugfs_open, + .read = fnic_stats_debugfs_read, + .release = fnic_stats_debugfs_release, +}; + +static const struct file_operations fnic_reset_debugfs_fops = { + .owner = THIS_MODULE, + .open = fnic_reset_stats_open, + .read = fnic_reset_stats_read, + .write = fnic_reset_stats_write, + .release = fnic_reset_stats_release, +};
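Taken together with fnic_debugfs_init() and the trace files earlier in this file, the per-adapter debugfs tree created just below ends up looking like this (paths assume the usual /sys/kernel/debug mount; host0 stands for any hostN):

/sys/kernel/debug/fnic/
	tracing_enable
	trace
	fc_trace_enable
	fc_trace_clear
	fc_trace
	fc_trace_rdata
	statistics/
		host0/
			stats
			reset_stats

Writing 1 to reset_stats triggers the clearing logic above; reading stats returns the snapshot that was formatted when the file was opened.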
+ +/* + * fnic_stats_debugfs_init - Initialize the stats struct and create the + * stats file per fnic + * + * Description: + * When Debugfs is configured this routine sets up the stats file per fnic. + * It will create the files stats and reset_stats under the + * statistics/host# directory to log per fnic stats. + */ +int fnic_stats_debugfs_init(struct fnic *fnic) +{ + int rc = -1; + char name[16]; + + snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no); + + if (!fnic_stats_debugfs_root) { + printk(KERN_DEBUG "fnic_stats root doesn't exist\n"); + return rc; + } + fnic->fnic_stats_debugfs_host = debugfs_create_dir(name, + fnic_stats_debugfs_root); + if (!fnic->fnic_stats_debugfs_host) { + printk(KERN_DEBUG "Cannot create host directory\n"); + return rc; + } + + fnic->fnic_stats_debugfs_file = debugfs_create_file("stats", + S_IFREG|S_IRUGO|S_IWUSR, + fnic->fnic_stats_debugfs_host, + fnic, + &fnic_stats_debugfs_fops); + if (!fnic->fnic_stats_debugfs_file) { + printk(KERN_DEBUG "Cannot create host stats file\n"); + return rc; + } + + fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats", + S_IFREG|S_IRUGO|S_IWUSR, + fnic->fnic_stats_debugfs_host, + fnic, + &fnic_reset_debugfs_fops); + if (!fnic->fnic_reset_debugfs_file) { + printk(KERN_DEBUG "Cannot create host reset_stats file\n"); + return rc; + } + rc = 0; + return rc; +} + +/* + * fnic_stats_debugfs_remove - Tear down debugfs infrastructure of stats + * + * Description: + * When Debugfs is configured this routine removes debugfs file system + * elements that are specific to fnic stats. + */ +void fnic_stats_debugfs_remove(struct fnic *fnic) +{ + if (!fnic) + return; + + debugfs_remove(fnic->fnic_stats_debugfs_file); + fnic->fnic_stats_debugfs_file = NULL; + + debugfs_remove(fnic->fnic_reset_debugfs_file); + fnic->fnic_reset_debugfs_file = NULL; + + debugfs_remove(fnic->fnic_stats_debugfs_host); + fnic->fnic_stats_debugfs_host = NULL; } diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 483eb9dbe66..1b948f633fc 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -31,12 +31,20 @@ #include <scsi/libfc.h> #include "fnic_io.h" #include "fnic.h" +#include "fnic_fip.h" #include "cq_enet_desc.h" #include "cq_exch_desc.h" +static u8 fcoe_all_fcfs[ETH_ALEN]; +struct workqueue_struct *fnic_fip_queue; struct workqueue_struct *fnic_event_queue; static void fnic_set_eth_mode(struct fnic *); +static void fnic_fcoe_send_vlan_req(struct fnic *fnic); +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); void fnic_handle_link(struct work_struct *work) { @@ -58,34 +66,74 @@ void fnic_handle_link(struct work_struct *work) fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); if (old_link_status == fnic->link_status) { - if (!fnic->link_status) + if (!fnic->link_status) { /* DOWN -> DOWN */ spin_unlock_irqrestore(&fnic->fnic_lock, flags); - else { + fnic_fc_trace_set_data(fnic->lport->host->host_no, + FNIC_FC_LE, "Link Status: DOWN->DOWN", + strlen("Link Status: DOWN->DOWN")); + } else { if (old_link_down_cnt != fnic->link_down_cnt) { /* UP -> DOWN -> UP */ fnic->lport->host_stats.link_failure_count++; spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fc_trace_set_data( + fnic->lport->host->host_no, + FNIC_FC_LE, + "Link Status:UP_DOWN_UP", + strlen("Link Status:UP_DOWN_UP") + ); FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); fcoe_ctlr_link_down(&fnic->ctlr); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + /* start FCoE VLAN discovery */ + fnic_fc_trace_set_data( + fnic->lport->host->host_no, + FNIC_FC_LE, + "Link Status: UP_DOWN_UP_VLAN",
strlen( + "Link Status: UP_DOWN_UP_VLAN") + ); + fnic_fcoe_send_vlan_req(fnic); + return; + } FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); fcoe_ctlr_link_up(&fnic->ctlr); } else /* UP -> UP */ spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fc_trace_set_data( + fnic->lport->host->host_no, FNIC_FC_LE, + "Link Status: UP_UP", + strlen("Link Status: UP_UP")); } } else if (fnic->link_status) { /* DOWN -> UP */ spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + /* start FCoE VLAN discovery */ + fnic_fc_trace_set_data( + fnic->lport->host->host_no, + FNIC_FC_LE, "Link Status: DOWN_UP_VLAN", + strlen("Link Status: DOWN_UP_VLAN")); + fnic_fcoe_send_vlan_req(fnic); + return; + } FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); + fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, + "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP")); fcoe_ctlr_link_up(&fnic->ctlr); } else { /* UP -> DOWN */ fnic->lport->host_stats.link_failure_count++; spin_unlock_irqrestore(&fnic->fnic_lock, flags); FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); + fnic_fc_trace_set_data( + fnic->lport->host->host_no, FNIC_FC_LE, + "Link Status: UP_DOWN", + strlen("Link Status: UP_DOWN")); fcoe_ctlr_link_down(&fnic->ctlr); } @@ -128,6 +176,443 @@ void fnic_handle_frame(struct work_struct *work) } } +void fnic_fcoe_evlist_free(struct fnic *fnic) +{ + struct fnic_event *fevt = NULL; + struct fnic_event *next = NULL; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (list_empty(&fnic->evlist)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + list_del(&fevt->list); + kfree(fevt); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +void fnic_handle_event(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, event_work); + struct fnic_event *fevt = NULL; + struct fnic_event *next = NULL; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (list_empty(&fnic->evlist)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + if (fnic->stop_rx_link_events) { + list_del(&fevt->list); + kfree(fevt); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_del(&fevt->list); + switch (fevt->event) { + case FNIC_EVT_START_VLAN_DISC: + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fcoe_send_vlan_req(fnic); + spin_lock_irqsave(&fnic->fnic_lock, flags); + break; + case FNIC_EVT_START_FCF_DISC: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start FCF Discovery\n"); + fnic_fcoe_start_fcf_disc(fnic); + break; + default: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Unknown event 0x%x\n", fevt->event); + break; + } + kfree(fevt); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +/** + * Check if the Received FIP FLOGI frame is rejected + * @fip: The FCoE controller that received the frame + * @skb: The received FIP frame + * + * Returns non-zero if the frame is rejected with unsupported cmd with + * insufficient resource els explanation. 
+ */ +static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip, + struct sk_buff *skb) +{ + struct fc_lport *lport = fip->lp; + struct fip_header *fiph; + struct fc_frame_header *fh = NULL; + struct fip_desc *desc; + struct fip_encaps *els; + enum fip_desc_type els_dtype = 0; + u16 op; + u8 els_op; + u8 sub; + + size_t els_len = 0; + size_t rlen; + size_t dlen = 0; + + if (skb_linearize(skb)) + return 0; + + if (skb->len < sizeof(*fiph)) + return 0; + + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (op != FIP_OP_LS) + return 0; + + if (sub != FIP_SC_REP) + return 0; + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return 0; + + desc = (struct fip_desc *)(fiph + 1); + dlen = desc->fip_dlen * FIP_BPW; + + if (desc->fip_dtype == FIP_DT_FLOGI) { + + if (dlen < sizeof(*els) + sizeof(*fh) + 1) + return 0; + + els_len = dlen - sizeof(*els); + els = (struct fip_encaps *)desc; + fh = (struct fc_frame_header *)(els + 1); + els_dtype = desc->fip_dtype; + + if (!fh) + return 0; + + /* + * A reject carries the ELS command code LS_RJT; the switch + * uses reason "unsupported command" with explanation + * "insufficient resources". + */ + els_op = *(u8 *)(fh + 1); + if (els_op == ELS_LS_RJT) { + shost_printk(KERN_INFO, lport->host, + "Flogi Request Rejected by Switch\n"); + return 1; + } + shost_printk(KERN_INFO, lport->host, + "Flogi Request Accepted by Switch\n"); + } + return 0; +} + +static void fnic_fcoe_send_vlan_req(struct fnic *fnic) +{ + struct fcoe_ctlr *fip = &fnic->ctlr; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + struct sk_buff *skb; + char *eth_fr; + int fr_len; + struct fip_vlan *vlan; + u64 vlan_tov; + + fnic_fcoe_reset_vlans(fnic); + fnic->set_vlan(fnic, 0); + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Sending VLAN request...\n"); + skb = dev_alloc_skb(sizeof(struct fip_vlan)); + if (!skb) + return; + + fr_len = sizeof(*vlan); + eth_fr = (char *)skb->data; + vlan = (struct fip_vlan *)eth_fr; + + memset(vlan, 0, sizeof(*vlan)); + memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN); + vlan->eth.h_proto = htons(ETH_P_FIP); + + vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + vlan->fip.fip_op = htons(FIP_OP_VLAN); + vlan->fip.fip_subcode = FIP_SC_VL_REQ; + vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); + + vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; + vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; + memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; + put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); + atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs); + + skb_put(skb, sizeof(*vlan)); + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + fip->send(fip, skb); + + /* set a timer so that we can retry if there is no response */ + vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); + mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov)); +}
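The request built above is a self-contained FIP frame: an Ethernet header addressed to the all-FCF multicast address (fcoe_all_fcfs), a FIP header carrying op FIP_OP_VLAN / subcode FIP_SC_VL_REQ, and two descriptors identifying the sender. Its on-wire shape follows struct fip_vlan, defined in fnic_fip.h later in this patch; the annotations here are editorial:

struct fip_vlan {
	struct ethhdr eth;		/* dest = all-FCF MACs, proto = ETH_P_FIP */
	struct fip_header fip;		/* FIP_OP_VLAN / FIP_SC_VL_REQ */
	struct {
		struct fip_mac_desc mac;	/* FIP_DT_MAC: ctl_src_addr */
		struct fip_wwn_desc wwnn;	/* FIP_DT_NAME: lport WWNN */
	} desc;
};

Note that the sender does not wait for an answer: it arms fip_timer with FCOE_CTLR_FIPVLAN_TOV, so a lost request is retried from fnic_handle_fip_timer(), which re-enqueues FNIC_EVT_START_VLAN_DISC when no VLANs have been learned.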
+ +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) +{ + struct fcoe_ctlr *fip = &fnic->ctlr; + struct fip_header *fiph; + struct fip_desc *desc; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + u16 vid; + size_t rlen; + size_t dlen; + struct fcoe_vlan *vlan; + u64 sol_time; + unsigned long flags; + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Received VLAN response...\n"); + + fiph = (struct fip_header *) skb->data; + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Received VLAN response... OP 0x%x SUB_OP 0x%x\n", + ntohs(fiph->fip_op), fiph->fip_subcode); + + rlen = ntohs(fiph->fip_dl_len) * 4; + fnic_fcoe_reset_vlans(fnic); + spin_lock_irqsave(&fnic->vlans_lock, flags); + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + switch (desc->fip_dtype) { + case FIP_DT_VLAN: + vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); + shost_printk(KERN_INFO, fnic->lport->host, + "process_vlan_resp: FIP VLAN %d\n", vid); + vlan = kmalloc(sizeof(*vlan), + GFP_ATOMIC); + if (!vlan) { + /* retry from timer */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + goto out; + } + memset(vlan, 0, sizeof(struct fcoe_vlan)); + vlan->vid = vid & 0x0fff; + vlan->state = FIP_VLAN_AVAIL; + list_add_tail(&vlan->list, &fnic->vlans); + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + /* any VLAN descriptors present? */ + if (list_empty(&fnic->vlans)) { + /* retry from timer */ + atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID); + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "No VLAN descriptors in FIP VLAN response\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + goto out; + } + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + vlan->sol_count++; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* start the solicitation */ + fcoe_ctlr_link_up(fip); + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); +out: + return; +} + +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + u64 sol_time; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + vlan->sol_count = 1; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* start the solicitation */ + fcoe_ctlr_link_up(&fnic->ctlr); + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); +} + +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag) +{ + unsigned long flags; + struct fcoe_vlan *fvlan; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlans)) { + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return -EINVAL; + } + + fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + if (fvlan->state == FIP_VLAN_USED) { + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return 0; + } + + if (fvlan->state == FIP_VLAN_SENT) { + fvlan->state = FIP_VLAN_USED; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return 0; + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return -EINVAL; +} + +static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev) +{ + struct fnic_event *fevt; + unsigned long flags; + + fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC); + if (!fevt) + return; + + fevt->fnic = fnic; + fevt->event = ev; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&fevt->list, &fnic->evlist); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + schedule_work(&fnic->event_work); +} + +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) +{ + struct fip_header *fiph; + int ret = 1; + u16 op; +
u8 sub; + + if (!skb || !(skb->data)) + return -1; + + if (skb_linearize(skb)) + goto drop; + + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) + goto drop; + + if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) + goto drop; + + if (op == FIP_OP_DISC && sub == FIP_SC_ADV) { + if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags))) + goto drop; + /* pass it on to fcoe */ + ret = 1; + } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) { + /* set the vlan as used */ + fnic_fcoe_process_vlan_resp(fnic, skb); + ret = 0; + } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { + /* received CVL request, restart vlan disc */ + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + /* pass it on to fcoe */ + ret = 1; + } +drop: + return ret; +} + +void fnic_handle_fip_frame(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + unsigned long flags; + struct sk_buff *skb; + struct ethhdr *eh; + + while ((skb = skb_dequeue(&fnic->fip_frame_queue))) { + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + dev_kfree_skb(skb); + return; + } + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + skb_queue_head(&fnic->fip_frame_queue, skb); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + eh = (struct ethhdr *)skb->data; + if (eh->h_proto == htons(ETH_P_FIP)) { + skb_pull(skb, sizeof(*eh)); + if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) { + dev_kfree_skb(skb); + continue; + } + /* + * If there's FLOGI rejects - clear all + * fcf's & restart from scratch + */ + if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { + atomic64_inc( + &fnic_stats->vlan_stats.flogi_rejects); + shost_printk(KERN_INFO, fnic->lport->host, + "Trigger a Link down - VLAN Disc\n"); + fcoe_ctlr_link_down(&fnic->ctlr); + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + dev_kfree_skb(skb); + continue; + } + fcoe_ctlr_recv(&fnic->ctlr, skb); + continue; + } + } +} + /** * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. * @fnic: fnic instance. 
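fnic_fcoe_handle_fip_frame() above encodes a simple contract with its caller: a return value of 1 means the frame is still interesting to libfcoe and must be forwarded, while 0 or less means it was consumed (or rejected) locally. Only the VLAN report (FIP_OP_VLAN / FIP_SC_VL_REP) is fully consumed; a clear-virtual-link request additionally queues FNIC_EVT_START_VLAN_DISC before being passed on. The caller's side of the contract, condensed from fnic_handle_fip_frame() above (the FLOGI-reject check elided):

	if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
		dev_kfree_skb(skb);	/* consumed or rejected locally */
		continue;
	}
	/* still relevant to the FIP state machine in libfcoe */
	fcoe_ctlr_recv(&fnic->ctlr, skb);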
@@ -150,8 +635,18 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) skb_reset_mac_header(skb); } if (eh->h_proto == htons(ETH_P_FIP)) { - skb_pull(skb, sizeof(*eh)); - fcoe_ctlr_recv(&fnic->ctlr, skb); + if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) { + printk(KERN_ERR "Dropped FIP frame: firmware " + "is in non-FIP mode; enable FIP " + "using UCSM\n"); + goto drop; + } + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, + FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) { + printk(KERN_ERR "fnic: ctlr frame trace error\n"); + } + skb_queue_tail(&fnic->fip_frame_queue, skb); + queue_work(fnic_fip_queue, &fnic->fip_frame_work); return 1; /* let caller know packet was used */ } if (eh->h_proto != htons(ETH_P_FCOE)) @@ -192,13 +687,13 @@ void fnic_update_mac_locked(struct fnic *fnic, u8 *new) if (is_zero_ether_addr(new)) new = ctl; - if (!compare_ether_addr(data, new)) + if (ether_addr_equal(data, new)) return; FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); - if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl)) + if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl)) vnic_dev_del_addr(fnic->vdev, data); memcpy(data, new, ETH_ALEN); - if (compare_ether_addr(new, ctl)) + if (!ether_addr_equal(new, ctl)) vnic_dev_add_addr(fnic->vdev, new); } @@ -294,6 +789,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc struct fnic *fnic = vnic_dev_priv(rq->vdev); struct sk_buff *skb; struct fc_frame *fp; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; unsigned int eth_hdrs_stripped; u8 type, color, eop, sop, ingress_port, vlan_stripped; u8 fcoe = 0, fcoe_sof, fcoe_eof; @@ -344,6 +840,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc eth_hdrs_stripped = 0; skb_trim(skb, bytes_written); if (!fcs_ok) { + atomic64_inc(&fnic_stats->misc_stats.frame_errors); FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "fcs error.
dropping packet.\n"); goto drop; @@ -359,6 +856,7 @@ } if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) { + atomic64_inc(&fnic_stats->misc_stats.frame_errors); FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "fnic rq_cmpl fcoe x%x fcsok x%x" " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err" @@ -375,6 +873,10 @@ } fr_dev(fp) = fnic->lport; spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV, + (char *)skb->data, skb->len)) != 0) { + printk(KERN_ERR "fnic: ctlr frame trace error\n"); + } skb_queue_tail(&fnic->frame_queue, skb); queue_work(fnic_event_queue, &fnic->frame_work); @@ -482,6 +984,15 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb) vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, + FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) { + printk(KERN_ERR "fnic: ctlr frame trace error\n"); + } + } else { + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, + FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) { + printk(KERN_ERR "fnic: ctlr frame trace error\n"); + } } pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); @@ -554,6 +1065,11 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE); + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND, + (char *)eth_hdr, tot_len)) != 0) { + printk(KERN_ERR "fnic: ctlr frame trace error\n"); + } + spin_lock_irqsave(&fnic->wq_lock[0], flags); if (!vnic_wq_desc_avail(wq)) { @@ -720,3 +1236,106 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) dev_kfree_skb(fp_skb(fp)); buf->os_buf = NULL; } + +void fnic_fcoe_reset_vlans(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + struct fcoe_vlan *next; + + /* + * Indicate a link down to fcoe so that all fcfs are freed. This may + * not be required, since we do the same before sending the vlan + * discovery request. + */ + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (!list_empty(&fnic->vlans)) { + list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { + list_del(&vlan->list); + kfree(vlan); + } + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); +} + +void fnic_handle_fip_timer(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + u64 sol_time; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fnic->ctlr.mode == FIP_ST_NON_FIP) + return; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlans)) { + /* no vlans available, try again */ + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + return; + } + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + shost_printk(KERN_DEBUG, fnic->lport->host, + "fip_timer: vlan %d state %d sol_count %d\n", + vlan->vid, vlan->state, vlan->sol_count); + switch (vlan->state) { + case FIP_VLAN_USED: + FNIC_FCS_DBG(KERN_DEBUG,
fnic->lport->host, + "FIP VLAN is selected for FC transaction\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + break; + case FIP_VLAN_FAILED: + /* if all vlans are in failed state, restart vlan disc */ + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + break; + case FIP_VLAN_SENT: + if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { + /* + * no response on this vlan, remove from the list. + * Try the next vlan + */ + shost_printk(KERN_INFO, fnic->lport->host, + "Dequeue this VLAN ID %d from list\n", + vlan->vid); + list_del(&vlan->list); + kfree(vlan); + vlan = NULL; + if (list_empty(&fnic->vlans)) { + /* we exhausted all vlans, restart vlan disc */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + shost_printk(KERN_INFO, fnic->lport->host, + "fip_timer: vlan list empty, " + "trigger vlan disc\n"); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + return; + } + /* check the next vlan */ + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, + list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); + vlan->sol_count++; + sol_time = jiffies + msecs_to_jiffies + (FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); + break; + } +} diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h new file mode 100644 index 00000000000..87e74c2ab97 --- /dev/null +++ b/drivers/scsi/fnic/fnic_fip.h @@ -0,0 +1,68 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _FNIC_FIP_H_ +#define _FNIC_FIP_H_ + + +#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */ +#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */ +#define FCOE_CTLR_MAX_SOL 8 + +#define FINC_MAX_FLOGI_REJECTS 8 + +/* + * FIP_DT_VLAN descriptor. + */ +struct fip_vlan_desc { + struct fip_desc fd_desc; + __be16 fd_vlan; +} __attribute__((packed)); + +struct vlan { + __be16 vid; + __be16 type; +}; + +/* + * VLAN entry. + */ +struct fcoe_vlan { + struct list_head list; + u16 vid; /* vlan ID */ + u16 sol_count; /* no. 
of sols sent */ + u16 state; /* state */ +}; + +enum fip_vlan_state { + FIP_VLAN_AVAIL = 0, /* don't do anything */ + FIP_VLAN_SENT = 1, /* sent */ + FIP_VLAN_USED = 2, /* succeeded */ + FIP_VLAN_FAILED = 3, /* failed to respond */ +}; + +struct fip_vlan { + struct ethhdr eth; + struct fip_header fip; + struct { + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + } desc; +}; + +#endif /* _FNIC_FIP_H_ */ diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c index 5c1f223cabc..7d9b54ae7f6 100644 --- a/drivers/scsi/fnic/fnic_isr.c +++ b/drivers/scsi/fnic/fnic_isr.c @@ -37,6 +37,9 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data) if (!pba) return IRQ_NONE; + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + if (pba & (1 << FNIC_INTX_NOTIFY)) { vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]); fnic_handle_link_event(fnic); @@ -66,6 +69,9 @@ static irqreturn_t fnic_isr_msi(int irq, void *data) struct fnic *fnic = data; unsigned long work_done = 0; + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + work_done += fnic_wq_copy_cmpl_handler(fnic, -1); work_done += fnic_wq_cmpl_handler(fnic, -1); work_done += fnic_rq_cmpl_handler(fnic, -1); @@ -83,6 +89,9 @@ static irqreturn_t fnic_isr_msix_rq(int irq, void *data) struct fnic *fnic = data; unsigned long rq_work_done = 0; + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + rq_work_done = fnic_rq_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ], rq_work_done, @@ -97,6 +106,9 @@ static irqreturn_t fnic_isr_msix_wq(int irq, void *data) struct fnic *fnic = data; unsigned long wq_work_done = 0; + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + wq_work_done = fnic_wq_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ], wq_work_done, @@ -110,6 +122,9 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data) struct fnic *fnic = data; unsigned long wq_copy_work_done = 0; + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], wq_copy_work_done, @@ -122,6 +137,9 @@ static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data) { struct fnic *fnic = data; + fnic->fnic_stats.misc_stats.last_isr_time = jiffies; + atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); + vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]); fnic_log_q_error(fnic); fnic_handle_link_event(fnic); diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index d601ac543c5..8c56fdc3a45 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -39,6 +39,7 @@ #include "vnic_intr.h" #include "vnic_stats.h" #include "fnic_io.h" +#include "fnic_fip.h" #include "fnic.h" #define PCI_DEVICE_ID_CISCO_FNIC 0x0045 @@ -73,6 +74,15 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages " "for fnic trace buffer"); +unsigned int fnic_fc_trace_max_pages = 64; +module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(fnic_fc_trace_max_pages, + "Total allocated memory pages for fc trace buffer"); + +static unsigned int fnic_max_qdepth =
FNIC_DFLT_QUEUE_DEPTH; +module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); + static struct libfc_function_template fnic_transport_template = { .frame_send = fnic_send, .lport_set_port_id = fnic_set_port_id, @@ -90,7 +100,7 @@ static int fnic_slave_alloc(struct scsi_device *sdev) if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; - scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH); + scsi_activate_tcq(sdev, fnic_max_qdepth); return 0; } @@ -106,7 +116,7 @@ static struct scsi_host_template fnic_host_template = { .change_queue_type = fc_change_queue_type, .this_id = -1, .cmd_per_lun = 3, - .can_queue = FNIC_MAX_IO_REQ, + .can_queue = FNIC_DFLT_IO_REQ, .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = FNIC_MAX_SG_DESC_CNT, .max_sectors = 0xffff, @@ -125,6 +135,7 @@ fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) static void fnic_get_host_speed(struct Scsi_Host *shost); static struct scsi_transport_template *fnic_fc_transport; static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *); +static void fnic_reset_host_stats(struct Scsi_Host *); static struct fc_function_template fnic_fc_functions = { @@ -152,6 +163,7 @@ static struct fc_function_template fnic_fc_functions = { .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo, .issue_fc_host_lip = fnic_reset, .get_fc_host_stats = fnic_get_stats, + .reset_fc_host_stats = fnic_reset_host_stats, .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), .terminate_rport_io = fnic_terminate_rport_io, .bsg_request = fc_lport_bsg_request, @@ -205,13 +217,116 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; stats->invalid_crc_count = vs->rx.rx_crc_errors; - stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ; + stats->seconds_since_last_reset = + (jiffies - fnic->stats_reset_time) / HZ; stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); return stats; } +/* + * fnic_dump_fchost_stats + * note : dumps fc_statistics into system logs + */ +void fnic_dump_fchost_stats(struct Scsi_Host *host, + struct fc_host_statistics *stats) +{ + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: seconds since last reset = %llu\n", + stats->seconds_since_last_reset); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: tx frames = %llu\n", + stats->tx_frames); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: tx words = %llu\n", + stats->tx_words); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: rx frames = %llu\n", + stats->rx_frames); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: rx words = %llu\n", + stats->rx_words); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: lip count = %llu\n", + stats->lip_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: nos count = %llu\n", + stats->nos_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: error frames = %llu\n", + stats->error_frames); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: dumped frames = %llu\n", + stats->dumped_frames); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: link failure count = %llu\n", + stats->link_failure_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: loss of sync count = %llu\n", + stats->loss_of_sync_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: loss of signal count = %llu\n", + stats->loss_of_signal_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: prim seq 
protocol err count = %llu\n", + stats->prim_seq_protocol_err_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: invalid tx word count= %llu\n", + stats->invalid_tx_word_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: invalid crc count = %llu\n", + stats->invalid_crc_count); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp input requests = %llu\n", + stats->fcp_input_requests); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp output requests = %llu\n", + stats->fcp_output_requests); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp control requests = %llu\n", + stats->fcp_control_requests); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp input megabytes = %llu\n", + stats->fcp_input_megabytes); + FNIC_MAIN_NOTE(KERN_NOTICE, host, + "fnic: fcp output megabytes = %llu\n", + stats->fcp_output_megabytes); + return; +} + +/* + * fnic_reset_host_stats : clears host stats + * note : called when reset_statistics set under sysfs dir + */ +static void fnic_reset_host_stats(struct Scsi_Host *host) +{ + int ret; + struct fc_lport *lp = shost_priv(host); + struct fnic *fnic = lport_priv(lp); + struct fc_host_statistics *stats; + unsigned long flags; + + /* dump current stats, before clearing them */ + stats = fnic_get_stats(host); + fnic_dump_fchost_stats(host, stats); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + ret = vnic_dev_stats_clear(fnic->vdev); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (ret) { + FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, + "fnic: Reset vnic stats failed" + " 0x%x", ret); + return; + } + fnic->stats_reset_time = jiffies; + memset(stats, 0, sizeof(*stats)); + + return; +} + void fnic_log_q_error(struct fnic *fnic) { unsigned int i; @@ -292,6 +407,13 @@ static void fnic_notify_timer(unsigned long data) round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); } +static void fnic_fip_notify_timer(unsigned long data) +{ + struct fnic *fnic = (struct fnic *)data; + + fnic_handle_fip_timer(fnic); +} + static void fnic_notify_timer_start(struct fnic *fnic) { switch (vnic_dev_get_intr_mode(fnic->vdev)) { @@ -403,6 +525,12 @@ static u8 *fnic_get_mac(struct fc_lport *lport) return fnic->data_src_addr; } +static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id) +{ + u16 old_vlan; + old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id); +} + static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct Scsi_Host *host; @@ -433,11 +561,11 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) host->transportt = fnic_fc_transport; - err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ); + err = fnic_stats_debugfs_init(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, - "Unable to alloc shared tag map\n"); - goto err_out_free_hba; + "Failed to initialize debugfs for stats\n"); + fnic_stats_debugfs_remove(fnic); } /* Setup PCI resources */ @@ -462,10 +590,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); /* Query PCI controller on system for DMA addressing - * limitation for the device. Try 40-bit first, and + * limitation for the device. Try 64-bit first, and * fail to 32-bit. 
*/ - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { @@ -482,10 +610,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_release_regions; } } else { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { shost_printk(KERN_ERR, fnic->lport->host, - "Unable to obtain 40-bit DMA " + "Unable to obtain 64-bit DMA " "for consistent allocations, aborting.\n"); goto err_out_release_regions; } @@ -552,6 +680,22 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) "aborting.\n"); goto err_out_dev_close; } + + /* Configure Maximum Outstanding IO reqs*/ + if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) { + host->can_queue = min_t(u32, FNIC_MAX_IO_REQ, + max_t(u32, FNIC_MIN_IO_REQ, + fnic->config.io_throttle_count)); + } + fnic->fnic_max_tag_id = host->can_queue; + + err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "Unable to alloc shared tag map\n"); + goto err_out_dev_close; + } + host->max_lun = fnic->config.luns_per_tgt; host->max_id = FNIC_MAX_FCP_TARGET; host->max_cmd_len = FCOE_MAX_CMD_LEN; @@ -620,11 +764,21 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); + fnic->set_vlan = fnic_set_vlan; fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); + setup_timer(&fnic->fip_timer, fnic_fip_notify_timer, + (unsigned long)fnic); + spin_lock_init(&fnic->vlans_lock); + INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); + INIT_WORK(&fnic->event_work, fnic_handle_event); + skb_queue_head_init(&fnic->fip_frame_queue); + INIT_LIST_HEAD(&fnic->evlist); + INIT_LIST_HEAD(&fnic->vlans); } else { shost_printk(KERN_INFO, fnic->lport->host, "firmware uses non-FIP mode\n"); fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); + fnic->ctlr.state = FIP_ST_NON_FIP; } fnic->state = FNIC_IN_FC_MODE; @@ -696,6 +850,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } fc_lport_init_stats(lp); + fnic->stats_reset_time = jiffies; fc_lport_config(lp); @@ -775,6 +930,7 @@ err_out_release_regions: err_out_disable_device: pci_disable_device(pdev); err_out_free_hba: + fnic_stats_debugfs_remove(fnic); scsi_host_put(lp->host); err_out: return err; @@ -807,6 +963,13 @@ static void fnic_remove(struct pci_dev *pdev) skb_queue_purge(&fnic->frame_queue); skb_queue_purge(&fnic->tx_queue); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + del_timer_sync(&fnic->fip_timer); + skb_queue_purge(&fnic->fip_frame_queue); + fnic_fcoe_reset_vlans(fnic); + fnic_fcoe_evlist_free(fnic); + } + /* * Log off the fabric. This stops all remote ports, dns port, * logs off the fabric. This flushes all rport, disc, lport work @@ -820,6 +983,7 @@ static void fnic_remove(struct pci_dev *pdev) fcoe_ctlr_destroy(&fnic->ctlr); fc_lport_destroy(lp); + fnic_stats_debugfs_remove(fnic); /* * This stops the fnic device, masks all interrupts. 
Completed @@ -847,7 +1011,6 @@ static void fnic_remove(struct pci_dev *pdev) fnic_iounmap(fnic); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); scsi_host_put(lp->host); } @@ -865,14 +1028,31 @@ static int __init fnic_init_module(void) printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); + /* Create debugfs entries for fnic */ + err = fnic_debugfs_init(); + if (err < 0) { + printk(KERN_ERR PFX "Failed to create fnic directory " + "for tracing and stats logging\n"); + fnic_debugfs_terminate(); + } + /* Allocate memory for trace buffer */ err = fnic_trace_buf_init(); if (err < 0) { - printk(KERN_ERR PFX "Trace buffer initialization Failed " - "Fnic Tracing utility is disabled\n"); + printk(KERN_ERR PFX + "Trace buffer initialization Failed. " + "Fnic Tracing utility is disabled\n"); fnic_trace_free(); } + /* Allocate memory for fc trace buffer */ + err = fnic_fc_trace_init(); + if (err < 0) { + printk(KERN_ERR PFX "FC trace buffer initialization Failed. " + "FC frame tracing utility is disabled\n"); + fnic_fc_trace_free(); + } + /* Create a cache for allocation of default size sgls */ len = sizeof(struct fnic_dflt_sgl_list); fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create @@ -889,8 +1069,8 @@ static int __init fnic_init_module(void) len = sizeof(struct fnic_sgl_list); fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, - SLAB_HWCACHE_ALIGN, - NULL); + SLAB_HWCACHE_ALIGN, + NULL); if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) { printk(KERN_ERR PFX "failed to create fnic max sgl slab\n"); err = -ENOMEM; @@ -917,6 +1097,13 @@ static int __init fnic_init_module(void) spin_lock_init(&fnic_list_lock); INIT_LIST_HEAD(&fnic_list); + fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q"); + if (!fnic_fip_queue) { + printk(KERN_ERR PFX "fnic FIP work queue create failed\n"); + err = -ENOMEM; + goto err_create_fip_workq; + } + fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); if (!fnic_fc_transport) { printk(KERN_ERR PFX "fc_attach_transport error\n"); @@ -935,6 +1122,8 @@ static int __init fnic_init_module(void) err_pci_register: fc_release_transport(fnic_fc_transport); err_fc_transport: + destroy_workqueue(fnic_fip_queue); +err_create_fip_workq: destroy_workqueue(fnic_event_queue); err_create_fnic_workq: kmem_cache_destroy(fnic_io_req_cache); @@ -944,6 +1133,8 @@ err_create_fnic_sgl_slab_max: kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); err_create_fnic_sgl_slab_dflt: fnic_trace_free(); + fnic_fc_trace_free(); + fnic_debugfs_terminate(); return err; } @@ -951,11 +1142,17 @@ static void __exit fnic_cleanup_module(void) { pci_unregister_driver(&fnic_driver); destroy_workqueue(fnic_event_queue); + if (fnic_fip_queue) { + flush_workqueue(fnic_fip_queue); + destroy_workqueue(fnic_fip_queue); + } kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); kmem_cache_destroy(fnic_io_req_cache); fc_release_transport(fnic_fc_transport); fnic_trace_free(); + fnic_fc_trace_free(); + fnic_debugfs_terminate(); } module_init(fnic_init_module); diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index be99e7549d8..ea28b5ca4c7 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -111,6 +111,12 @@ static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic, return &fnic->io_req_lock[hash]; } +static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic, + int tag) +{ 
+ return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)]; +} + /* * Unmap the data buffer and sense buffer for an io_req, * also unmap and free the device-private scatter/gather list. @@ -220,15 +226,23 @@ int fnic_fw_reset_handler(struct fnic *fnic) if (!vnic_wq_copy_desc_avail(wq)) ret = -EAGAIN; - else + else { fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read( + &fnic->fnic_stats.fw_stats.active_fw_reqs)); + } spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); - if (!ret) + if (!ret) { + atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Issued fw reset\n"); - else { + } else { fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Failed to issue fw reset\n"); @@ -285,6 +299,12 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) fc_id, fnic->ctlr.map_dest, gw_mac); } + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); + flogi_reg_ioreq_end: spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); return ret; @@ -304,6 +324,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); struct fc_rport_libfc_priv *rp = rport->dd_data; struct host_sg_desc *desc; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; u8 pri_tag = 0; unsigned int i; unsigned long intr_flags; @@ -352,6 +373,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, "fnic_queue_wq_copy_desc failure - no descriptors\n"); + atomic64_inc(&misc_stats->io_cpwq_alloc_failures); return SCSI_MLQUEUE_HOST_BUSY; } @@ -380,6 +402,12 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, rport->maxframe_size, rp->r_a_tov, rp->e_d_tov); + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); return 0; } @@ -395,6 +423,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ struct fc_rport *rport; struct fnic_io_req *io_req = NULL; struct fnic *fnic = lport_priv(lp); + struct fnic_stats *fnic_stats = &fnic->fnic_stats; struct vnic_wq_copy *wq; int ret; u64 cmd_trace; @@ -408,6 +437,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ rport = starget_to_rport(scsi_target(sc->device)); ret = fc_remote_port_chkready(rport); if (ret) { + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); sc->result = ret; done(sc); return 0; @@ -430,6 +460,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ /* Get a new io_req for this SCSI IO */ io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC); if (!io_req) { + 
atomic64_inc(&fnic_stats->io_stats.alloc_failures); ret = SCSI_MLQUEUE_HOST_BUSY; goto out; } @@ -456,6 +487,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type], GFP_ATOMIC); if (!io_req->sgl_list) { + atomic64_inc(&fnic_stats->io_stats.alloc_failures); ret = SCSI_MLQUEUE_HOST_BUSY; scsi_dma_unmap(sc); mempool_free(io_req, fnic->io_req_pool); @@ -503,6 +535,13 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ mempool_free(io_req, fnic->io_req_pool); } } else { + atomic64_inc(&fnic_stats->io_stats.active_ios); + atomic64_inc(&fnic_stats->io_stats.num_ios); + if (atomic64_read(&fnic_stats->io_stats.active_ios) > + atomic64_read(&fnic_stats->io_stats.max_active_ios)) + atomic64_set(&fnic_stats->io_stats.max_active_ios, + atomic64_read(&fnic_stats->io_stats.active_ios)); + /* REVISIT: Use per IO lock in the final code */ CMD_FLAGS(sc) |= FNIC_IO_ISSUED; } @@ -536,12 +575,18 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, struct fcpio_tag tag; int ret = 0; unsigned long flags; + struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); + atomic64_inc(&reset_stats->fw_reset_completions); + /* Clean up all outstanding io requests */ fnic_cleanup_io(fnic, SCSI_NO_TAG); + atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0); + atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0); + spin_lock_irqsave(&fnic->fnic_lock, flags); /* fnic should be in FC_TRANS_ETH_MODE */ @@ -565,6 +610,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, * reset the firmware. Free the cached flogi */ fnic->state = FNIC_IN_FC_MODE; + atomic64_inc(&reset_stats->fw_reset_failures); ret = -1; } } else { @@ -572,6 +618,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, fnic->lport->host, "Unexpected state %s while processing" " reset cmpl\n", fnic_state_to_str(fnic->state)); + atomic64_inc(&reset_stats->fw_reset_failures); ret = -1; } @@ -695,10 +742,14 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic, wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + fnic->fnic_stats.misc_stats.last_ack_time = jiffies; if (is_ack_index_in_range(wq, request_out)) { fnic->fw_ack_index[0] = request_out; fnic->fw_ack_recd[0] = 1; - } + } else + atomic64_inc( + &fnic->fnic_stats.misc_stats.ack_index_out_of_range); + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); FNIC_TRACE(fnic_fcpio_ack_handler, fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], @@ -720,6 +771,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, struct fcpio_icmnd_cmpl *icmnd_cmpl; struct fnic_io_req *io_req; struct scsi_cmnd *sc; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; unsigned long flags; spinlock_t *io_lock; u64 cmd_trace; @@ -730,7 +782,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, fcpio_tag_id_dec(&tag, &id); icmnd_cmpl = &desc->u.icmnd_cmpl; - if (id >= FNIC_MAX_IO_REQ) { + if (id >= fnic->fnic_max_tag_id) { shost_printk(KERN_ERR, fnic->lport->host, "Tag out of range tag %x hdr status = %s\n", id, fnic_fcpio_status_to_str(hdr_status)); @@ -740,6 +792,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, sc = scsi_host_find_tag(fnic->lport->host, id); WARN_ON_ONCE(!sc); if (!sc) { + atomic64_inc(&fnic_stats->io_stats.sc_null); shost_printk(KERN_ERR, fnic->lport->host, "icmnd_cmpl 
sc is null - " "hdr status = %s tag = 0x%x desc = 0x%p\n", @@ -760,6 +813,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, io_req = (struct fnic_io_req *)CMD_SP(sc); WARN_ON_ONCE(!io_req); if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.ioreq_null); CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL; spin_unlock_irqrestore(io_lock, flags); shost_printk(KERN_ERR, fnic->lport->host, @@ -818,63 +872,54 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) xfer_len -= icmnd_cmpl->residual; - /* - * If queue_full, then try to reduce queue depth for all - * LUNS on the target. Todo: this should be accompanied - * by a periodic queue_depth rampup based on successful - * IO completion. - */ - if (icmnd_cmpl->scsi_status == QUEUE_FULL) { - struct scsi_device *t_sdev; - int qd = 0; - - shost_for_each_device(t_sdev, sc->device->host) { - if (t_sdev->id != sc->device->id) - continue; - - if (t_sdev->queue_depth > 1) { - qd = scsi_track_queue_full - (t_sdev, - t_sdev->queue_depth - 1); - if (qd == -1) - qd = t_sdev->host->cmd_per_lun; - shost_printk(KERN_INFO, - fnic->lport->host, - "scsi[%d:%d:%d:%d" - "] queue full detected," - "new depth = %d\n", - t_sdev->host->host_no, - t_sdev->channel, - t_sdev->id, t_sdev->lun, - t_sdev->queue_depth); - } - } - } + if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) + atomic64_inc(&fnic_stats->misc_stats.queue_fulls); break; case FCPIO_TIMEOUT: /* request was timed out */ + atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout); sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status; break; case FCPIO_ABORTED: /* request was aborted */ + atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted); sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break; case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. 
*/ + atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch); scsi_set_resid(sc, icmnd_cmpl->residual); sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; break; case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */ + atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources); sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status; break; - case FCPIO_INVALID_HEADER: /* header contains invalid data */ - case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ - case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ + case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */ + atomic64_inc(&fnic_stats->io_stats.io_not_found); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */ - case FCPIO_MSS_INVALID: /* request was aborted due to mss error */ + atomic64_inc(&fnic_stats->misc_stats.sgl_invalid); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + case FCPIO_FW_ERR: /* request was terminated due to fw error */ + atomic64_inc(&fnic_stats->fw_stats.io_fw_errs); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_MSS_INVALID: /* request was aborted due to mss error */ + atomic64_inc(&fnic_stats->misc_stats.mss_invalid); + sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; + break; + + case FCPIO_INVALID_HEADER: /* header contains invalid data */ + case FCPIO_INVALID_PARAM: /* some parameter in request invalid */ + case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ default: shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", fnic_fcpio_status_to_str(hdr_status)); @@ -882,6 +927,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, break; } + if (hdr_status != FCPIO_SUCCESS) { + atomic64_inc(&fnic_stats->io_stats.io_failures); + shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", + fnic_fcpio_status_to_str(hdr_status)); + } /* Break link with the SCSI command */ CMD_SP(sc) = NULL; CMD_FLAGS(sc) |= FNIC_IO_DONE; @@ -915,6 +965,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, } else fnic->lport->host_stats.fcp_control_requests++; + atomic64_dec(&fnic_stats->io_stats.active_ios); + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); + /* Call SCSI completion function to complete the IO */ if (sc->scsi_done) sc->scsi_done(sc); @@ -932,6 +988,10 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, u32 id; struct scsi_cmnd *sc; struct fnic_io_req *io_req; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats; + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; unsigned long flags; spinlock_t *io_lock; unsigned long start_time; @@ -939,7 +999,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); fcpio_tag_id_dec(&tag, &id); - if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) { + if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) { shost_printk(KERN_ERR, fnic->lport->host, "Tag out of range tag %x hdr status = %s\n", id, fnic_fcpio_status_to_str(hdr_status)); @@ -949,6 +1009,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); WARN_ON_ONCE(!sc); if (!sc) { + 
atomic64_inc(&fnic_stats->io_stats.sc_null); shost_printk(KERN_ERR, fnic->lport->host, "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", fnic_fcpio_status_to_str(hdr_status), id); @@ -959,6 +1020,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, io_req = (struct fnic_io_req *)CMD_SP(sc); WARN_ON_ONCE(!io_req); if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.ioreq_null); spin_unlock_irqrestore(io_lock, flags); CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; shost_printk(KERN_ERR, fnic->lport->host, @@ -983,15 +1045,48 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, spin_unlock_irqrestore(io_lock, flags); } else if (id & FNIC_TAG_ABORT) { /* Completion of abort cmd */ + switch (hdr_status) { + case FCPIO_SUCCESS: + break; + case FCPIO_TIMEOUT: + if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) + atomic64_inc(&abts_stats->abort_fw_timeouts); + else + atomic64_inc( + &term_stats->terminate_fw_timeouts); + break; + case FCPIO_IO_NOT_FOUND: + if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) + atomic64_inc(&abts_stats->abort_io_not_found); + else + atomic64_inc( + &term_stats->terminate_io_not_found); + break; + default: + if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) + atomic64_inc(&abts_stats->abort_failures); + else + atomic64_inc( + &term_stats->terminate_failures); + break; + } if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) { /* This is a late completion. Ignore it */ spin_unlock_irqrestore(io_lock, flags); return; } - CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; CMD_ABTS_STATUS(sc) = hdr_status; - CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; + + atomic64_dec(&fnic_stats->io_stats.active_ios); + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); + + if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) + atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "abts cmpl recd. 
id %d status %s\n", (int)(id & FNIC_TAG_MASK), @@ -1095,6 +1190,18 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, struct fnic *fnic = vnic_dev_priv(vdev); switch (desc->hdr.type) { + case FCPIO_ICMND_CMPL: /* fw completed a command */ + case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ + case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ + case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ + case FCPIO_RESET_CMPL: /* fw completed reset */ + atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs); + break; + default: + break; + } + + switch (desc->hdr.type) { case FCPIO_ACK: /* fw copied copy wq desc to its queue */ fnic_fcpio_ack_handler(fnic, cq_index, desc); break; @@ -1148,23 +1255,26 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do) static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) { - unsigned int i; + int i; struct fnic_io_req *io_req; unsigned long flags = 0; struct scsi_cmnd *sc; spinlock_t *io_lock; unsigned long start_time = 0; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; - for (i = 0; i < FNIC_MAX_IO_REQ; i++) { + for (i = 0; i < fnic->fnic_max_tag_id; i++) { if (i == exclude_id) continue; + io_lock = fnic_io_lock_tag(fnic, i); + spin_lock_irqsave(io_lock, flags); sc = scsi_host_find_tag(fnic->lport->host, i); - if (!sc) + if (!sc) { + spin_unlock_irqrestore(io_lock, flags); continue; + } - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { @@ -1202,8 +1312,14 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) cleanup_scsi_cmd: sc->result = DID_TRANSPORT_DISRUPTED << 16; - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:" - " DID_TRANSPORT_DISRUPTED\n"); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n", + __func__, (jiffies - start_time)); + + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); /* Complete the command to SCSI */ if (sc->scsi_done) { @@ -1236,7 +1352,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, fcpio_tag_id_dec(&desc->hdr.tag, &id); id &= FNIC_TAG_MASK; - if (id >= FNIC_MAX_IO_REQ) + if (id >= fnic->fnic_max_tag_id) return; sc = scsi_host_find_tag(fnic->lport->host, id); @@ -1288,6 +1404,7 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, { struct vnic_wq_copy *wq = &fnic->wq_copy[0]; struct Scsi_Host *host = fnic->lport->host; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; unsigned long flags; spin_lock_irqsave(host->host_lock, flags); @@ -1309,12 +1426,19 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, atomic_dec(&fnic->in_flight); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_queue_abort_io_req: failure: no descriptors\n"); + atomic64_inc(&misc_stats->abts_cpwq_alloc_failures); return 1; } fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT, 0, task_req, tag, fc_lun, io_req->port_id, fnic->config.ra_tov, fnic->config.ed_tov); + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], 
flags); atomic_dec(&fnic->in_flight); @@ -1325,10 +1449,13 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) { int tag; int abt_tag; + int term_cnt = 0; struct fnic_io_req *io_req; spinlock_t *io_lock; unsigned long flags; struct scsi_cmnd *sc; + struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; + struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; struct scsi_lun fc_lun; enum fnic_ioreq_state old_ioreq_state; @@ -1340,14 +1467,15 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) if (fnic->in_remove) return; - for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { + for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { abt_tag = tag; + io_lock = fnic_io_lock_tag(fnic, tag); + spin_lock_irqsave(io_lock, flags); sc = scsi_host_find_tag(fnic->lport->host, tag); - if (!sc) + if (!sc) { + spin_unlock_irqrestore(io_lock, flags); continue; - - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); + } io_req = (struct fnic_io_req *)CMD_SP(sc); @@ -1391,6 +1519,7 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { + atomic64_inc(&reset_stats->device_reset_terminates); abt_tag = (tag | FNIC_TAG_DEV_RST); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_rport_exch_reset dev rst sc 0x%p\n", @@ -1427,8 +1556,12 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) else CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; spin_unlock_irqrestore(io_lock, flags); + atomic64_inc(&term_stats->terminates); + term_cnt++; } } + if (term_cnt > atomic64_read(&term_stats->max_terminates)) + atomic64_set(&term_stats->max_terminates, term_cnt); } @@ -1436,17 +1569,37 @@ void fnic_terminate_rport_io(struct fc_rport *rport) { int tag; int abt_tag; + int term_cnt = 0; struct fnic_io_req *io_req; spinlock_t *io_lock; unsigned long flags; struct scsi_cmnd *sc; struct scsi_lun fc_lun; - struct fc_rport_libfc_priv *rdata = rport->dd_data; - struct fc_lport *lport = rdata->local_port; - struct fnic *fnic = lport_priv(lport); + struct fc_rport_libfc_priv *rdata; + struct fc_lport *lport; + struct fnic *fnic; struct fc_rport *cmd_rport; + struct reset_stats *reset_stats; + struct terminate_stats *term_stats; enum fnic_ioreq_state old_ioreq_state; + if (!rport) { + printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n"); + return; + } + rdata = rport->dd_data; + + if (!rdata) { + printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n"); + return; + } + lport = rdata->local_port; + + if (!lport) { + printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n"); + return; + } + fnic = lport_priv(lport); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_terminate_rport_io called" " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", @@ -1456,18 +1609,24 @@ void fnic_terminate_rport_io(struct fc_rport *rport) if (fnic->in_remove) return; - for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { + reset_stats = &fnic->fnic_stats.reset_stats; + term_stats = &fnic->fnic_stats.term_stats; + + for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { abt_tag = tag; + io_lock = fnic_io_lock_tag(fnic, tag); + spin_lock_irqsave(io_lock, flags); sc = scsi_host_find_tag(fnic->lport->host, tag); - if (!sc) + if (!sc) { + spin_unlock_irqrestore(io_lock, flags); continue; + } cmd_rport = starget_to_rport(scsi_target(sc->device)); - if (rport != cmd_rport) + if (rport != cmd_rport) { + spin_unlock_irqrestore(io_lock, 
flags); continue; - - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); + } io_req = (struct fnic_io_req *)CMD_SP(sc); @@ -1509,6 +1668,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport) CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { + atomic64_inc(&reset_stats->device_reset_terminates); abt_tag = (tag | FNIC_TAG_DEV_RST); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_terminate_rport_io dev rst sc 0x%p\n", sc); @@ -1545,8 +1705,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport) else CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED; spin_unlock_irqrestore(io_lock, flags); + atomic64_inc(&term_stats->terminates); + term_cnt++; } } + if (term_cnt > atomic64_read(&term_stats->max_terminates)) + atomic64_set(&term_stats->max_terminates, term_cnt); } @@ -1567,6 +1731,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) int ret = SUCCESS; u32 task_req = 0; struct scsi_lun fc_lun; + struct fnic_stats *fnic_stats; + struct abort_stats *abts_stats; + struct terminate_stats *term_stats; + enum fnic_ioreq_state old_ioreq_state; int tag; DECLARE_COMPLETION_ONSTACK(tm_done); @@ -1577,6 +1745,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) lp = shost_priv(sc->device->host); fnic = lport_priv(lp); + fnic_stats = &fnic->fnic_stats; + abts_stats = &fnic->fnic_stats.abts_stats; + term_stats = &fnic->fnic_stats.term_stats; + rport = starget_to_rport(scsi_target(sc->device)); tag = sc->request->tag; FNIC_SCSI_DBG(KERN_DEBUG, @@ -1623,6 +1795,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) * the completion wont be done till mid-layer, since abort * has already started. */ + old_ioreq_state = CMD_STATE(sc); CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; @@ -1635,8 +1808,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) */ if (fc_remote_port_chkready(rport) == 0) task_req = FCPIO_ITMF_ABT_TASK; - else + else { + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); task_req = FCPIO_ITMF_ABT_TASK_TERM; + } /* Now queue the abort command to firmware */ int_to_scsilun(sc->device->lun, &fc_lun); @@ -1644,6 +1819,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req, fc_lun.scsi_lun, io_req)) { spin_lock_irqsave(io_lock, flags); + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) + CMD_STATE(sc) = old_ioreq_state; io_req = (struct fnic_io_req *)CMD_SP(sc); if (io_req) io_req->abts_done = NULL; @@ -1651,10 +1828,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) ret = FAILED; goto fnic_abort_cmd_end; } - if (task_req == FCPIO_ITMF_ABT_TASK) + if (task_req == FCPIO_ITMF_ABT_TASK) { CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED; - else + atomic64_inc(&fnic_stats->abts_stats.aborts); + } else { CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED; + atomic64_inc(&fnic_stats->term_stats.terminates); + } /* * We queued an abort IO, wait for its completion. 
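/*
 * A minimal sketch of the hand-off the surrounding hunks rely on, for
 * orientation: the eh thread parks a completion on the io_req, queues the
 * abort to firmware, and fnic_fcpio_itmf_cmpl_handler() wakes it up. The
 * names come from the code above; the timeout arithmetic is an assumption
 * based on the ra_tov/ed_tov fields of fnic->config, not a verbatim quote
 * of the driver.
 *
 *	DECLARE_COMPLETION_ONSTACK(tm_done);
 *
 *	spin_lock_irqsave(io_lock, flags);
 *	io_req->abts_done = &tm_done;      <- publish wakeup under io_lock
 *	spin_unlock_irqrestore(io_lock, flags);
 *
 *	if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
 *				    fc_lun.scsi_lun, io_req))
 *		return FAILED;             <- abort never reached firmware
 *
 *	wait_for_completion_timeout(&tm_done,
 *			msecs_to_jiffies(2 * fnic->config.ra_tov +
 *					 fnic->config.ed_tov));
 */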
@@ -1672,6 +1852,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) io_req = (struct fnic_io_req *)CMD_SP(sc); if (!io_req) { + atomic64_inc(&fnic_stats->io_stats.ioreq_null); spin_unlock_irqrestore(io_lock, flags); CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL; ret = FAILED; @@ -1680,13 +1861,20 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) io_req->abts_done = NULL; /* fw did not complete abort, timed out */ - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { spin_unlock_irqrestore(io_lock, flags); + if (task_req == FCPIO_ITMF_ABT_TASK) { + atomic64_inc(&abts_stats->abort_drv_timeouts); + } else { + atomic64_inc(&term_stats->terminate_drv_timeouts); + } CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; ret = FAILED; goto fnic_abort_cmd_end; } + CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; + /* * firmware completed the abort, check the status, * free the io_req irrespective of failure or success @@ -1724,6 +1912,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, { struct vnic_wq_copy *wq = &fnic->wq_copy[0]; struct Scsi_Host *host = fnic->lport->host; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; struct scsi_lun fc_lun; int ret = 0; unsigned long intr_flags; @@ -1745,6 +1934,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, if (!vnic_wq_copy_desc_avail(wq)) { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "queue_dr_io_req failure - no descriptors\n"); + atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures); ret = -EAGAIN; goto lr_io_req_end; } @@ -1757,6 +1947,12 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, fc_lun.scsi_lun, io_req->port_id, fnic->config.ra_tov, fnic->config.ed_tov); + atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); + if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > + atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) + atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, + atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); + lr_io_req_end: spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags); atomic_dec(&fnic->in_flight); @@ -1784,17 +1980,18 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, DECLARE_COMPLETION_ONSTACK(tm_done); enum fnic_ioreq_state old_ioreq_state; - for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { + for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { + io_lock = fnic_io_lock_tag(fnic, tag); + spin_lock_irqsave(io_lock, flags); sc = scsi_host_find_tag(fnic->lport->host, tag); /* * ignore this lun reset cmd or cmds that do not belong to * this lun */ - if (!sc || sc == lr_sc || sc->device != lun_dev) + if (!sc || sc == lr_sc || sc->device != lun_dev) { + spin_unlock_irqrestore(io_lock, flags); continue; - - io_lock = fnic_io_lock_hash(fnic, sc); - spin_lock_irqsave(io_lock, flags); + } io_req = (struct fnic_io_req *)CMD_SP(sc); @@ -1823,6 +2020,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, spin_unlock_irqrestore(io_lock, flags); continue; } + + if (io_req->abts_done) + shost_printk(KERN_ERR, fnic->lport->host, + "%s: io_req->abts_done is set state is %s\n", + __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); old_ioreq_state = CMD_STATE(sc); /* * Any pending IO issued prior to reset is expected to be @@ -1833,11 +2035,6 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, */ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; - if (io_req->abts_done) - shost_printk(KERN_ERR, fnic->lport->host, - "%s: io_req->abts_done is set state is %s\n", - __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); - 
BUG_ON(io_req->abts_done); abt_tag = tag; @@ -1890,12 +2087,13 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, io_req->abts_done = NULL; /* if abort is still pending with fw, fail */ - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { + if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { spin_unlock_irqrestore(io_lock, flags); CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; ret = 1; goto clean_pending_aborts_end; } + CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; CMD_SP(sc) = NULL; spin_unlock_irqrestore(io_lock, flags); @@ -1989,6 +2187,8 @@ int fnic_device_reset(struct scsi_cmnd *sc) unsigned long flags; unsigned long start_time = 0; struct scsi_lun fc_lun; + struct fnic_stats *fnic_stats; + struct reset_stats *reset_stats; int tag = 0; DECLARE_COMPLETION_ONSTACK(tm_done); int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/ @@ -2000,6 +2200,10 @@ int fnic_device_reset(struct scsi_cmnd *sc) lp = shost_priv(sc->device->host); fnic = lport_priv(lp); + fnic_stats = &fnic->fnic_stats; + reset_stats = &fnic->fnic_stats.reset_stats; + + atomic64_inc(&reset_stats->device_resets); rport = starget_to_rport(scsi_target(sc->device)); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, @@ -2010,8 +2214,10 @@ int fnic_device_reset(struct scsi_cmnd *sc) goto fnic_device_reset_end; /* Check if remote port up */ - if (fc_remote_port_chkready(rport)) + if (fc_remote_port_chkready(rport)) { + atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); goto fnic_device_reset_end; + } CMD_FLAGS(sc) = FNIC_DEVICE_RESET; /* Allocate tag if not present */ @@ -2087,14 +2293,15 @@ int fnic_device_reset(struct scsi_cmnd *sc) * gets cleaned up during higher levels of EH */ if (status == FCPIO_INVALID_CODE) { + atomic64_inc(&reset_stats->device_reset_timeouts); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Device reset timed out\n"); CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT; spin_unlock_irqrestore(io_lock, flags); int_to_scsilun(sc->device->lun, &fc_lun); /* - * Issue abort and terminate on the device reset request. - * If q'ing of the abort fails, retry issue it after a delay. + * Issue abort and terminate on device reset request. + * If q'ing of terminate fails, retry it after a delay. */ while (1) { spin_lock_irqsave(io_lock, flags); @@ -2200,6 +2407,10 @@ fnic_device_reset_end: "Returning from device reset %s\n", (ret == SUCCESS) ? "SUCCESS" : "FAILED"); + + if (ret == FAILED) + atomic64_inc(&reset_stats->device_reset_failures); + return ret; } @@ -2208,26 +2419,34 @@ int fnic_reset(struct Scsi_Host *shost) { struct fc_lport *lp; struct fnic *fnic; - int ret = SUCCESS; + int ret = 0; + struct reset_stats *reset_stats; lp = shost_priv(shost); fnic = lport_priv(lp); + reset_stats = &fnic->fnic_stats.reset_stats; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_reset called\n"); + atomic64_inc(&reset_stats->fnic_resets); + /* * Reset local port, this will clean up libFC exchanges, * reset remote port sessions, and if link is up, begin flogi */ - if (lp->tt.lport_reset(lp)) - ret = FAILED; + ret = lp->tt.lport_reset(lp); FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Returning from fnic reset %s\n", - (ret == SUCCESS) ? + (ret == 0) ? 
"SUCCESS" : "FAILED"); + if (ret == 0) + atomic64_inc(&reset_stats->fnic_reset_completions); + else + atomic64_inc(&reset_stats->fnic_reset_failures); + return ret; } @@ -2252,7 +2471,7 @@ int fnic_host_reset(struct scsi_cmnd *sc) * scsi-ml tries to send a TUR to every device if host reset is * successful, so before returning to scsi, fabric should be up */ - ret = fnic_reset(shost); + ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED; if (ret == SUCCESS) { wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ; ret = FAILED; @@ -2405,7 +2624,7 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc) lun_dev = lr_sc->device; /* walk again to check, if IOs are still pending in fw */ - for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { + for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { sc = scsi_host_find_tag(fnic->lport->host, tag); /* * ignore this lun reset cmd or cmds that do not belong to @@ -2432,11 +2651,9 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc) "Found IO in %s on lun\n", fnic_ioreq_state_to_str(CMD_STATE(sc))); - if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { - spin_unlock_irqrestore(io_lock, flags); + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) ret = 1; - continue; - } + spin_unlock_irqrestore(io_lock, flags); } return ret; diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h new file mode 100644 index 00000000000..540cceb843c --- /dev/null +++ b/drivers/scsi/fnic/fnic_stats.h @@ -0,0 +1,116 @@ +/* + * Copyright 2013 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _FNIC_STATS_H_ +#define _FNIC_STATS_H_ +struct io_path_stats { + atomic64_t active_ios; + atomic64_t max_active_ios; + atomic64_t io_completions; + atomic64_t io_failures; + atomic64_t ioreq_null; + atomic64_t alloc_failures; + atomic64_t sc_null; + atomic64_t io_not_found; + atomic64_t num_ios; +}; + +struct abort_stats { + atomic64_t aborts; + atomic64_t abort_failures; + atomic64_t abort_drv_timeouts; + atomic64_t abort_fw_timeouts; + atomic64_t abort_io_not_found; +}; + +struct terminate_stats { + atomic64_t terminates; + atomic64_t max_terminates; + atomic64_t terminate_drv_timeouts; + atomic64_t terminate_fw_timeouts; + atomic64_t terminate_io_not_found; + atomic64_t terminate_failures; +}; + +struct reset_stats { + atomic64_t device_resets; + atomic64_t device_reset_failures; + atomic64_t device_reset_aborts; + atomic64_t device_reset_timeouts; + atomic64_t device_reset_terminates; + atomic64_t fw_resets; + atomic64_t fw_reset_completions; + atomic64_t fw_reset_failures; + atomic64_t fnic_resets; + atomic64_t fnic_reset_completions; + atomic64_t fnic_reset_failures; +}; + +struct fw_stats { + atomic64_t active_fw_reqs; + atomic64_t max_fw_reqs; + atomic64_t fw_out_of_resources; + atomic64_t io_fw_errs; +}; + +struct vlan_stats { + atomic64_t vlan_disc_reqs; + atomic64_t resp_withno_vlanID; + atomic64_t sol_expiry_count; + atomic64_t flogi_rejects; +}; + +struct misc_stats { + u64 last_isr_time; + u64 last_ack_time; + atomic64_t isr_count; + atomic64_t max_cq_entries; + atomic64_t ack_index_out_of_range; + atomic64_t data_count_mismatch; + atomic64_t fcpio_timeout; + atomic64_t fcpio_aborted; + atomic64_t sgl_invalid; + atomic64_t mss_invalid; + atomic64_t abts_cpwq_alloc_failures; + atomic64_t devrst_cpwq_alloc_failures; + atomic64_t io_cpwq_alloc_failures; + atomic64_t no_icmnd_itmf_cmpls; + atomic64_t queue_fulls; + atomic64_t rport_not_ready; + atomic64_t frame_errors; +}; + +struct fnic_stats { + struct io_path_stats io_stats; + struct abort_stats abts_stats; + struct terminate_stats term_stats; + struct reset_stats reset_stats; + struct fw_stats fw_stats; + struct vlan_stats vlan_stats; + struct misc_stats misc_stats; +}; + +struct stats_debug_info { + char *debug_buffer; + void *i_private; + int buf_size; + int buffer_len; +}; + +int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *); +int fnic_stats_debugfs_init(struct fnic *); +void fnic_stats_debugfs_remove(struct fnic *); +#endif /* _FNIC_STATS_H_ */ diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index 23a60e3d852..c7728592682 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -20,6 +20,7 @@ #include <linux/errno.h> #include <linux/spinlock.h> #include <linux/kallsyms.h> +#include <linux/time.h> #include "fnic_io.h" #include "fnic.h" @@ -32,6 +33,16 @@ static DEFINE_SPINLOCK(fnic_trace_lock); static fnic_trace_dbg_t fnic_trace_entries; int fnic_tracing_enabled = 1; +/* static char *fnic_fc_ctlr_trace_buf_p; */ + +static int fc_trace_max_entries; +static unsigned long fnic_fc_ctlr_trace_buf_p; +static fnic_trace_dbg_t fc_trace_entries; +int fnic_fc_tracing_enabled = 1; +int fnic_fc_trace_cleared = 1; +static DEFINE_SPINLOCK(fnic_fc_trace_lock); + + /* * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information * @@ -189,6 +200,191 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt) } /* + * fnic_get_stats_data - Copy fnic stats buffer to a memory file + * @fnic_dbgfs_t: pointer to debugfs fnic stats buffer 
+ * + * Description: + * This routine gathers the fnic stats debugfs data from the fnic_stats struct + * and dumps it to stats_debug_info. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into + * stats_debug_info + */ +int fnic_get_stats_data(struct stats_debug_info *debug, + struct fnic_stats *stats) +{ + int len = 0; + int buf_size = debug->buf_size; + struct timespec val1, val2; + + len = snprintf(debug->debug_buffer + len, buf_size - len, + "------------------------------------------\n" + "\t\tIO Statistics\n" + "------------------------------------------\n"); + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n" + "Number of IOs: %lld\nNumber of IO Completions: %lld\n" + "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n" + "Number of Memory alloc Failures: %lld\n" + "Number of IOREQ Null: %lld\n" + "Number of SCSI cmd pointer Null: %lld\n", + (u64)atomic64_read(&stats->io_stats.active_ios), + (u64)atomic64_read(&stats->io_stats.max_active_ios), + (u64)atomic64_read(&stats->io_stats.num_ios), + (u64)atomic64_read(&stats->io_stats.io_completions), + (u64)atomic64_read(&stats->io_stats.io_failures), + (u64)atomic64_read(&stats->io_stats.io_not_found), + (u64)atomic64_read(&stats->io_stats.alloc_failures), + (u64)atomic64_read(&stats->io_stats.ioreq_null), + (u64)atomic64_read(&stats->io_stats.sc_null)); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tAbort Statistics\n" + "------------------------------------------\n"); + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Number of Aborts: %lld\n" + "Number of Abort Failures: %lld\n" + "Number of Abort Driver Timeouts: %lld\n" + "Number of Abort FW Timeouts: %lld\n" + "Number of Abort IO NOT Found: %lld\n", + (u64)atomic64_read(&stats->abts_stats.aborts), + (u64)atomic64_read(&stats->abts_stats.abort_failures), + (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts), + (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts), + (u64)atomic64_read(&stats->abts_stats.abort_io_not_found)); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tTerminate Statistics\n" + "------------------------------------------\n"); + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Number of Terminates: %lld\n" + "Maximum Terminates: %lld\n" + "Number of Terminate Driver Timeouts: %lld\n" + "Number of Terminate FW Timeouts: %lld\n" + "Number of Terminate IO NOT Found: %lld\n" + "Number of Terminate Failures: %lld\n", + (u64)atomic64_read(&stats->term_stats.terminates), + (u64)atomic64_read(&stats->term_stats.max_terminates), + (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts), + (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts), + (u64)atomic64_read(&stats->term_stats.terminate_io_not_found), + (u64)atomic64_read(&stats->term_stats.terminate_failures)); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tReset Statistics\n" + "------------------------------------------\n"); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Number of Device Resets: %lld\n" + "Number of Device Reset Failures: %lld\n" + "Number of Device Reset Aborts: %lld\n" + "Number of Device Reset Timeouts: %lld\n" + "Number of Device Reset Terminates: %lld\n" + "Number of FW Resets: %lld\n" + "Number of FW 
Reset Completions: %lld\n" + "Number of FW Reset Failures: %lld\n" + "Number of Fnic Reset: %lld\n" + "Number of Fnic Reset Completions: %lld\n" + "Number of Fnic Reset Failures: %lld\n", + (u64)atomic64_read(&stats->reset_stats.device_resets), + (u64)atomic64_read(&stats->reset_stats.device_reset_failures), + (u64)atomic64_read(&stats->reset_stats.device_reset_aborts), + (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts), + (u64)atomic64_read( + &stats->reset_stats.device_reset_terminates), + (u64)atomic64_read(&stats->reset_stats.fw_resets), + (u64)atomic64_read(&stats->reset_stats.fw_reset_completions), + (u64)atomic64_read(&stats->reset_stats.fw_reset_failures), + (u64)atomic64_read(&stats->reset_stats.fnic_resets), + (u64)atomic64_read( + &stats->reset_stats.fnic_reset_completions), + (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures)); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tFirmware Statistics\n" + "------------------------------------------\n"); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Number of Active FW Requests %lld\n" + "Maximum FW Requests: %lld\n" + "Number of FW out of resources: %lld\n" + "Number of FW IO errors: %lld\n", + (u64)atomic64_read(&stats->fw_stats.active_fw_reqs), + (u64)atomic64_read(&stats->fw_stats.max_fw_reqs), + (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources), + (u64)atomic64_read(&stats->fw_stats.io_fw_errs)); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tVlan Discovery Statistics\n" + "------------------------------------------\n"); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Number of Vlan Discovery Requests Sent %lld\n" + "Vlan Response Received with no FCF VLAN ID: %lld\n" + "No solicitations recvd after vlan set, expiry count: %lld\n" + "Flogi rejects count: %lld\n", + (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs), + (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID), + (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count), + (u64)atomic64_read(&stats->vlan_stats.flogi_rejects)); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "\n------------------------------------------\n" + "\t\tOther Important Statistics\n" + "------------------------------------------\n"); + + jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1); + jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Last ISR time: %llu (%8lu.%8lu)\n" + "Last ACK time: %llu (%8lu.%8lu)\n" + "Number of ISRs: %lld\n" + "Maximum CQ Entries: %lld\n" + "Number of ACK index out of range: %lld\n" + "Number of data count mismatch: %lld\n" + "Number of FCPIO Timeouts: %lld\n" + "Number of FCPIO Aborted: %lld\n" + "Number of SGL Invalid: %lld\n" + "Number of Copy WQ Alloc Failures for ABTs: %lld\n" + "Number of Copy WQ Alloc Failures for Device Reset: %lld\n" + "Number of Copy WQ Alloc Failures for IOs: %lld\n" + "Number of no icmnd itmf Completions: %lld\n" + "Number of QUEUE Fulls: %lld\n" + "Number of rport not ready: %lld\n" + "Number of receive frame errors: %lld\n", + (u64)stats->misc_stats.last_isr_time, + val1.tv_sec, val1.tv_nsec, + (u64)stats->misc_stats.last_ack_time, + val2.tv_sec, val2.tv_nsec, + (u64)atomic64_read(&stats->misc_stats.isr_count), + (u64)atomic64_read(&stats->misc_stats.max_cq_entries), + 
(u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range), + (u64)atomic64_read(&stats->misc_stats.data_count_mismatch), + (u64)atomic64_read(&stats->misc_stats.fcpio_timeout), + (u64)atomic64_read(&stats->misc_stats.fcpio_aborted), + (u64)atomic64_read(&stats->misc_stats.sgl_invalid), + (u64)atomic64_read( + &stats->misc_stats.abts_cpwq_alloc_failures), + (u64)atomic64_read( + &stats->misc_stats.devrst_cpwq_alloc_failures), + (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures), + (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls), + (u64)atomic64_read(&stats->misc_stats.queue_fulls), + (u64)atomic64_read(&stats->misc_stats.rport_not_ready), + (u64)atomic64_read(&stats->misc_stats.frame_errors)); + + return len; + +} + +/* * fnic_trace_buf_init - Initialize fnic trace buffer logging facility * * Description: @@ -243,10 +439,10 @@ int fnic_trace_buf_init(void) } err = fnic_trace_debugfs_init(); if (err < 0) { - printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n"); + pr_err("fnic: Failed to initialize debugfs for tracing\n"); goto err_fnic_trace_debugfs_init; } - printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n"); + pr_info("fnic: Successfully Initialized Trace Buffer\n"); return err; err_fnic_trace_debugfs_init: fnic_trace_free(); @@ -271,3 +467,314 @@ void fnic_trace_free(void) } printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n"); } + +/* + * fnic_fc_ctlr_trace_buf_init - + * Initialize trace buffer to log fnic control frames + * Description: + * Initialize trace buffer data structure by allocating + * required memory for trace data as well as for Indexes. + * Frame size is 256 bytes and + * memory is allocated for 1024 entries of 256 bytes. + * Page_offset(Index) is set to the address of trace entry + * and page_offset is initialized by adding frame size + * to the previous page_offset entry. 
+ */ + +int fnic_fc_trace_init(void) +{ + unsigned long fc_trace_buf_head; + int err = 0; + int i; + + fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/ + FC_TRC_SIZE_BYTES; + fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc( + fnic_fc_trace_max_pages * PAGE_SIZE); + if (!fnic_fc_ctlr_trace_buf_p) { + pr_err("fnic: Failed to allocate memory for " + "FC Control Trace Buf\n"); + err = -ENOMEM; + goto err_fnic_fc_ctlr_trace_buf_init; + } + + memset((void *)fnic_fc_ctlr_trace_buf_p, 0, + fnic_fc_trace_max_pages * PAGE_SIZE); + + /* Allocate memory for page offset */ + fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries * + sizeof(unsigned long)); + if (!fc_trace_entries.page_offset) { + pr_err("fnic: Failed to allocate memory for page_offset\n"); + if (fnic_fc_ctlr_trace_buf_p) { + pr_err("fnic: Freeing FC Control Trace Buf\n"); + vfree((void *)fnic_fc_ctlr_trace_buf_p); + fnic_fc_ctlr_trace_buf_p = 0; + } + err = -ENOMEM; + goto err_fnic_fc_ctlr_trace_buf_init; + } + memset((void *)fc_trace_entries.page_offset, 0, + (fc_trace_max_entries * sizeof(unsigned long))); + + fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0; + fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p; + + /* + * Set up fc_trace_entries.page_offset field with memory location + * for every trace entry + */ + for (i = 0; i < fc_trace_max_entries; i++) { + fc_trace_entries.page_offset[i] = fc_trace_buf_head; + fc_trace_buf_head += FC_TRC_SIZE_BYTES; + } + err = fnic_fc_trace_debugfs_init(); + if (err < 0) { + pr_err("fnic: Failed to initialize FC_CTLR tracing.\n"); + goto err_fnic_fc_ctlr_trace_debugfs_init; + } + pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n"); + return err; + +err_fnic_fc_ctlr_trace_debugfs_init: + fnic_fc_trace_free(); +err_fnic_fc_ctlr_trace_buf_init: + return err; +} + +/* + * fnic_fc_trace_free - Free memory of fnic_fc_ctlr trace data structures. + */ +void fnic_fc_trace_free(void) +{ + fnic_fc_tracing_enabled = 0; + fnic_fc_trace_debugfs_terminate(); + if (fc_trace_entries.page_offset) { + vfree((void *)fc_trace_entries.page_offset); + fc_trace_entries.page_offset = NULL; + } + if (fnic_fc_ctlr_trace_buf_p) { + vfree((void *)fnic_fc_ctlr_trace_buf_p); + fnic_fc_ctlr_trace_buf_p = 0; + } + pr_info("fnic: Successfully Freed FC_CTLR Trace Buffer\n"); +} + +/* + * fnic_fc_trace_set_data: + * Maintain rd & wr idx accordingly and set data + * Passed parameters: + * host_no: host number associated with fnic + * frame_type: send_frame, recv_frame or link event + * fc_frame: pointer to fc_frame + * frame_len: Length of the fc_frame + * Description: + * This routine will get next available wr_idx and + * copy all passed trace data to the buffer pointed by wr_idx + * and increment wr_idx. It will also make sure that we don't + * overwrite the entry which we are reading and also + * wrap around if we reach the maximum entries. 
+ * Returned Value: + * It will return 0 for success or -1 for failure + */ +int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, + char *frame, u32 fc_trc_frame_len) +{ + unsigned long flags; + struct fc_trace_hdr *fc_buf; + unsigned long eth_fcoe_hdr_len; + char *fc_trace; + + if (fnic_fc_tracing_enabled == 0) + return 0; + + spin_lock_irqsave(&fnic_fc_trace_lock, flags); + + if (fnic_fc_trace_cleared == 1) { + fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0; + pr_info("fnic: Resetting the read idx\n"); + memset((void *)fnic_fc_ctlr_trace_buf_p, 0, + fnic_fc_trace_max_pages * PAGE_SIZE); + fnic_fc_trace_cleared = 0; + } + + fc_buf = (struct fc_trace_hdr *) + fc_trace_entries.page_offset[fc_trace_entries.wr_idx]; + + fc_trace_entries.wr_idx++; + + if (fc_trace_entries.wr_idx >= fc_trace_max_entries) + fc_trace_entries.wr_idx = 0; + + if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) { + fc_trace_entries.rd_idx++; + if (fc_trace_entries.rd_idx >= fc_trace_max_entries) + fc_trace_entries.rd_idx = 0; + } + + fc_buf->time_stamp = CURRENT_TIME; + fc_buf->host_no = host_no; + fc_buf->frame_type = frame_type; + + fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf); + + /* During the receive path, we do not have eth hdr as well as fcoe hdr + * at trace entry point so we will stuff 0xff just to make it generic. + */ + if (frame_type == FNIC_FC_RECV) { + eth_fcoe_hdr_len = sizeof(struct ethhdr) + + sizeof(struct fcoe_hdr); + fc_trc_frame_len = fc_trc_frame_len + eth_fcoe_hdr_len; + memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len); + /* Copy the rest of data frame */ + memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame, + min_t(u8, fc_trc_frame_len, + (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE))); + } else { + memcpy((char *)fc_trace, (void *)frame, + min_t(u8, fc_trc_frame_len, + (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE))); + } + + /* Store the actual received length */ + fc_buf->frame_len = fc_trc_frame_len; + + spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); + return 0; +} + +/* + * fnic_fc_trace_get_data: Copy trace buffer to a memory file + * Passed parameter: + * @fnic_dbgfs_t: pointer to debugfs trace buffer + * rdata_flag: 1 => Unformatted file + * 0 => formatted file + * Description: + * This routine will copy the trace data to memory file with + * proper formatting and also copy to another memory + * file without formatting for further processing. 
+ * Return Value: + * Number of bytes that were dumped into fnic_dbgfs_t + */ + +int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag) +{ + int rd_idx, wr_idx; + unsigned long flags; + int len = 0, j; + struct fc_trace_hdr *tdata; + char *fc_trace; + + spin_lock_irqsave(&fnic_fc_trace_lock, flags); + if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) { + spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); + pr_info("fnic: Buffer is empty\n"); + return 0; + } + rd_idx = fc_trace_entries.rd_idx; + wr_idx = fc_trace_entries.wr_idx; + if (rdata_flag == 0) { + len += snprintf(fnic_dbgfs_prt->buffer + len, + (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, + "Time Stamp (UTC)\t\t" + "Host No: F Type: len: FCoE_FRAME:\n"); + } + + while (rd_idx != wr_idx) { + tdata = (struct fc_trace_hdr *) + fc_trace_entries.page_offset[rd_idx]; + if (!tdata) { + pr_info("fnic: Rd data is NULL\n"); + spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); + return 0; + } + if (rdata_flag == 0) { + copy_and_format_trace_data(tdata, + fnic_dbgfs_prt, &len, rdata_flag); + } else { + fc_trace = (char *)tdata; + for (j = 0; j < FC_TRC_SIZE_BYTES; j++) { + len += snprintf(fnic_dbgfs_prt->buffer + len, + (fnic_fc_trace_max_pages * PAGE_SIZE * 3) + - len, "%02x", fc_trace[j] & 0xff); + } /* for loop */ + len += snprintf(fnic_dbgfs_prt->buffer + len, + (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, + "\n"); + } + rd_idx++; + if (rd_idx > (fc_trace_max_entries - 1)) + rd_idx = 0; + } + + spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); + return len; +} + +/* + * copy_and_format_trace_data: Copy formatted data to char * buffer + * Passed Parameter: + * @fc_trace_hdr_t: pointer to trace data + * @fnic_dbgfs_t: pointer to debugfs trace buffer + * @orig_len: pointer to len + * rdata_flag: 0 => Formatted file, 1 => Unformatted file + * Description: + * This routine will format and copy the passed trace data + * for formatted file or unformatted file accordingly. 
+ */ + +void copy_and_format_trace_data(struct fc_trace_hdr *tdata, + fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len, + u8 rdata_flag) +{ + struct tm tm; + int j, i = 1, len; + char *fc_trace, *fmt; + int ethhdr_len = sizeof(struct ethhdr) - 1; + int fcoehdr_len = sizeof(struct fcoe_hdr); + int fchdr_len = sizeof(struct fc_frame_header); + int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3; + + tdata->frame_type = tdata->frame_type & 0x7F; + + len = *orig_len; + + time_to_tm(tdata->time_stamp.tv_sec, 0, &tm); + + fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t"; + len += snprintf(fnic_dbgfs_prt->buffer + len, + (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, + fmt, + tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900, + tm.tm_hour, tm.tm_min, tm.tm_sec, + tdata->time_stamp.tv_nsec, tdata->host_no, + tdata->frame_type, tdata->frame_len); + + fc_trace = (char *)FC_TRACE_ADDRESS(tdata); + + for (j = 0; j < min_t(u8, tdata->frame_len, + (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) { + if (tdata->frame_type == FNIC_FC_LE) { + len += snprintf(fnic_dbgfs_prt->buffer + len, + max_size - len, "%c", fc_trace[j]); + } else { + len += snprintf(fnic_dbgfs_prt->buffer + len, + max_size - len, "%02x", fc_trace[j] & 0xff); + len += snprintf(fnic_dbgfs_prt->buffer + len, + max_size - len, " "); + if (j == ethhdr_len || + j == ethhdr_len + fcoehdr_len || + j == ethhdr_len + fcoehdr_len + fchdr_len || + (i > 3 && j%fchdr_len == 0)) { + len += snprintf(fnic_dbgfs_prt->buffer + + len, (fnic_fc_trace_max_pages + * PAGE_SIZE * 3) - len, + "\n\t\t\t\t\t\t\t\t"); + i++; + } + } /* end of else */ + } /* End of for loop */ + len += snprintf(fnic_dbgfs_prt->buffer + len, + max_size - len, "\n"); + *orig_len = len; +} diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h index cef42b4c4d6..a8aa0578fcb 100644 --- a/drivers/scsi/fnic/fnic_trace.h +++ b/drivers/scsi/fnic/fnic_trace.h @@ -19,6 +19,17 @@ #define __FNIC_TRACE_H__ #define FNIC_ENTRY_SIZE_BYTES 64 +#define FC_TRC_SIZE_BYTES 256 +#define FC_TRC_HEADER_SIZE sizeof(struct fc_trace_hdr) + +/* + * First bit of FNIC_FC_RECV and FNIC_FC_SEND is used to represent the type + * of frame: 1 => Eth frame, 0 => FC frame + */ + +#define FNIC_FC_RECV 0x52 /* Character R */ +#define FNIC_FC_SEND 0x54 /* Character T */ +#define FNIC_FC_LE 0x4C /* Character L */ extern ssize_t simple_read_from_buffer(void __user *to, size_t count, @@ -30,6 +41,10 @@ extern unsigned int fnic_trace_max_pages; extern int fnic_tracing_enabled; extern unsigned int trace_max_pages; +extern unsigned int fnic_fc_trace_max_pages; +extern int fnic_fc_tracing_enabled; +extern int fnic_fc_trace_cleared; + typedef struct fnic_trace_dbg { int wr_idx; int rd_idx; @@ -56,6 +71,16 @@ struct fnic_trace_data { typedef struct fnic_trace_data fnic_trace_data_t; +struct fc_trace_hdr { + struct timespec time_stamp; + u32 host_no; + u8 frame_type; + u8 frame_len; +} __attribute__((__packed__)); + +#define FC_TRACE_ADDRESS(a) \ + ((unsigned long)(a) + sizeof(struct fc_trace_hdr)) + #define FNIC_TRACE_ENTRY_SIZE \ (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t)) @@ -84,7 +109,21 @@ fnic_trace_data_t *fnic_trace_get_buf(void); int fnic_get_trace_data(fnic_dbgfs_t *); int fnic_trace_buf_init(void); void fnic_trace_free(void); +int fnic_debugfs_init(void); +void fnic_debugfs_terminate(void); int fnic_trace_debugfs_init(void); void fnic_trace_debugfs_terminate(void); +/* Fnic FC CTLR Trace related functions */ +int fnic_fc_trace_init(void); +void fnic_fc_trace_free(void); +int 
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index cef42b4c4d6..a8aa0578fcb 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -19,6 +19,17 @@
 #define __FNIC_TRACE_H__
 
 #define FNIC_ENTRY_SIZE_BYTES 64
+#define FC_TRC_SIZE_BYTES 256
+#define FC_TRC_HEADER_SIZE sizeof(struct fc_trace_hdr)
+
+/*
+ * First bit of FNIC_FC_RECV and FNIC_FC_SEND is used to represent the type
+ * of frame 1 => Eth frame, 0 => FC frame
+ */
+
+#define FNIC_FC_RECV 0x52 /* Character R */
+#define FNIC_FC_SEND 0x54 /* Character T */
+#define FNIC_FC_LE 0x4C /* Character L */
 
 extern ssize_t simple_read_from_buffer(void __user *to,
 					size_t count,
@@ -30,6 +41,10 @@ extern unsigned int fnic_trace_max_pages;
 extern int fnic_tracing_enabled;
 extern unsigned int trace_max_pages;
 
+extern unsigned int fnic_fc_trace_max_pages;
+extern int fnic_fc_tracing_enabled;
+extern int fnic_fc_trace_cleared;
+
 typedef struct fnic_trace_dbg {
 	int wr_idx;
 	int rd_idx;
@@ -56,6 +71,16 @@ struct fnic_trace_data {
 
 typedef struct fnic_trace_data fnic_trace_data_t;
 
+struct fc_trace_hdr {
+	struct timespec time_stamp;
+	u32 host_no;
+	u8 frame_type;
+	u8 frame_len;
+} __attribute__((__packed__));
+
+#define FC_TRACE_ADDRESS(a) \
+	((unsigned long)(a) + sizeof(struct fc_trace_hdr))
+
 #define FNIC_TRACE_ENTRY_SIZE \
 	(FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t))
 
@@ -84,7 +109,21 @@ fnic_trace_data_t *fnic_trace_get_buf(void);
 int fnic_get_trace_data(fnic_dbgfs_t *);
 int fnic_trace_buf_init(void);
 void fnic_trace_free(void);
+int fnic_debugfs_init(void);
+void fnic_debugfs_terminate(void);
 int fnic_trace_debugfs_init(void);
 void fnic_trace_debugfs_terminate(void);
 
+/* Fnic FC CTLR Trace related function */
+int fnic_fc_trace_init(void);
+void fnic_fc_trace_free(void);
+int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
+	char *frame, u32 fc_frame_len);
+int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag);
+void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
+	fnic_dbgfs_t *fnic_dbgfs_prt,
+	int *len, u8 rdata_flag);
+int fnic_fc_trace_debugfs_init(void);
+void fnic_fc_trace_debugfs_terminate(void);
+
 #endif
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index b576be734e2..9795d6f3e19 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -584,6 +584,16 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
 	return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
 }
 
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
+{
+	u64 a0 = new_default_vlan, a1 = 0;
+	int wait = 1000;
+	int old_vlan = 0;
+
+	old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
+	return (u16)old_vlan;
+}
+
 int vnic_dev_link_status(struct vnic_dev *vdev)
 {
 	if (vdev->linkstatus)
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
index f9935a8a5a0..40d4195f562 100644
--- a/drivers/scsi/fnic/vnic_dev.h
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -148,6 +148,8 @@ int vnic_dev_disable(struct vnic_dev *vdev);
 int vnic_dev_open(struct vnic_dev *vdev, int arg);
 int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
 int vnic_dev_init(struct vnic_dev *vdev, int arg);
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev,
+	u16 new_default_vlan);
 int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
 int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
 void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
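vnic_dev_set_default_vlan() above follows the devcmd calling convention used throughout this driver: arguments go down to firmware in the 64-bit registers a0/a1, results come back the same way, and the last parameter is a wait budget in milliseconds. A toy user-space model of that in/out register shape (the mock firmware state and function names are invented for illustration; only the a0/a1 convention mirrors the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of the devcmd register convention: the "firmware" reads its
     * argument from a0 and hands results back through a0/a1. */
    static uint64_t fw_default_vlan = 1;	/* pretend firmware state */

    static int mock_dev_cmd(int cmd, uint64_t *a0, uint64_t *a1, int wait_ms)
    {
    	uint64_t old = fw_default_vlan;	/* out: previous value */

    	(void)cmd; (void)wait_ms;
    	fw_default_vlan = *a0;		/* in: new value from a0 */
    	*a0 = old;
    	*a1 = 0;
    	return (int)old;
    }

    static uint16_t set_default_vlan(uint16_t new_vlan)
    {
    	uint64_t a0 = new_vlan, a1 = 0;

    	return (uint16_t)mock_dev_cmd(46 /* CMD_SET_DEFAULT_VLAN index */,
    				      &a0, &a1, 1000);
    }

    int main(void)
    {
    	printf("old vlan = %u\n", set_default_vlan(100));	/* 1 */
    	printf("old vlan = %u\n", set_default_vlan(200));	/* 100 */
    	return 0;
    }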
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index 7c9ccbd4134..3e2fcbda6ae 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -196,6 +196,73 @@ enum vnic_devcmd_cmd {
 
 	/* undo initialize of virtual link */
 	CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+
+	/* check fw capability of a cmd:
+	 * in: (u32)a0=cmd
+	 * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+	CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
+
+	/* persistent binding info
+	 * in: (u64)a0=paddr of arg
+	 * (u32)a1=CMD_PERBI_XXX */
+	CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
+
+	/* Interrupt Assert Register functionality
+	 * in: (u16)a0=interrupt number to assert
+	 */
+	CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+	/* initiate hangreset, like softreset after hang detected */
+	CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+	/* hangreset status:
+	 * out: a0=0 reset complete, a0=1 reset in progress */
+	CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+	/*
+	 * Set hw ingress packet vlan rewrite mode:
+	 * in: (u32)a0=new vlan rewrite mode
+	 * out: (u32)a0=old vlan rewrite mode */
+	CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+	/*
+	 * in: (u16)a0=bdf of target vnic
+	 * (u32)a1=cmd to proxy
+	 * a2-a15=args to cmd in a1
+	 * out: (u32)a0=status of proxied cmd
+	 * a1-a15=out args of proxied cmd */
+	CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+	/*
+	 * As for BY_BDF except a0 is index of hvnlink subordinate vnic
+	 * or SR-IOV virtual vnic
+	 */
+	CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+	/*
+	 * For HPP toggle:
+	 * adapter-info-get
+	 * in: (u64)a0=physical address of buffer passed in from caller.
+	 * (u16)a1=size of buffer specified in a0.
+	 * out: (u64)a0=physical address of buffer passed in from caller.
+	 * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+	 * 0 if no VIF-CONFIG-INFO TLV was ever received. */
+	CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+
+	/*
+	 * INT13 API: (u64)a0=paddr to vnic_int13_params struct
+	 * (u32)a1=INT13_CMD_xxx
+	 */
+	CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+	/*
+	 * Set default vlan:
+	 * in: (u16)a0=new default vlan
+	 * (u16)a1=zero for overriding vlan with param a0,
+	 * non-zero for resetting vlan to the default
+	 * out: (u16)a0=old default vlan
+	 */
+	CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46)
 };
 
 /* flags for CMD_OPEN */
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
index fbb55364e27..e343e1d0f80 100644
--- a/drivers/scsi/fnic/vnic_scsi.h
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -54,8 +54,8 @@
 #define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000
 #define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000
 
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 256
-#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 4096
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 1
+#define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 2048
 
 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0
 #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 5041f925c19..a1bc8ca958e 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -78,10 +78,6 @@
  *
  */
 
-/*
- * $Log: generic_NCR5380.c,v $
- */
-
 /* settings for DTC3181E card with only Mustek scanner attached */
 #define USLEEP
 #define USLEEP_POLL	1
@@ -461,7 +457,7 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)
 
 	if (instance->irq != SCSI_IRQ_NONE)
 		if (request_irq(instance->irq, generic_NCR5380_intr,
-				IRQF_DISABLED, "NCR5380", instance)) {
+				0, "NCR5380", instance)) {
 			printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
 			instance->irq = SCSI_IRQ_NONE;
 		}
@@ -745,42 +741,36 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src,
 
 #include "NCR5380.c"
 
-#define PRINTP(x) len += sprintf(buffer+len, x)
+#define PRINTP(x) seq_printf(m, x)
 #define ANDP ,
 
-static int sprint_opcode(char *buffer, int len, int opcode)
+static void sprint_opcode(struct seq_file *m, int opcode)
 {
-	int start = len;
 	PRINTP("0x%02x " ANDP opcode);
-	return len - start;
 }
 
-static int sprint_command(char *buffer, int len, unsigned char *command)
+static void sprint_command(struct seq_file *m, unsigned char *command)
 {
-	int i, s, start = len;
-	len += sprint_opcode(buffer, len, command[0]);
+	int i, s;
+	sprint_opcode(m, command[0]);
 	for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
 		PRINTP("%02x " ANDP command[i]);
 	PRINTP("\n");
-	return len - start;
 }
 
 /**
  *	sprintf_Scsi_Cmnd	-	print a scsi command
- *	@buffer: buffr to print into
- *	@len: buffer length
+ *	@m: seq_file to print into
  *	@cmd: SCSI command block
  *
  *	Print out the target and command data in hex
 */
 
-static int sprint_Scsi_Cmnd(char *buffer, int len, Scsi_Cmnd * cmd)
+static void sprint_Scsi_Cmnd(struct seq_file *m, Scsi_Cmnd * cmd)
 {
-	int start = len;
 	PRINTP("host number %d destination target %d, lun %d\n" ANDP cmd->device->host->host_no ANDP cmd->device->id ANDP cmd->device->lun);
 	PRINTP(" command = ");
-	len += sprint_command(buffer, len, cmd->cmnd);
-	return len - start;
+	sprint_command(m, cmd->cmnd);
 }
 
 /**
@@ -800,9 +790,8 @@ static int sprint_Scsi_Cmnd(char *buffer, int len, Scsi_Cmnd * cmd)
 *	Locks: global cli/lock for queue walk
 */
 
-static int
-generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, char **start, off_t offset, int length, int inout)
+static int generic_NCR5380_show_info(struct seq_file *m, struct Scsi_Host *scsi_ptr)
 {
-	int len = 0;
 	NCR5380_local_declare();
 	unsigned long flags;
 	unsigned char status;
@@ -853,16 +842,16 @@ static int generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, c
 			PRINTP(" T:%d %s " ANDP dev->id ANDP scsi_device_type(dev->type));
 			for (i = 0; i < 8; i++)
 				if (dev->vendor[i] >= 0x20)
-					*(buffer + (len++)) = dev->vendor[i];
-			*(buffer + (len++)) = ' ';
+					seq_putc(m, dev->vendor[i]);
+			seq_putc(m, ' ');
 			for (i = 0; i < 16; i++)
 				if (dev->model[i] >= 0x20)
-					*(buffer + (len++)) = dev->model[i];
-			*(buffer + (len++)) = ' ';
+					seq_putc(m, dev->model[i]);
+			seq_putc(m, ' ');
 			for (i = 0; i < 4; i++)
 				if (dev->rev[i] >= 0x20)
-					*(buffer + (len++)) = dev->rev[i];
-			*(buffer + (len++)) = ' ';
+					seq_putc(m, dev->rev[i]);
+			seq_putc(m, ' ');
 			PRINTP("\n%10ld kb read in %5ld secs" ANDP br / 1024 ANDP tr);
 			if (tr)
@@ -886,32 +875,28 @@ static int generic_NCR5380_proc_info(struct Scsi_Host *scsi_ptr, char *buffer, c
 	if (!hostdata->connected) {
 		PRINTP("No currently connected command\n");
 	} else {
-		len += sprint_Scsi_Cmnd(buffer, len, (Scsi_Cmnd *) hostdata->connected);
+		sprint_Scsi_Cmnd(m, (Scsi_Cmnd *) hostdata->connected);
 	}
 
 	PRINTP("issue_queue\n");
 
 	for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
-		len += sprint_Scsi_Cmnd(buffer, len, ptr);
+		sprint_Scsi_Cmnd(m, ptr);
 
 	PRINTP("disconnected_queue\n");
 
 	for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
-		len += sprint_Scsi_Cmnd(buffer, len, ptr);
+		sprint_Scsi_Cmnd(m, ptr);
 
-	*start = buffer + offset;
-	len -= offset;
-	if (len > length)
-		len = length;
 	spin_unlock_irqrestore(scsi_ptr->host_lock, flags);
-	return len;
+	return 0;
 }
 
 #undef PRINTP
 #undef ANDP
 
 static struct scsi_host_template driver_template = {
-	.proc_info = generic_NCR5380_proc_info,
+	.show_info = generic_NCR5380_show_info,
 	.name = "Generic NCR5380/NCR53C400 Scsi Driver",
 	.detect = generic_NCR5380_detect,
 	.release = generic_NCR5380_release_resources,
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 1bcdb7beb77..703adf78e0b 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -25,10 +25,6 @@
 * 1+ (800) 334-5454
 */
 
-/*
- * $Log: generic_NCR5380.h,v $
- */
-
 #ifndef GENERIC_NCR5380_H
 #define GENERIC_NCR5380_H
 
@@ -58,8 +54,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
 #define CAN_QUEUE 16
 #endif
 
-#ifndef HOSTS_C
-
 #define __STRVAL(x) #x
 #define STRVAL(x) __STRVAL(x)
 
@@ -131,7 +125,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
 #define BOARD_NCR53C400A 2
 #define BOARD_DTC3181E 3
 
-#endif /* else def HOSTS_C */
 #endif /* ndef ASM */
 #endif /* GENERIC_NCR5380_H */
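This is the recurring conversion in the series: the old .proc_info callbacks had to manage buffer, start, offset and length by hand, while the new .show_info callbacks simply print into a struct seq_file and leave all paging to the seq layer — gdth below is converted the same way. A user-space sketch of why the seq_printf style is simpler (plain C stand-ins, not the kernel API):

    #include <stdarg.h>
    #include <stddef.h>
    #include <stdio.h>

    /* User-space stand-in for struct seq_file: callers just print; the
     * framework (here: one fixed buffer) owns all offset bookkeeping. */
    struct seq_file {
    	char buf[4096];
    	size_t count;
    };

    static void seq_printf(struct seq_file *m, const char *fmt, ...)
    {
    	size_t left = sizeof(m->buf) - m->count;
    	va_list ap;
    	int n;

    	if (left == 0)
    		return;		/* buffer full; a sketch may just drop output */
    	va_start(ap, fmt);
    	n = vsnprintf(m->buf + m->count, left, fmt, ap);
    	va_end(ap);
    	if (n > 0)
    		m->count += ((size_t)n >= left) ? left - 1 : (size_t)n;
    }

    /* A show_info-style callback: no buffer/start/offset/length arguments,
     * unlike the proc_info interface being removed in this series. */
    static int show_info(struct seq_file *m)
    {
    	seq_printf(m, "Driver Parameters:\n");
    	seq_printf(m, " max_ids: \t%-3d\n", 127);
    	return 0;
    }

    int main(void)
    {
    	struct seq_file m = { .count = 0 };

    	show_info(&m);
    	fwrite(m.buf, 1, m.count, stdout);
    	return 0;
    }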
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 59bceac51a4..0f1ae13ce7c 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -594,8 +594,6 @@ static void gdth_pci_remove_one(struct pci_dev *pdev)
 {
 	gdth_ha_str *ha = pci_get_drvdata(pdev);
 
-	pci_set_drvdata(pdev, NULL);
-
 	list_del(&ha->list);
 	gdth_remove_one(ha);
 
@@ -4676,7 +4674,8 @@ static struct scsi_host_template gdth_template = {
 	.eh_bus_reset_handler = gdth_eh_bus_reset,
 	.slave_configure = gdth_slave_configure,
 	.bios_param = gdth_bios_param,
-	.proc_info = gdth_proc_info,
+	.show_info = gdth_show_info,
+	.write_info = gdth_set_info,
 	.eh_timed_out = gdth_timed_out,
 	.proc_name = "gdth",
 	.can_queue = GDTH_MAXCMDS,
@@ -4685,6 +4684,7 @@ static struct scsi_host_template gdth_template = {
 	.cmd_per_lun = GDTH_MAXC_P_L,
 	.unchecked_isa_dma = 1,
 	.use_clustering = ENABLE_CLUSTERING,
+	.no_write_same = 1,
 };
 
 #ifdef CONFIG_ISA
@@ -4711,7 +4711,7 @@ static int __init gdth_isa_probe_one(u32 isa_bios)
 	printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n",
 		isa_bios, ha->irq, ha->drq);
 
-	error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha);
+	error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
 	if (error) {
 		printk("GDT-ISA: Unable to allocate IRQ\n");
 		goto out_host_put;
@@ -4843,7 +4843,7 @@ static int __init gdth_eisa_probe_one(u16 eisa_slot)
 	printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",
 		eisa_slot >> 12, ha->irq);
 
-	error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha);
+	error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);
 	if (error) {
 		printk("GDT-EISA: Unable to allocate IRQ\n");
 		goto out_host_put;
@@ -4979,7 +4979,7 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
 		ha->irq);
 
 	error = request_irq(ha->irq, gdth_interrupt,
-				IRQF_DISABLED|IRQF_SHARED, "gdth", ha);
+				IRQF_SHARED, "gdth", ha);
 	if (error) {
 		printk("GDT-PCI: Unable to allocate IRQ\n");
 		goto out_host_put;
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index fbf6f0f4b0d..3fd8b83ffbf 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -1007,6 +1007,7 @@ typedef struct {
 
 /* function prototyping */
 
-int gdth_proc_info(struct Scsi_Host *, char *,char **,off_t,int,int);
+int gdth_show_info(struct seq_file *, struct Scsi_Host *);
+int gdth_set_info(struct Scsi_Host *, char *, int);
 
 #endif
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index 652754319a4..9fb63268486 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -5,23 +5,9 @@
 #include <linux/completion.h>
 #include <linux/slab.h>
 
-int gdth_proc_info(struct Scsi_Host *host, char *buffer,char **start,off_t offset,int length,
-                   int inout)
+int gdth_set_info(struct Scsi_Host *host, char *buffer, int length)
 {
     gdth_ha_str *ha = shost_priv(host);
-
-    TRACE2(("gdth_proc_info() length %d offs %d inout %d\n",
-            length,(int)offset,inout));
-
-    if (inout)
-        return(gdth_set_info(buffer,length,host,ha));
-    else
-        return(gdth_get_info(buffer,start,offset,length,host,ha));
-}
-
-static int gdth_set_info(char *buffer,int length,struct Scsi_Host *host,
-                         gdth_ha_str *ha)
-{
     int ret_val = -EINVAL;
 
     TRACE2(("gdth_set_info() ha %d\n",ha->hanum,));
@@ -149,12 +135,10 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
     return(-EINVAL);
 }
 
-static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
-                         struct Scsi_Host *host, gdth_ha_str *ha)
+int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
 {
-    int size = 0,len = 0;
+    gdth_ha_str *ha = shost_priv(host);
     int hlen;
-    off_t begin = 0,pos = 0;
     int id, i, j, k, sec, flag;
     int no_mdrv = 0, drv_no, is_mirr;
     u32 cnt;
@@ -189,8 +173,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
     /* request is i.e.
"cat /proc/scsi/gdth/0" */ /* format: %-15s\t%-10s\t%-15s\t%s */ /* driver parameters */ - size = sprintf(buffer+len,"Driver Parameters:\n"); - len += size; pos = begin + len; + seq_printf(m, "Driver Parameters:\n"); if (reserve_list[0] == 0xff) strcpy(hrec, "--"); else { @@ -201,69 +184,50 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]); } } - size = sprintf(buffer+len, + seq_printf(m, " reserve_mode: \t%d \treserve_list: \t%s\n", reserve_mode, hrec); - len += size; pos = begin + len; - size = sprintf(buffer+len, + seq_printf(m, " max_ids: \t%-3d \thdr_channel: \t%d\n", max_ids, hdr_channel); - len += size; pos = begin + len; /* controller information */ - size = sprintf(buffer+len,"\nDisk Array Controller Information:\n"); - len += size; pos = begin + len; - strcpy(hrec, ha->binfo.type_string); - size = sprintf(buffer+len, + seq_printf(m,"\nDisk Array Controller Information:\n"); + seq_printf(m, " Number: \t%d \tName: \t%s\n", - ha->hanum, hrec); - len += size; pos = begin + len; + ha->hanum, ha->binfo.type_string); + seq_printf(m, + " Driver Ver.: \t%-10s\tFirmware Ver.: \t", + GDTH_VERSION_STR); if (ha->more_proc) - sprintf(hrec, "%d.%02d.%02d-%c%03X", + seq_printf(m, "%d.%02d.%02d-%c%03X\n", (u8)(ha->binfo.upd_fw_ver>>24), (u8)(ha->binfo.upd_fw_ver>>16), (u8)(ha->binfo.upd_fw_ver), ha->bfeat.raid ? 'R':'N', ha->binfo.upd_revision); else - sprintf(hrec, "%d.%02d", (u8)(ha->cpar.version>>8), + seq_printf(m, "%d.%02d\n", (u8)(ha->cpar.version>>8), (u8)(ha->cpar.version)); - - size = sprintf(buffer+len, - " Driver Ver.: \t%-10s\tFirmware Ver.: \t%s\n", - GDTH_VERSION_STR, hrec); - len += size; pos = begin + len; - if (ha->more_proc) { + if (ha->more_proc) /* more information: 1. about controller */ - size = sprintf(buffer+len, + seq_printf(m, " Serial No.: \t0x%8X\tCache RAM size:\t%d KB\n", ha->binfo.ser_no, ha->binfo.memsize / 1024); - len += size; pos = begin + len; - } #ifdef GDTH_DMA_STATISTICS /* controller statistics */ - size = sprintf(buffer+len,"\nController Statistics:\n"); - len += size; pos = begin + len; - size = sprintf(buffer+len, + seq_printf(m,"\nController Statistics:\n"); + seq_printf(m, " 32-bit DMA buffer:\t%lu\t64-bit DMA buffer:\t%lu\n", ha->dma32_cnt, ha->dma64_cnt); - len += size; pos = begin + len; #endif - if (pos < offset) { - len = 0; - begin = pos; - } - if (pos > offset + length) - goto stop_output; - if (ha->more_proc) { /* more information: 2. about physical devices */ - size = sprintf(buffer+len,"\nPhysical Devices:"); - len += size; pos = begin + len; + seq_printf(m, "\nPhysical Devices:"); flag = FALSE; buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); @@ -309,21 +273,19 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, strncpy(hrec+8,pdi->product,16); strncpy(hrec+24,pdi->revision,4); hrec[28] = 0; - size = sprintf(buffer+len, + seq_printf(m, "\n Chn/ID/LUN: \t%c/%02d/%d \tName: \t%s\n", 'A'+i,pdi->target_id,pdi->lun,hrec); - len += size; pos = begin + len; flag = TRUE; pdi->no_ldrive &= 0xffff; if (pdi->no_ldrive == 0xffff) strcpy(hrec,"--"); else sprintf(hrec,"%d",pdi->no_ldrive); - size = sprintf(buffer+len, + seq_printf(m, " Capacity [MB]:\t%-6d \tTo Log. 
Drive: \t%s\n", pdi->blkcnt/(1024*1024/pdi->blksize), hrec); - len += size; pos = begin + len; } else { pdi->devtype = 0xff; } @@ -333,11 +295,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, for (k = 0; k < pds->count; ++k) { if (pds->list[k].tid == pdi->target_id && pds->list[k].lun == pdi->lun) { - size = sprintf(buffer+len, + seq_printf(m, " Retries: \t%-6d \tReassigns: \t%d\n", pds->list[k].retries, pds->list[k].reassigns); - len += size; pos = begin + len; break; } } @@ -355,32 +316,20 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, pdef->sddc_type = 0x08; if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) { - size = sprintf(buffer+len, + seq_printf(m, " Grown Defects:\t%d\n", pdef->sddc_cnt); - len += size; pos = begin + len; } } - if (pos < offset) { - len = 0; - begin = pos; - } - if (pos > offset + length) { - gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); - goto stop_output; - } } } gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); - if (!flag) { - size = sprintf(buffer+len, "\n --\n"); - len += size; pos = begin + len; - } + if (!flag) + seq_printf(m, "\n --\n"); /* 3. about logical drives */ - size = sprintf(buffer+len,"\nLogical Drives:"); - len += size; pos = begin + len; + seq_printf(m,"\nLogical Drives:"); flag = FALSE; buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); @@ -418,10 +367,9 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, } if (drv_no == i) { - size = sprintf(buffer+len, + seq_printf(m, "\n Number: \t%-2d \tStatus: \t%s\n", drv_no, hrec); - len += size; pos = begin + len; flag = TRUE; no_mdrv = pcdi->cd_ldcnt; if (no_mdrv > 1 || pcdi->ld_slave != -1) { @@ -436,61 +384,37 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, } else { strcpy(hrec, "???"); } - size = sprintf(buffer+len, + seq_printf(m, " Capacity [MB]:\t%-6d \tType: \t%s\n", pcdi->ld_blkcnt/(1024*1024/pcdi->ld_blksize), hrec); - len += size; pos = begin + len; } else { - size = sprintf(buffer+len, + seq_printf(m, " Slave Number: \t%-2d \tStatus: \t%s\n", drv_no & 0x7fff, hrec); - len += size; pos = begin + len; } drv_no = pcdi->ld_slave; - if (pos < offset) { - len = 0; - begin = pos; - } - if (pos > offset + length) { - gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); - goto stop_output; - } } while (drv_no != -1); - if (is_mirr) { - size = sprintf(buffer+len, + if (is_mirr) + seq_printf(m, " Missing Drv.: \t%-2d \tInvalid Drv.: \t%d\n", no_mdrv - j - k, k); - len += size; pos = begin + len; - } - + if (!ha->hdr[i].is_arraydrv) strcpy(hrec, "--"); else sprintf(hrec, "%d", ha->hdr[i].master_no); - size = sprintf(buffer+len, + seq_printf(m, " To Array Drv.:\t%s\n", hrec); - len += size; pos = begin + len; - if (pos < offset) { - len = 0; - begin = pos; - } - if (pos > offset + length) { - gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); - goto stop_output; - } } gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); - if (!flag) { - size = sprintf(buffer+len, "\n --\n"); - len += size; pos = begin + len; - } + if (!flag) + seq_printf(m, "\n --\n"); /* 4. 
about array drives */ - size = sprintf(buffer+len,"\nArray Drives:"); - len += size; pos = begin + len; + seq_printf(m,"\nArray Drives:"); flag = FALSE; buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); @@ -525,10 +449,9 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, strcat(hrec, "/expand"); else if (pai->ai_ext_state & 0x1) strcat(hrec, "/patch"); - size = sprintf(buffer+len, + seq_printf(m, "\n Number: \t%-2d \tStatus: \t%s\n", i,hrec); - len += size; pos = begin + len; flag = TRUE; if (pai->ai_type == 0) @@ -539,31 +462,19 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, strcpy(hrec, "RAID-5"); else strcpy(hrec, "RAID-10"); - size = sprintf(buffer+len, + seq_printf(m, " Capacity [MB]:\t%-6d \tType: \t%s\n", pai->ai_size/(1024*1024/pai->ai_secsize), hrec); - len += size; pos = begin + len; - if (pos < offset) { - len = 0; - begin = pos; - } - if (pos > offset + length) { - gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); - goto stop_output; - } } } gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); - if (!flag) { - size = sprintf(buffer+len, "\n --\n"); - len += size; pos = begin + len; - } + if (!flag) + seq_printf(m, "\n --\n"); /* 5. about host drives */ - size = sprintf(buffer+len,"\nHost Drives:"); - len += size; pos = begin + len; + seq_printf(m,"\nHost Drives:"); flag = FALSE; buf = gdth_ioctl_alloc(ha, sizeof(gdth_hget_str), FALSE, &paddr); @@ -605,33 +516,22 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, if (!(ha->hdr[i].present)) continue; - size = sprintf(buffer+len, + seq_printf(m, "\n Number: \t%-2d \tArr/Log. Drive:\t%d\n", i, ha->hdr[i].ldr_no); - len += size; pos = begin + len; flag = TRUE; - size = sprintf(buffer+len, + seq_printf(m, " Capacity [MB]:\t%-6d \tStart Sector: \t%d\n", (u32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec); - len += size; pos = begin + len; - if (pos < offset) { - len = 0; - begin = pos; - } - if (pos > offset + length) - goto stop_output; } - if (!flag) { - size = sprintf(buffer+len, "\n --\n"); - len += size; pos = begin + len; - } + if (!flag) + seq_printf(m, "\n --\n"); } /* controller events */ - size = sprintf(buffer+len,"\nController Events:\n"); - len += size; pos = begin + len; + seq_printf(m,"\nController Events:\n"); for (id = -1;;) { id = gdth_read_event(ha, id, estr); @@ -643,29 +543,14 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, do_gettimeofday(&tv); sec = (int)(tv.tv_sec - estr->first_stamp); if (sec < 0) sec = 0; - size = sprintf(buffer+len," date- %02d:%02d:%02d\t%s\n", + seq_printf(m," date- %02d:%02d:%02d\t%s\n", sec/3600, sec%3600/60, sec%60, hrec); - len += size; pos = begin + len; - if (pos < offset) { - len = 0; - begin = pos; - } - if (pos > offset + length) - goto stop_output; } if (id == -1) break; } - stop_output: - *start = buffer +(offset-begin); - len -= (offset-begin); - if (len > length) - len = length; - TRACE2(("get_info() len %d pos %d begin %d offset %d length %d size %d\n", - len,(int)pos,(int)begin,(int)offset,length,size)); - rc = len; - + rc = 0; free_fail: kfree(gdtcmd); kfree(estr); diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h index dab15f59f2c..aaa61819897 100644 --- a/drivers/scsi/gdth_proc.h +++ b/drivers/scsi/gdth_proc.h @@ -8,11 +8,6 @@ int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd, int timeout, u32 *info); -static int gdth_set_info(char *buffer,int length,struct Scsi_Host *host, - gdth_ha_str *ha); -static int 
gdth_get_info(char *buffer,char **start,off_t offset,int length, - struct Scsi_Host *host, gdth_ha_str *ha); - static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer, int length, gdth_ha_str *ha); diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c index dbe4cc6b9f8..3b6f83ffddc 100644 --- a/drivers/scsi/gvp11.c +++ b/drivers/scsi/gvp11.c @@ -191,7 +191,8 @@ static int gvp11_bus_reset(struct scsi_cmnd *cmd) static struct scsi_host_template gvp11_scsi_template = { .module = THIS_MODULE, .name = "GVP Series II SCSI", - .proc_info = wd33c93_proc_info, + .show_info = wd33c93_show_info, + .write_info = wd33c93_write_info, .proc_name = "GVP11", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, @@ -309,7 +310,7 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent) if (!request_mem_region(address, 256, "wd33c93")) return -EBUSY; - regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address)); + regs = ZTWO_VADDR(address); error = check_wd33c93(regs); if (error) diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index df0c3c71ea4..3cbb57a8b84 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -169,6 +169,7 @@ void scsi_remove_host(struct Scsi_Host *shost) spin_unlock_irqrestore(shost->host_lock, flags); scsi_autopm_get_host(shost); + flush_workqueue(shost->tmf_work_q); scsi_forget_host(shost); mutex_unlock(&shost->scan_mutex); scsi_proc_host_rm(shost); @@ -294,6 +295,8 @@ static void scsi_host_dev_release(struct device *dev) scsi_proc_hostdir_rm(shost->hostt); + if (shost->tmf_work_q) + destroy_workqueue(shost->tmf_work_q); if (shost->ehandler) kthread_stop(shost->ehandler); if (shost->work_q) @@ -316,6 +319,12 @@ static void scsi_host_dev_release(struct device *dev) kfree(shost); } +static int shost_eh_deadline = -1; + +module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(eh_deadline, + "SCSI EH timeout in seconds (should be between 0 and 2^31-1)"); + static struct device_type scsi_host_type = { .name = "scsi_host", .release = scsi_host_dev_release, @@ -354,7 +363,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) INIT_LIST_HEAD(&shost->eh_cmd_q); INIT_LIST_HEAD(&shost->starved_list); init_waitqueue_head(&shost->host_wait); - mutex_init(&shost->scan_mutex); /* @@ -388,6 +396,17 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost->unchecked_isa_dma = sht->unchecked_isa_dma; shost->use_clustering = sht->use_clustering; shost->ordered_tag = sht->ordered_tag; + shost->no_write_same = sht->no_write_same; + + if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler) + shost->eh_deadline = -1; + else if ((ulong) shost_eh_deadline * HZ > INT_MAX) { + shost_printk(KERN_WARNING, shost, + "eh_deadline %u too large, setting to %u\n", + shost_eh_deadline, INT_MAX / HZ); + shost->eh_deadline = INT_MAX; + } else + shost->eh_deadline = shost_eh_deadline * HZ; if (sht->supported_mode == MODE_UNKNOWN) /* means we didn't set it ... 
default to INITIATOR */ @@ -436,9 +455,19 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) goto fail_kfree; } + shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d", + WQ_UNBOUND | WQ_MEM_RECLAIM, + 1, shost->host_no); + if (!shost->tmf_work_q) { + printk(KERN_WARNING "scsi%d: failed to create tmf workq\n", + shost->host_no); + goto fail_kthread; + } scsi_proc_hostdir_add(shost->hostt); return shost; + fail_kthread: + kthread_stop(shost->ehandler); fail_kfree: kfree(shost); return NULL; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 7f4f790a3d7..31184b35370 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1,6 +1,6 @@ /* * Disk Array driver for HP Smart Array SAS controllers - * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. + * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -29,7 +29,6 @@ #include <linux/delay.h> #include <linux/fs.h> #include <linux/timer.h> -#include <linux/seq_file.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/compat.h> @@ -48,13 +47,14 @@ #include <linux/string.h> #include <linux/bitmap.h> #include <linux/atomic.h> -#include <linux/kthread.h> #include <linux/jiffies.h> +#include <linux/percpu.h> +#include <asm/div64.h> #include "hpsa_cmd.h" #include "hpsa.h" /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ -#define HPSA_DRIVER_VERSION "2.0.2-1" +#define HPSA_DRIVER_VERSION "3.4.4-1" #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" #define HPSA "hpsa" @@ -89,8 +89,8 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, @@ -99,7 +99,6 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, @@ -107,7 +106,30 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0}, + {PCI_VENDOR_ID_HP, 
PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE}, + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, + {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088}, + {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f}, {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} @@ -125,8 +147,8 @@ static struct board_type products[] = { {0x3245103C, "Smart Array P410i", &SA5_access}, {0x3247103C, "Smart Array P411", &SA5_access}, {0x3249103C, "Smart Array P812", &SA5_access}, - {0x324a103C, "Smart Array P712m", &SA5_access}, - {0x324b103C, "Smart Array P711m", &SA5_access}, + {0x324A103C, "Smart Array P712m", &SA5_access}, + {0x324B103C, "Smart Array P711m", &SA5_access}, {0x3350103C, "Smart Array P222", &SA5_access}, {0x3351103C, "Smart Array P420", &SA5_access}, {0x3352103C, "Smart Array P421", &SA5_access}, @@ -134,28 +156,46 @@ static struct board_type products[] = { {0x3354103C, "Smart Array P420i", &SA5_access}, {0x3355103C, "Smart Array P220i", &SA5_access}, {0x3356103C, "Smart Array P721m", &SA5_access}, - {0x1920103C, "Smart Array", &SA5_access}, - {0x1921103C, "Smart Array", &SA5_access}, - {0x1922103C, "Smart Array", &SA5_access}, - {0x1923103C, "Smart Array", &SA5_access}, - {0x1924103C, "Smart Array", &SA5_access}, - {0x1925103C, "Smart Array", &SA5_access}, - {0x1926103C, "Smart Array", &SA5_access}, - {0x1928103C, "Smart Array", &SA5_access}, - {0x334d103C, "Smart Array P822se", &SA5_access}, + {0x1921103C, "Smart Array P830i", &SA5_access}, + {0x1922103C, "Smart Array P430", &SA5_access}, + {0x1923103C, "Smart Array P431", &SA5_access}, + {0x1924103C, "Smart Array P830", &SA5_access}, + {0x1926103C, "Smart Array P731m", &SA5_access}, + {0x1928103C, "Smart Array P230i", &SA5_access}, + {0x1929103C, "Smart Array P530", &SA5_access}, + {0x21BD103C, "Smart Array", &SA5_access}, + {0x21BE103C, "Smart Array", &SA5_access}, + {0x21BF103C, "Smart Array", &SA5_access}, + {0x21C0103C, "Smart Array", &SA5_access}, + {0x21C1103C, "Smart Array", &SA5_access}, + {0x21C2103C, "Smart Array", &SA5_access}, + {0x21C3103C, "Smart Array", &SA5_access}, + {0x21C4103C, "Smart Array", &SA5_access}, + {0x21C5103C, "Smart Array", &SA5_access}, + {0x21C6103C, "Smart Array", &SA5_access}, + {0x21C7103C, "Smart Array", &SA5_access}, + {0x21C8103C, "Smart Array", &SA5_access}, + {0x21C9103C, "Smart Array", &SA5_access}, + {0x21CA103C, "Smart Array", &SA5_access}, + {0x21CB103C, "Smart Array", &SA5_access}, + {0x21CC103C, "Smart Array", &SA5_access}, + {0x21CD103C, "Smart Array", &SA5_access}, + {0x21CE103C, "Smart Array", &SA5_access}, + {0x00761590, "HP Storage P1224 Array Controller", 
&SA5_access}, + {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, + {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, + {0x00881590, "HP Storage P1228e Array Controller", &SA5_access}, + {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access}, {0xFFFF103C, "Unknown Smart Array", &SA5_access}, }; static int number_of_controllers; -static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list); -static spinlock_t lockup_detector_lock; -static struct task_struct *hpsa_lockup_detector; - static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); -static void start_io(struct ctlr_info *h); +static void lock_and_start_io(struct ctlr_info *h); +static void start_io(struct ctlr_info *h, unsigned long *flags); #ifdef CONFIG_COMPAT static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); @@ -166,8 +206,9 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); static struct CommandList *cmd_alloc(struct ctlr_info *h); static struct CommandList *cmd_special_alloc(struct ctlr_info *h); static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, - void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, + void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, int cmd_type); +#define VPD_PAGE (1 << 8) static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); static void hpsa_scan_start(struct Scsi_Host *); @@ -188,7 +229,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h, struct CommandList *c); /* performant mode helper functions */ static void calc_bucket_map(int *bucket, int num_buckets, - int nsgs, int *bucket_map); + int nsgs, int min_blocks, int *bucket_map); static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); static inline u32 next_command(struct ctlr_info *h, u8 q); static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, @@ -200,8 +241,14 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id); static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, int wait_for_ready); static inline void finish_cmd(struct CommandList *c); +static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h); #define BOARD_NOT_READY 0 #define BOARD_READY 1 +static void hpsa_drain_accel_commands(struct ctlr_info *h); +static void hpsa_flush_cache(struct ctlr_info *h); +static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, + struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, + u8 *scsi3addr); static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) { @@ -264,6 +311,55 @@ static int check_for_busy(struct ctlr_info *h, struct CommandList *c) return 1; } +static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int status, len; + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + char tmpbuf[10]; + + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + return -EACCES; + len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; + strncpy(tmpbuf, buf, len); + tmpbuf[len] = '\0'; + if (sscanf(tmpbuf, "%d", &status) != 1) + return -EINVAL; + h = shost_to_hba(shost); + h->acciopath_status = !!status; + dev_warn(&h->pdev->dev, + "hpsa: HP SSD Smart Path %s via sysfs update.\n", + h->acciopath_status ? 
"enabled" : "disabled"); + return count; +} + +static ssize_t host_store_raid_offload_debug(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int debug_level, len; + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + char tmpbuf[10]; + + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + return -EACCES; + len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; + strncpy(tmpbuf, buf, len); + tmpbuf[len] = '\0'; + if (sscanf(tmpbuf, "%d", &debug_level) != 1) + return -EINVAL; + if (debug_level < 0) + debug_level = 0; + h = shost_to_hba(shost); + h->raid_offload_debug = debug_level; + dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n", + h->raid_offload_debug); + return count; +} + static ssize_t host_store_rescan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -311,6 +407,17 @@ static ssize_t host_show_transport_mode(struct device *dev, "performant" : "simple"); } +static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + + h = shost_to_hba(shost); + return snprintf(buf, 30, "HP SSD Smart Path %s\n", + (h->acciopath_status == 1) ? "enabled" : "disabled"); +} + /* List of controllers which cannot be hard reset on kexec with reset_devices */ static u32 unresettable_controller[] = { 0x324a103C, /* Smart Array P712m */ @@ -400,6 +507,13 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", "1(ADM)", "UNKNOWN" }; +#define HPSA_RAID_0 0 +#define HPSA_RAID_4 1 +#define HPSA_RAID_1 2 /* also used for RAID 10 */ +#define HPSA_RAID_5 3 /* also used for RAID 50 */ +#define HPSA_RAID_51 4 +#define HPSA_RAID_6 5 /* also used for RAID 60 */ +#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1) static ssize_t raid_level_show(struct device *dev, @@ -488,10 +602,39 @@ static ssize_t unique_id_show(struct device *dev, sn[12], sn[13], sn[14], sn[15]); } +static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct scsi_device *sdev; + struct hpsa_scsi_dev_t *hdev; + unsigned long flags; + int offload_enabled; + + sdev = to_scsi_device(dev); + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->lock, flags); + hdev = sdev->hostdata; + if (!hdev) { + spin_unlock_irqrestore(&h->lock, flags); + return -ENODEV; + } + offload_enabled = hdev->offload_enabled; + spin_unlock_irqrestore(&h->lock, flags); + return snprintf(buf, 20, "%d\n", offload_enabled); +} + static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); +static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, + host_show_hp_ssd_smart_path_enabled, NULL); +static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH, + host_show_hp_ssd_smart_path_status, + host_store_hp_ssd_smart_path_status); +static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL, + host_store_raid_offload_debug); static DEVICE_ATTR(firmware_revision, S_IRUGO, host_show_firmware_revision, NULL); static DEVICE_ATTR(commands_outstanding, S_IRUGO, @@ -505,6 +648,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = { 
&dev_attr_raid_level, &dev_attr_lunid, &dev_attr_unique_id, + &dev_attr_hp_ssd_smart_path_enabled, NULL, }; @@ -514,6 +658,8 @@ static struct device_attribute *hpsa_shost_attrs[] = { &dev_attr_commands_outstanding, &dev_attr_transport_mode, &dev_attr_resettable, + &dev_attr_hp_ssd_smart_path_status, + &dev_attr_raid_offload_debug, NULL, }; @@ -538,6 +684,7 @@ static struct scsi_host_template hpsa_driver_template = { .sdev_attrs = hpsa_sdev_attrs, .shost_attrs = hpsa_shost_attrs, .max_sectors = 8192, + .no_write_same = 1, }; @@ -550,9 +697,12 @@ static inline void addQ(struct list_head *list, struct CommandList *c) static inline u32 next_command(struct ctlr_info *h, u8 q) { u32 a; - struct reply_pool *rq = &h->reply_queue[q]; + struct reply_queue_buffer *rq = &h->reply_queue[q]; unsigned long flags; + if (h->transMethod & CFGTBL_Trans_io_accel1) + return h->access.command_completed(h, q); + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) return h->access.command_completed(h, q); @@ -573,6 +723,32 @@ static inline u32 next_command(struct ctlr_info *h, u8 q) return a; } +/* + * There are some special bits in the bus address of the + * command that we have to set for the controller to know + * how to process the command: + * + * Normal performant mode: + * bit 0: 1 means performant mode, 0 means simple mode. + * bits 1-3 = block fetch table entry + * bits 4-6 = command type (== 0) + * + * ioaccel1 mode: + * bit 0 = "performant mode" bit. + * bits 1-3 = block fetch table entry + * bits 4-6 = command type (== 110) + * (command type is needed because ioaccel1 mode + * commands are submitted through the same register as normal + * mode commands, so this is how the controller knows whether + * the command is normal mode or ioaccel1 mode.) + * + * ioaccel2 mode: + * bit 0 = "performant mode" bit. + * bits 1-4 = block fetch table entry (note extra bit) + * bits 4-6 = not needed, because ioaccel2 mode has + * a separate special register for submitting commands. + */ + /* set_performant_mode: Modify the tag for cciss performant * set bit 0 for pull model, bits 3-1 for block fetch * register number @@ -581,12 +757,47 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) { if (likely(h->transMethod & CFGTBL_Trans_Performant)) { c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); - if (likely(h->msix_vector)) + if (likely(h->msix_vector > 0)) c->Header.ReplyQueue = - smp_processor_id() % h->nreply_queues; + raw_smp_processor_id() % h->nreply_queues; } } +static void set_ioaccel1_performant_mode(struct ctlr_info *h, + struct CommandList *c) +{ + struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; + + /* Tell the controller to post the reply to the queue for this + * processor. This seems to give the best I/O throughput. + */ + cp->ReplyQueue = smp_processor_id() % h->nreply_queues; + /* Set the bits in the address sent down to include: + * - performant mode bit (bit 0) + * - pull count (bits 1-3) + * - command type (bits 4-6) + */ + c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) | + IOACCEL1_BUSADDR_CMDTYPE; +} + +static void set_ioaccel2_performant_mode(struct ctlr_info *h, + struct CommandList *c) +{ + struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; + + /* Tell the controller to post the reply to the queue for this + * processor. This seems to give the best I/O throughput. 
+ */ + cp->reply_queue = smp_processor_id() % h->nreply_queues; + /* Set the bits in the address sent down to include: + * - performant mode bit not used in ioaccel mode 2 + * - pull count (bits 0-3) + * - command type isn't needed for ioaccel2 + */ + c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); +} + static int is_firmware_flash_cmd(u8 *cdb) { return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE; @@ -621,13 +832,22 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h, { unsigned long flags; - set_performant_mode(h, c); + switch (c->cmd_type) { + case CMD_IOACCEL1: + set_ioaccel1_performant_mode(h, c); + break; + case CMD_IOACCEL2: + set_ioaccel2_performant_mode(h, c); + break; + default: + set_performant_mode(h, c); + } dial_down_lockup_detection_during_fw_flash(h, c); spin_lock_irqsave(&h->lock, flags); addQ(&h->reqQ, c); h->Qdepth++; + start_io(h, &flags); spin_unlock_irqrestore(&h->lock, flags); - start_io(h); } static inline void removeQ(struct CommandList *c) @@ -765,6 +985,14 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno, /* Raid level changed. */ h->dev[entry]->raid_level = new_entry->raid_level; + + /* Raid offload parameters changed. */ + h->dev[entry]->offload_config = new_entry->offload_config; + h->dev[entry]->offload_enabled = new_entry->offload_enabled; + h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; + h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; + h->dev[entry]->raid_map = new_entry->raid_map; + dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n", scsi_device_type(new_entry->devtype), hostno, new_entry->bus, new_entry->target, new_entry->lun); @@ -885,6 +1113,10 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1, */ if (dev1->raid_level != dev2->raid_level) return 1; + if (dev1->offload_config != dev2->offload_config) + return 1; + if (dev1->offload_enabled != dev2->offload_enabled) + return 1; return 0; } @@ -915,6 +1147,9 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, return DEVICE_UPDATED; return DEVICE_SAME; } else { + /* Keep offline devices offline */ + if (needle->volume_offline) + return DEVICE_NOT_FOUND; return DEVICE_CHANGED; } } @@ -923,6 +1158,110 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, return DEVICE_NOT_FOUND; } +static void hpsa_monitor_offline_device(struct ctlr_info *h, + unsigned char scsi3addr[]) +{ + struct offline_device_entry *device; + unsigned long flags; + + /* Check to see if device is already on the list */ + spin_lock_irqsave(&h->offline_device_lock, flags); + list_for_each_entry(device, &h->offline_device_list, offline_list) { + if (memcmp(device->scsi3addr, scsi3addr, + sizeof(device->scsi3addr)) == 0) { + spin_unlock_irqrestore(&h->offline_device_lock, flags); + return; + } + } + spin_unlock_irqrestore(&h->offline_device_lock, flags); + + /* Device is not on the list, add it. 
*/ + device = kmalloc(sizeof(*device), GFP_KERNEL); + if (!device) { + dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__); + return; + } + memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); + spin_lock_irqsave(&h->offline_device_lock, flags); + list_add_tail(&device->offline_list, &h->offline_device_list); + spin_unlock_irqrestore(&h->offline_device_lock, flags); +} + +/* Print a message explaining various offline volume states */ +static void hpsa_show_volume_status(struct ctlr_info *h, + struct hpsa_scsi_dev_t *sd) +{ + if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED) + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + switch (sd->volume_offline) { + case HPSA_LV_OK: + break; + case HPSA_LV_UNDERGOING_ERASE: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_UNDERGOING_RPI: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_PENDING_RPI: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_ENCRYPTED_NO_KEY: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_UNDERGOING_ENCRYPTION: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_PENDING_ENCRYPTION: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + case HPSA_LV_PENDING_ENCRYPTION_REKEYING: + dev_info(&h->pdev->dev, + "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n", + h->scsi_host->host_no, + sd->bus, sd->target, sd->lun); + break; + } +} + static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, struct hpsa_scsi_dev_t *sd[], int nsds) { @@ -987,6 +1326,20 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, for (i = 0; i < nsds; i++) { if (!sd[i]) /* if already added above. */ continue; + + /* Don't add devices which are NOT READY, FORMAT IN PROGRESS + * as the SCSI mid-layer does not handle such devices well. 
+ * It relentlessly loops sending TUR at 3Hz, then READ(10) + * at 160Hz, and prevents the system from coming up. + */ + if (sd[i]->volume_offline) { + hpsa_show_volume_status(h, sd[i]); + dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n", + h->scsi_host->host_no, + sd[i]->bus, sd[i]->target, sd[i]->lun); + continue; + } + device_change = hpsa_scsi_find_entry(sd[i], h->dev, h->ndevices, &entry); if (device_change == DEVICE_NOT_FOUND) { @@ -1005,6 +1358,17 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, } spin_unlock_irqrestore(&h->devlock, flags); + /* Monitor devices which are in one of several NOT READY states to be + * brought online later. This must be done without holding h->devlock, + * so don't touch h->dev[] + */ + for (i = 0; i < nsds; i++) { + if (!sd[i]) /* if already added above. */ + continue; + if (sd[i]->volume_offline) + hpsa_monitor_offline_device(h, sd[i]->scsi3addr); + } + /* Don't notify scsi mid layer of any changes the first time through * (or if there are no changes) scsi_scan_host will do it later the * first time through. @@ -1054,7 +1418,7 @@ free_and_out: } /* - * Lookup bus/target/lun and retrun corresponding struct hpsa_scsi_dev_t * + * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t * * Assume's h->devlock is held. */ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, @@ -1170,11 +1534,156 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); } + +/* Decode the various types of errors on ioaccel2 path. + * Return 1 for any error that should generate a RAID path retry. + * Return 0 for errors that don't require a RAID path retry. + */ +static int handle_ioaccel_mode2_error(struct ctlr_info *h, + struct CommandList *c, + struct scsi_cmnd *cmd, + struct io_accel2_cmd *c2) +{ + int data_len; + int retry = 0; + + switch (c2->error_data.serv_response) { + case IOACCEL2_SERV_RESPONSE_COMPLETE: + switch (c2->error_data.status) { + case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: + break; + case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: + dev_warn(&h->pdev->dev, + "%s: task complete with check condition.\n", + "HP SSD Smart Path"); + cmd->result |= SAM_STAT_CHECK_CONDITION; + if (c2->error_data.data_present != + IOACCEL2_SENSE_DATA_PRESENT) { + memset(cmd->sense_buffer, 0, + SCSI_SENSE_BUFFERSIZE); + break; + } + /* copy the sense data */ + data_len = c2->error_data.sense_data_len; + if (data_len > SCSI_SENSE_BUFFERSIZE) + data_len = SCSI_SENSE_BUFFERSIZE; + if (data_len > sizeof(c2->error_data.sense_data_buff)) + data_len = + sizeof(c2->error_data.sense_data_buff); + memcpy(cmd->sense_buffer, + c2->error_data.sense_data_buff, data_len); + retry = 1; + break; + case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: + dev_warn(&h->pdev->dev, + "%s: task complete with BUSY status.\n", + "HP SSD Smart Path"); + retry = 1; + break; + case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: + dev_warn(&h->pdev->dev, + "%s: task complete with reservation conflict.\n", + "HP SSD Smart Path"); + retry = 1; + break; + case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: + /* Make scsi midlayer do unlimited retries */ + cmd->result = DID_IMM_RETRY << 16; + break; + case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: + dev_warn(&h->pdev->dev, + "%s: task complete with aborted status.\n", + "HP SSD Smart Path"); + retry = 1; + break; + default: + dev_warn(&h->pdev->dev, + "%s: task complete with unrecognized status: 0x%02x\n", + "HP SSD Smart Path", c2->error_data.status); + retry = 
1; + break; + } + break; + case IOACCEL2_SERV_RESPONSE_FAILURE: + /* don't expect to get here. */ + dev_warn(&h->pdev->dev, + "unexpected delivery or target failure, status = 0x%02x\n", + c2->error_data.status); + retry = 1; + break; + case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: + break; + case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: + break; + case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: + dev_warn(&h->pdev->dev, "task management function rejected.\n"); + retry = 1; + break; + case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: + dev_warn(&h->pdev->dev, "task management function invalid LUN\n"); + break; + default: + dev_warn(&h->pdev->dev, + "%s: Unrecognized server response: 0x%02x\n", + "HP SSD Smart Path", + c2->error_data.serv_response); + retry = 1; + break; + } + + return retry; /* retry on raid path? */ +} + +static void process_ioaccel2_completion(struct ctlr_info *h, + struct CommandList *c, struct scsi_cmnd *cmd, + struct hpsa_scsi_dev_t *dev) +{ + struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; + int raid_retry = 0; + + /* check for good status */ + if (likely(c2->error_data.serv_response == 0 && + c2->error_data.status == 0)) { + cmd_free(h, c); + cmd->scsi_done(cmd); + return; + } + + /* Any RAID offload error results in retry which will use + * the normal I/O path so the controller can handle whatever's + * wrong. + */ + if (is_logical_dev_addr_mode(dev->scsi3addr) && + c2->error_data.serv_response == + IOACCEL2_SERV_RESPONSE_FAILURE) { + dev->offload_enabled = 0; + h->drv_req_rescan = 1; /* schedule controller for a rescan */ + cmd->result = DID_SOFT_ERROR << 16; + cmd_free(h, c); + cmd->scsi_done(cmd); + return; + } + raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2); + /* If error found, disable Smart Path, schedule a rescan, + * and force a retry on the standard path. + */ + if (raid_retry) { + dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n", + "HP SSD Smart Path"); + dev->offload_enabled = 0; /* Disable Smart Path */ + h->drv_req_rescan = 1; /* schedule controller rescan */ + cmd->result = DID_SOFT_ERROR << 16; + } + cmd_free(h, c); + cmd->scsi_done(cmd); +} + static void complete_scsi_command(struct CommandList *cp) { struct scsi_cmnd *cmd; struct ctlr_info *h; struct ErrorInfo *ei; + struct hpsa_scsi_dev_t *dev; unsigned char sense_key; unsigned char asc; /* additional sense code */ @@ -1184,13 +1693,19 @@ static void complete_scsi_command(struct CommandList *cp) ei = cp->err_info; cmd = (struct scsi_cmnd *) cp->scsi_cmd; h = cp->h; + dev = cmd->device->hostdata; scsi_dma_unmap(cmd); /* undo the DMA mappings */ - if (cp->Header.SGTotal > h->max_cmd_sg_entries) + if ((cp->cmd_type == CMD_SCSI) && + (cp->Header.SGTotal > h->max_cmd_sg_entries)) hpsa_unmap_sg_chain_block(h, cp); cmd->result = (DID_OK << 16); /* host byte */ cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ + + if (cp->cmd_type == CMD_IOACCEL2) + return process_ioaccel2_completion(h, cp, cmd, dev); + cmd->result |= ei->ScsiStatus; /* copy the sense data whether we need to or not. */ @@ -1205,11 +1720,37 @@ static void complete_scsi_command(struct CommandList *cp) scsi_set_resid(cmd, ei->ResidualCnt); if (ei->CommandStatus == 0) { - cmd->scsi_done(cmd); cmd_free(h, cp); + cmd->scsi_done(cmd); return; } + /* For I/O accelerator commands, copy over some fields to the normal + * CISS header used below for error handling. 
+ */ + if (cp->cmd_type == CMD_IOACCEL1) { + struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; + cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd); + cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK; + cp->Header.Tag.lower = c->Tag.lower; + cp->Header.Tag.upper = c->Tag.upper; + memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); + memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); + + /* Any RAID offload error results in retry which will use + * the normal I/O path so the controller can handle whatever's + * wrong. + */ + if (is_logical_dev_addr_mode(dev->scsi3addr)) { + if (ei->CommandStatus == CMD_IOACCEL_DISABLED) + dev->offload_enabled = 0; + cmd->result = DID_SOFT_ERROR << 16; + cmd_free(h, cp); + cmd->scsi_done(cmd); + return; + } + } + /* an error has occurred */ switch (ei->CommandStatus) { @@ -1224,10 +1765,8 @@ static void complete_scsi_command(struct CommandList *cp) } if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { - if (check_for_unit_attention(h, cp)) { - cmd->result = DID_SOFT_ERROR << 16; + if (check_for_unit_attention(h, cp)) break; - } if (sense_key == ILLEGAL_REQUEST) { /* * SCSI REPORT_LUNS is commonly unsupported on @@ -1265,7 +1804,7 @@ static void complete_scsi_command(struct CommandList *cp) "has check condition: aborted command: " "ASC: 0x%x, ASCQ: 0x%x\n", cp, asc, ascq); - cmd->result = DID_SOFT_ERROR << 16; + cmd->result |= DID_SOFT_ERROR << 16; break; } /* Must be some other type of check condition */ @@ -1374,13 +1913,21 @@ static void complete_scsi_command(struct CommandList *cp) cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "Command unabortable\n"); break; + case CMD_IOACCEL_DISABLED: + /* This only handles the direct pass-through case since RAID + * offload is handled above. Just attempt a retry. + */ + cmd->result = DID_SOFT_ERROR << 16; + dev_warn(&h->pdev->dev, + "cp %p had HP SSD Smart Path error\n", cp); + break; default: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", cp, ei->CommandStatus); } - cmd->scsi_done(cmd); cmd_free(h, cp); + cmd->scsi_done(cmd); } static void hpsa_pci_unmap(struct pci_dev *pdev, @@ -1423,6 +1970,7 @@ static int hpsa_map_one(struct pci_dev *pdev, cp->SG[0].Addr.upper = (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); cp->SG[0].Len = buflen; + cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */ cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */ cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */ return 0; @@ -1438,20 +1986,26 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, wait_for_completion(&wait); } +static u32 lockup_detected(struct ctlr_info *h) +{ + int cpu; + u32 rc, *lockup_detected; + + cpu = get_cpu(); + lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); + rc = *lockup_detected; + put_cpu(); + return rc; +} + static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, struct CommandList *c) { - unsigned long flags; - /* If controller lockup detected, fake a hardware error. 
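
lockup_detected() above reads a per-CPU copy of the flag so the hot path no longer needs h->lock. The writer side is not shown in this hunk; assuming h->lockup_detected was allocated with alloc_percpu(u32), a sketch of how such a flag would be published to every CPU could look like:

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Illustrative writer side (not part of this hunk): store the value
 * into each CPU's copy so readers never need a lock. */
static void set_lockup_detected_all_cpus(struct ctlr_info *h, u32 value)
{
	int cpu;

	for_each_online_cpu(cpu)
		*per_cpu_ptr(h->lockup_detected, cpu) = value;
}
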
*/ - spin_lock_irqsave(&h->lock, flags); - if (unlikely(h->lockup_detected)) { - spin_unlock_irqrestore(&h->lock, flags); + if (unlikely(lockup_detected(h))) c->err_info->CommandStatus = CMD_HARDWARE_ERR; - } else { - spin_unlock_irqrestore(&h->lock, flags); + else hpsa_scsi_do_simple_cmd_core(h, c); - } } #define MAX_DRIVER_CMD_RETRIES 25 @@ -1475,17 +2029,37 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, hpsa_pci_unmap(h->pdev, c, 1, data_direction); } -static void hpsa_scsi_interpret_error(struct CommandList *cp) +static void hpsa_print_cmd(struct ctlr_info *h, char *txt, + struct CommandList *c) { - struct ErrorInfo *ei; + const u8 *cdb = c->Request.CDB; + const u8 *lun = c->Header.LUN.LunAddrBytes; + + dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x" + " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", + txt, lun[0], lun[1], lun[2], lun[3], + lun[4], lun[5], lun[6], lun[7], + cdb[0], cdb[1], cdb[2], cdb[3], + cdb[4], cdb[5], cdb[6], cdb[7], + cdb[8], cdb[9], cdb[10], cdb[11], + cdb[12], cdb[13], cdb[14], cdb[15]); +} + +static void hpsa_scsi_interpret_error(struct ctlr_info *h, + struct CommandList *cp) +{ + const struct ErrorInfo *ei = cp->err_info; struct device *d = &cp->h->pdev->dev; + const u8 *sd = ei->SenseInfo; - ei = cp->err_info; switch (ei->CommandStatus) { case CMD_TARGET_STATUS: - dev_warn(d, "cmd %p has completed with errors\n", cp); - dev_warn(d, "cmd %p has SCSI Status = %x\n", cp, - ei->ScsiStatus); + hpsa_print_cmd(h, "SCSI status", cp); + if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) + dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n", + sd[2] & 0x0f, sd[12], sd[13]); + else + dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus); if (ei->ScsiStatus == 0) dev_warn(d, "SCSI status is abnormally zero. " "(probably indicates selection timeout " @@ -1493,54 +2067,51 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp) "firmware bug, circa July, 2001.)\n"); break; case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ - dev_info(d, "UNDERRUN\n"); break; case CMD_DATA_OVERRUN: - dev_warn(d, "cp %p has completed with data overrun\n", cp); + hpsa_print_cmd(h, "overrun condition", cp); break; case CMD_INVALID: { /* controller unfortunately reports SCSI passthru's * to non-existent targets as invalid commands. 
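
hpsa_print_cmd() above spells out sixteen %02x conversions by hand. Kernels of this era also support the %*ph hex-dump printk extension (which this very patch uses later as "%8phN"), so an equivalent, more compact form is possible; a hedged alternative, not what the patch adopts:

/* Same output, using the %phN printk extension: %8phN dumps 8 bytes,
 * %16phN dumps 16, with no separators. */
static void hpsa_print_cmd_compact(struct ctlr_info *h, const char *txt,
				   struct CommandList *c)
{
	dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
		 txt, c->Header.LUN.LunAddrBytes, c->Request.CDB);
}
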
*/ - dev_warn(d, "cp %p is reported invalid (probably means " - "target device no longer present)\n", cp); - /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0); - print_cmd(cp); */ + hpsa_print_cmd(h, "invalid command", cp); + dev_warn(d, "probably means device no longer present\n"); } break; case CMD_PROTOCOL_ERR: - dev_warn(d, "cp %p has protocol error \n", cp); + hpsa_print_cmd(h, "protocol error", cp); break; case CMD_HARDWARE_ERR: - /* cmd->result = DID_ERROR << 16; */ - dev_warn(d, "cp %p had hardware error\n", cp); + hpsa_print_cmd(h, "hardware error", cp); break; case CMD_CONNECTION_LOST: - dev_warn(d, "cp %p had connection lost\n", cp); + hpsa_print_cmd(h, "connection lost", cp); break; case CMD_ABORTED: - dev_warn(d, "cp %p was aborted\n", cp); + hpsa_print_cmd(h, "aborted", cp); break; case CMD_ABORT_FAILED: - dev_warn(d, "cp %p reports abort failed\n", cp); + hpsa_print_cmd(h, "abort failed", cp); break; case CMD_UNSOLICITED_ABORT: - dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp); + hpsa_print_cmd(h, "unsolicited abort", cp); break; case CMD_TIMEOUT: - dev_warn(d, "cp %p timed out\n", cp); + hpsa_print_cmd(h, "timed out", cp); break; case CMD_UNABORTABLE: - dev_warn(d, "Command unabortable\n"); + hpsa_print_cmd(h, "unabortable", cp); break; default: - dev_warn(d, "cp %p returned unknown status %x\n", cp, + hpsa_print_cmd(h, "unknown status", cp); + dev_warn(d, "Unknown command status %x\n", ei->CommandStatus); } } static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, - unsigned char page, unsigned char *buf, + u16 page, unsigned char *buf, unsigned char bufsize) { int rc = IO_OK; @@ -1562,7 +2133,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { - hpsa_scsi_interpret_error(c); + hpsa_scsi_interpret_error(h, c); rc = -1; } out: @@ -1570,7 +2141,39 @@ out: return rc; } -static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr) +static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, + unsigned char *scsi3addr, unsigned char page, + struct bmic_controller_parameters *buf, size_t bufsize) +{ + int rc = IO_OK; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_special_alloc(h); + + if (c == NULL) { /* trouble... */ + dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); + return -ENOMEM; + } + + if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize, + page, scsi3addr, TYPE_CMD)) { + rc = -1; + goto out; + } + hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + rc = -1; + } +out: + cmd_special_free(h, c); + return rc; + } + +static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, + u8 reset_type) { int rc = IO_OK; struct CommandList *c; @@ -1584,14 +2187,15 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr) } /* fill_cmd can't fail here, no data buffer to map. */ - (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, - NULL, 0, 0, scsi3addr, TYPE_MSG); + (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, + scsi3addr, TYPE_MSG); + c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */ hpsa_scsi_do_simple_cmd_core(h, c); /* no unmap needed here because no data xfer. 
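
hpsa_bmic_ctrl_mode_sense() above repeats the same five-step shape used by hpsa_scsi_do_inquiry() and, further down, hpsa_get_raid_map(): allocate, fill, issue with retry, interpret errors, free. Reduced to its control flow (a sketch of the recurring pattern, not a proposed refactor; all names appear in this patch):

static int hpsa_simple_cmd_skeleton(struct ctlr_info *h, u8 opcode,
				    void *buf, size_t bufsize,
				    unsigned char *scsi3addr)
{
	struct CommandList *c;
	struct ErrorInfo *ei;
	int rc = IO_OK;

	c = cmd_special_alloc(h);
	if (c == NULL)
		return -ENOMEM;
	if (fill_cmd(c, opcode, h, buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_special_free(h, c);
	return rc;
}
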
*/ ei = c->err_info; if (ei->CommandStatus != 0) { - hpsa_scsi_interpret_error(c); + hpsa_scsi_interpret_error(h, c); rc = -1; } cmd_special_free(h, c); return rc; @@ -1608,7 +2212,7 @@ static void hpsa_get_raid_level(struct ctlr_info *h, buf = kzalloc(64, GFP_KERNEL); if (!buf) return; - rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64); + rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64); if (rc == 0) *raid_level = buf[8]; if (*raid_level > RAID_UNKNOWN) @@ -1617,6 +2221,204 @@ static void hpsa_get_raid_level(struct ctlr_info *h, return; } +#define HPSA_MAP_DEBUG +#ifdef HPSA_MAP_DEBUG +static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, + struct raid_map_data *map_buff) +{ + struct raid_map_disk_data *dd = &map_buff->data[0]; + int map, row, col; + u16 map_cnt, row_cnt, disks_per_row; + + if (rc != 0) + return; + + /* Show details only if debugging has been activated. */ + if (h->raid_offload_debug < 2) + return; + + dev_info(&h->pdev->dev, "structure_size = %u\n", + le32_to_cpu(map_buff->structure_size)); + dev_info(&h->pdev->dev, "volume_blk_size = %u\n", + le32_to_cpu(map_buff->volume_blk_size)); + dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n", + le64_to_cpu(map_buff->volume_blk_cnt)); + dev_info(&h->pdev->dev, "physicalBlockShift = %u\n", + map_buff->phys_blk_shift); + dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n", + map_buff->parity_rotation_shift); + dev_info(&h->pdev->dev, "strip_size = %u\n", + le16_to_cpu(map_buff->strip_size)); + dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n", + le64_to_cpu(map_buff->disk_starting_blk)); + dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n", + le64_to_cpu(map_buff->disk_blk_cnt)); + dev_info(&h->pdev->dev, "data_disks_per_row = %u\n", + le16_to_cpu(map_buff->data_disks_per_row)); + dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", + le16_to_cpu(map_buff->metadata_disks_per_row)); + dev_info(&h->pdev->dev, "row_cnt = %u\n", + le16_to_cpu(map_buff->row_cnt)); + dev_info(&h->pdev->dev, "layout_map_count = %u\n", + le16_to_cpu(map_buff->layout_map_count)); + dev_info(&h->pdev->dev, "flags = %u\n", + le16_to_cpu(map_buff->flags)); + if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON) + dev_info(&h->pdev->dev, "encryption = ON\n"); + else + dev_info(&h->pdev->dev, "encryption = OFF\n"); + dev_info(&h->pdev->dev, "dekindex = %u\n", + le16_to_cpu(map_buff->dekindex)); + + map_cnt = le16_to_cpu(map_buff->layout_map_count); + for (map = 0; map < map_cnt; map++) { + dev_info(&h->pdev->dev, "Map%u:\n", map); + row_cnt = le16_to_cpu(map_buff->row_cnt); + for (row = 0; row < row_cnt; row++) { + dev_info(&h->pdev->dev, " Row%u:\n", row); + disks_per_row = + le16_to_cpu(map_buff->data_disks_per_row); + for (col = 0; col < disks_per_row; col++, dd++) + dev_info(&h->pdev->dev, + " D%02u: h=0x%04x xor=%u,%u\n", + col, dd->ioaccel_handle, + dd->xor_mult[0], dd->xor_mult[1]); + disks_per_row = + le16_to_cpu(map_buff->metadata_disks_per_row); + for (col = 0; col < disks_per_row; col++, dd++) + dev_info(&h->pdev->dev, + " M%02u: h=0x%04x xor=%u,%u\n", + col, dd->ioaccel_handle, + dd->xor_mult[0], dd->xor_mult[1]); + } + } +} +#else +static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, + __attribute__((unused)) int rc, + __attribute__((unused)) struct raid_map_data *map_buff) +{ +} +#endif + +static int hpsa_get_raid_map(struct ctlr_info *h, + unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) +{ + int rc = 0; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_special_alloc(h); 
+ if (c == NULL) { + dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); + return -ENOMEM; + } + if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, + sizeof(this_device->raid_map), 0, + scsi3addr, TYPE_CMD)) { + dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); + cmd_special_free(h, c); + return -ENOMEM; + } + hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(h, c); + cmd_special_free(h, c); + return -1; + } + cmd_special_free(h, c); + + /* @todo in the future, dynamically allocate RAID map memory */ + if (le32_to_cpu(this_device->raid_map.structure_size) > + sizeof(this_device->raid_map)) { + dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); + rc = -1; + } + hpsa_debug_map_buff(h, rc, &this_device->raid_map); + return rc; +} + +static int hpsa_vpd_page_supported(struct ctlr_info *h, + unsigned char scsi3addr[], u8 page) +{ + int rc; + int i; + int pages; + unsigned char *buf, bufsize; + + buf = kzalloc(256, GFP_KERNEL); + if (!buf) + return 0; + + /* Get the size of the page list first */ + rc = hpsa_scsi_do_inquiry(h, scsi3addr, + VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, + buf, HPSA_VPD_HEADER_SZ); + if (rc != 0) + goto exit_unsupported; + pages = buf[3]; + if ((pages + HPSA_VPD_HEADER_SZ) <= 255) + bufsize = pages + HPSA_VPD_HEADER_SZ; + else + bufsize = 255; + + /* Get the whole VPD page list */ + rc = hpsa_scsi_do_inquiry(h, scsi3addr, + VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, + buf, bufsize); + if (rc != 0) + goto exit_unsupported; + + pages = buf[3]; + for (i = 1; i <= pages; i++) + if (buf[3 + i] == page) + goto exit_supported; +exit_unsupported: + kfree(buf); + return 0; +exit_supported: + kfree(buf); + return 1; +} + +static void hpsa_get_ioaccel_status(struct ctlr_info *h, + unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) +{ + int rc; + unsigned char *buf; + u8 ioaccel_status; + + this_device->offload_config = 0; + this_device->offload_enabled = 0; + + buf = kzalloc(64, GFP_KERNEL); + if (!buf) + return; + if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) + goto out; + rc = hpsa_scsi_do_inquiry(h, scsi3addr, + VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); + if (rc != 0) + goto out; + +#define IOACCEL_STATUS_BYTE 4 +#define OFFLOAD_CONFIGURED_BIT 0x01 +#define OFFLOAD_ENABLED_BIT 0x02 + ioaccel_status = buf[IOACCEL_STATUS_BYTE]; + this_device->offload_config = + !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); + if (this_device->offload_config) { + this_device->offload_enabled = + !!(ioaccel_status & OFFLOAD_ENABLED_BIT); + if (hpsa_get_raid_map(h, scsi3addr, this_device)) + this_device->offload_enabled = 0; + } +out: + kfree(buf); + return; +} + /* Get the device id from inquiry page 0x83 */ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, unsigned char *device_id, int buflen) @@ -1628,8 +2430,8 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, buflen = 16; buf = kzalloc(64, GFP_KERNEL); if (!buf) - return -1; - rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64); + return -ENOMEM; + rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); if (rc == 0) memcpy(device_id, &buf[8], buflen); kfree(buf); @@ -1663,8 +2465,16 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { - 
hpsa_scsi_interpret_error(c); + hpsa_scsi_interpret_error(h, c); rc = -1; + } else { + if (buf->extended_response_flag != extended_response) { + dev_err(&h->pdev->dev, + "report luns requested format %u, got %u\n", + extended_response, + buf->extended_response_flag); + rc = -1; + } } out: cmd_special_free(h, c); @@ -1692,6 +2502,111 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, device->lun = lun; } +/* Use VPD inquiry to get details of volume status */ +static int hpsa_get_volume_status(struct ctlr_info *h, + unsigned char scsi3addr[]) +{ + int rc; + int status; + int size; + unsigned char *buf; + + buf = kzalloc(64, GFP_KERNEL); + if (!buf) + return HPSA_VPD_LV_STATUS_UNSUPPORTED; + + /* Does controller have VPD for logical volume status? */ + if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) + goto exit_failed; + + /* Get the size of the VPD return buffer */ + rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, + buf, HPSA_VPD_HEADER_SZ); + if (rc != 0) + goto exit_failed; + size = buf[3]; + + /* Now get the whole VPD buffer */ + rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, + buf, size + HPSA_VPD_HEADER_SZ); + if (rc != 0) + goto exit_failed; + status = buf[4]; /* status byte */ + + kfree(buf); + return status; +exit_failed: + kfree(buf); + return HPSA_VPD_LV_STATUS_UNSUPPORTED; +} + +/* Determine offline status of a volume. + * Return either: + * 0 (not offline) + * 0xff (offline for unknown reasons) + * # (integer code indicating one of several NOT READY states + * describing why a volume is to be kept offline) + */ +static int hpsa_volume_offline(struct ctlr_info *h, + unsigned char scsi3addr[]) +{ + struct CommandList *c; + unsigned char *sense, sense_key, asc, ascq; + int ldstat = 0; + u16 cmd_status; + u8 scsi_status; +#define ASC_LUN_NOT_READY 0x04 +#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04 +#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 + + c = cmd_alloc(h); + if (!c) + return 0; + (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); + hpsa_scsi_do_simple_cmd_core(h, c); + sense = c->err_info->SenseInfo; + sense_key = sense[2]; + asc = sense[12]; + ascq = sense[13]; + cmd_status = c->err_info->CommandStatus; + scsi_status = c->err_info->ScsiStatus; + cmd_free(h, c); + /* Is the volume 'not ready'? 
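
hpsa_volume_offline() above reads the fixed-format sense bytes directly: the sense key is byte 2, the ASC byte 12, the ASCQ byte 13, and the check that follows treats NOT READY with ASC 0x04 as "LUN not ready". A self-contained sketch of that decode (masking the reserved high bits of byte 2, which is good hygiene even though the driver reads the byte whole):

#include <stdint.h>
#include <stdio.h>

#define NOT_READY         0x02	/* SPC sense key */
#define ASC_LUN_NOT_READY 0x04

static int is_lun_not_ready(const uint8_t *sense)
{
	uint8_t key = sense[2] & 0x0f;	/* low nibble is the key */
	uint8_t asc = sense[12];

	return key == NOT_READY && asc == ASC_LUN_NOT_READY;
}

int main(void)
{
	/* Fixed-format sense: NOT READY, LUN not ready, format in progress */
	uint8_t sense[18] = { 0x70, 0, 0x02, [12] = 0x04, [13] = 0x04 };

	printf("not ready: %d\n", is_lun_not_ready(sense));
	return 0;
}
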
*/ + if (cmd_status != CMD_TARGET_STATUS || + scsi_status != SAM_STAT_CHECK_CONDITION || + sense_key != NOT_READY || + asc != ASC_LUN_NOT_READY) { + return 0; + } + + /* Determine the reason for not ready state */ + ldstat = hpsa_get_volume_status(h, scsi3addr); + + /* Keep volume offline in certain cases: */ + switch (ldstat) { + case HPSA_LV_UNDERGOING_ERASE: + case HPSA_LV_UNDERGOING_RPI: + case HPSA_LV_PENDING_RPI: + case HPSA_LV_ENCRYPTED_NO_KEY: + case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: + case HPSA_LV_UNDERGOING_ENCRYPTION: + case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: + case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: + return ldstat; + case HPSA_VPD_LV_STATUS_UNSUPPORTED: + /* If VPD status page isn't available, + * use ASC/ASCQ to determine state + */ + if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) || + (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)) + return ldstat; + break; + default: + break; + } + return 0; +} + static int hpsa_update_device_info(struct ctlr_info *h, unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, unsigned char *is_OBDR_device) @@ -1730,10 +2645,22 @@ static int hpsa_update_device_info(struct ctlr_info *h, sizeof(this_device->device_id)); if (this_device->devtype == TYPE_DISK && - is_logical_dev_addr_mode(scsi3addr)) + is_logical_dev_addr_mode(scsi3addr)) { + int volume_offline; + hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); - else + if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) + hpsa_get_ioaccel_status(h, scsi3addr, this_device); + volume_offline = hpsa_volume_offline(h, scsi3addr); + if (volume_offline < 0 || volume_offline > 0xff) + volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; + this_device->volume_offline = volume_offline & 0xff; + } else { this_device->raid_level = RAID_UNKNOWN; + this_device->offload_config = 0; + this_device->offload_enabled = 0; + this_device->volume_offline = 0; + } if (is_OBDR_device) { /* See if this is a One-Button-Disaster-Recovery device @@ -1759,6 +2686,7 @@ static unsigned char *ext_target_model[] = { "MSA2312", "MSA2324", "P2000 G3 SAS", + "MSA 2040 SAS", NULL, }; @@ -1862,6 +2790,101 @@ static int add_ext_target_dev(struct ctlr_info *h, } /* + * Get address of physical disk used for an ioaccel2 mode command: + * 1. Extract ioaccel2 handle from the command. + * 2. Find a matching ioaccel2 handle from list of physical disks. + * 3. Return: + * 1 and set scsi3addr to address of matching physical + * 0 if no matching physical disk was found. + */ +static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, + struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) +{ + struct ReportExtendedLUNdata *physicals = NULL; + int responsesize = 24; /* size of physical extended response */ + int extended = 2; /* flag forces reporting 'other dev info'. 
*/ + int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; + u32 nphysicals = 0; /* number of reported physical devs */ + int found = 0; /* found match (1) or not (0) */ + u32 find; /* handle we need to match */ + int i; + struct scsi_cmnd *scmd; /* scsi command within request being aborted */ + struct hpsa_scsi_dev_t *d; /* device of request being aborted */ + struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ + u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */ + u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */ + + if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) + return 0; /* no match */ + + /* point to the ioaccel2 device handle */ + c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; + if (c2a == NULL) + return 0; /* no match */ + + scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd; + if (scmd == NULL) + return 0; /* no match */ + + d = scmd->device->hostdata; + if (d == NULL) + return 0; /* no match */ + + it_nexus = cpu_to_le32((u32) d->ioaccel_handle); + scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus); + find = c2a->scsi_nexus; + + if (h->raid_offload_debug > 0) + dev_info(&h->pdev->dev, + "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", + __func__, scsi_nexus, + d->device_id[0], d->device_id[1], d->device_id[2], + d->device_id[3], d->device_id[4], d->device_id[5], + d->device_id[6], d->device_id[7], d->device_id[8], + d->device_id[9], d->device_id[10], d->device_id[11], + d->device_id[12], d->device_id[13], d->device_id[14], + d->device_id[15]); + + /* Get the list of physical devices */ + physicals = kzalloc(reportsize, GFP_KERNEL); + if (physicals == NULL) + return 0; + if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals, + reportsize, extended)) { + dev_err(&h->pdev->dev, + "Can't lookup %s device handle: report physical LUNs failed.\n", + "HP SSD Smart Path"); + kfree(physicals); + return 0; + } + nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) / + responsesize; + + /* find ioaccel2 handle in list of physicals: */ + for (i = 0; i < nphysicals; i++) { + struct ext_report_lun_entry *entry = &physicals->LUN[i]; + + /* handle is in bytes 28-31 of each lun */ + if (entry->ioaccel_handle != find) + continue; /* didn't match */ + found = 1; + memcpy(scsi3addr, entry->lunid, 8); + if (h->raid_offload_debug > 0) + dev_info(&h->pdev->dev, + "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n", + __func__, find, + entry->ioaccel_handle, scsi3addr); + break; /* found it */ + } + + kfree(physicals); + if (found) + return 1; + else + return 0; + +} +/* * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, * logdev. The number of luns in physdev and logdev are returned in * *nphysicals and *nlogicals, respectively. 
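
The REPORT LUNS reply parsed here and in hpsa_gather_lun_info() below begins with a big-endian byte count; the number of entries is that count divided by the per-entry size, 8 bytes in the standard format and 24 in the extended format that carries the ioaccel handle. A small sketch of the arithmetic (user-space, constants illustrative):

#include <stdint.h>

#define STD_LUN_ENTRY_SIZE 8	/* standard REPORT LUNS entry */
#define EXT_LUN_ENTRY_SIZE 24	/* extended entry with ioaccel handle */

/* First 4 bytes of the reply are a big-endian count of list bytes. */
static uint32_t lun_entries(const uint8_t *reply, int extended)
{
	uint32_t list_bytes = ((uint32_t)reply[0] << 24) |
			      ((uint32_t)reply[1] << 16) |
			      ((uint32_t)reply[2] << 8) |
			      reply[3];

	return list_bytes / (extended ? EXT_LUN_ENTRY_SIZE
				      : STD_LUN_ENTRY_SIZE);
}
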
@@ -1869,14 +2892,26 @@ static int add_ext_target_dev(struct ctlr_info *h, */ static int hpsa_gather_lun_info(struct ctlr_info *h, int reportlunsize, - struct ReportLUNdata *physdev, u32 *nphysicals, + struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode, struct ReportLUNdata *logdev, u32 *nlogicals) { - if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { + int physical_entry_size = 8; + + *physical_mode = 0; + + /* For I/O accelerator mode we need to read physical device handles */ + if (h->transMethod & CFGTBL_Trans_io_accel1 || + h->transMethod & CFGTBL_Trans_io_accel2) { + *physical_mode = HPSA_REPORT_PHYS_EXTENDED; + physical_entry_size = 24; + } + if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, + *physical_mode)) { dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); return -1; } - *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8; + *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / + physical_entry_size; if (*nphysicals > HPSA_MAX_PHYS_LUN) { dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, @@ -1907,7 +2942,8 @@ static int hpsa_gather_lun_info(struct ctlr_info *h, } u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, - int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, + int nphysicals, int nlogicals, + struct ReportExtendedLUNdata *physdev_list, struct ReportLUNdata *logdev_list) { /* Helper function, figure out where the LUN ID info is coming from @@ -1922,7 +2958,8 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, return RAID_CTLR_LUNID; if (i < logicals_start) - return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; + return &physdev_list->LUN[i - + (raid_ctlr_position == 0)].lunid[0]; if (i < last_device) return &logdev_list->LUN[i - nphysicals - @@ -1931,6 +2968,29 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, return NULL; } +static int hpsa_hba_mode_enabled(struct ctlr_info *h) +{ + int rc; + int hba_mode_enabled; + struct bmic_controller_parameters *ctlr_params; + ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters), + GFP_KERNEL); + + if (!ctlr_params) + return -ENOMEM; + rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params, + sizeof(struct bmic_controller_parameters)); + if (rc) { + kfree(ctlr_params); + return rc; + } + + hba_mode_enabled = + ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0); + kfree(ctlr_params); + return hba_mode_enabled; +} + static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) { /* the idea here is we could get notified @@ -1943,16 +3003,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) * tell which devices we already know about, vs. new * devices, vs. disappearing devices. 
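
figure_lunaddrbytes() above maps a flat device index onto three ranges: the controller pseudo-LUN, then the physical list, then the logical list. The same partitioning as a stand-alone sketch, assuming logicals_start and last_device are computed the way the elided declarations imply (the controller occupies slot 0 on SCSI rev 5 adapters, otherwise the slot after all real devices):

enum lun_source { SRC_CTLR, SRC_PHYSICAL, SRC_LOGICAL, SRC_NONE };

static enum lun_source classify_index(int i, int raid_ctlr_position,
				      int nphysicals, int nlogicals)
{
	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals +
				(raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return SRC_CTLR;
	if (i < logicals_start)
		return SRC_PHYSICAL;
	if (i < last_device)
		return SRC_LOGICAL;
	return SRC_NONE;
}
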
*/ - struct ReportLUNdata *physdev_list = NULL; + struct ReportExtendedLUNdata *physdev_list = NULL; struct ReportLUNdata *logdev_list = NULL; u32 nphysicals = 0; u32 nlogicals = 0; + int physical_mode = 0; u32 ndev_allocated = 0; struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; int ncurrent = 0; - int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; + int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24; int i, n_ext_target_devs, ndevs_to_allocate; int raid_ctlr_position; + int rescan_hba_mode; DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); @@ -1966,8 +3028,20 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) } memset(lunzerobits, 0, sizeof(lunzerobits)); - if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals, - logdev_list, &nlogicals)) + rescan_hba_mode = hpsa_hba_mode_enabled(h); + if (rescan_hba_mode < 0) + goto out; + + if (!h->hba_mode_enabled && rescan_hba_mode) + dev_warn(&h->pdev->dev, "HBA mode enabled\n"); + else if (h->hba_mode_enabled && !rescan_hba_mode) + dev_warn(&h->pdev->dev, "HBA mode disabled\n"); + + h->hba_mode_enabled = rescan_hba_mode; + + if (hpsa_gather_lun_info(h, reportlunsize, + (struct ReportLUNdata *) physdev_list, &nphysicals, + &physical_mode, logdev_list, &nlogicals)) goto out; /* We might see up to the maximum number of logical and physical disks @@ -1994,7 +3068,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) ndev_allocated++; } - if (unlikely(is_scsi_rev_5(h))) + if (is_scsi_rev_5(h)) raid_ctlr_position = 0; else raid_ctlr_position = nphysicals + nlogicals; @@ -2048,9 +3122,28 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) ncurrent++; break; case TYPE_DISK: - if (i < nphysicals) + if (h->hba_mode_enabled) { + /* never use raid mapper in HBA mode */ + this_device->offload_enabled = 0; + ncurrent++; break; - ncurrent++; + } else if (h->acciopath_status) { + if (i >= nphysicals) { + ncurrent++; + break; + } + } else { + if (i < nphysicals) + break; + ncurrent++; + break; + } + if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) { + memcpy(&this_device->ioaccel_handle, + &lunaddrbytes[20], + sizeof(this_device->ioaccel_handle)); + ncurrent++; + } break; case TYPE_TAPE: case TYPE_MEDIUM_CHANGER: @@ -2120,7 +3213,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h, curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); curr_sg->Len = len; - curr_sg->Ext = 0; /* we are not chaining */ + curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST; curr_sg++; } @@ -2144,6 +3237,726 @@ sglist_finished: return 0; } +#define IO_ACCEL_INELIGIBLE (1) +static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) +{ + int is_write = 0; + u32 block; + u32 block_cnt; + + /* Perform some CDB fixups if needed using 10 byte reads/writes only */ + switch (cdb[0]) { + case WRITE_6: + case WRITE_12: + is_write = 1; + case READ_6: + case READ_12: + if (*cdb_len == 6) { + block = (((u32) cdb[2]) << 8) | cdb[3]; + block_cnt = cdb[4]; + } else { + BUG_ON(*cdb_len != 12); + block = (((u32) cdb[2]) << 24) | + (((u32) cdb[3]) << 16) | + (((u32) cdb[4]) << 8) | + cdb[5]; + block_cnt = + (((u32) cdb[6]) << 24) | + (((u32) cdb[7]) << 16) | + (((u32) cdb[8]) << 8) | + cdb[9]; + } + if (block_cnt > 0xffff) + return IO_ACCEL_INELIGIBLE; + + cdb[0] = is_write ? 
WRITE_10 : READ_10; + cdb[1] = 0; + cdb[2] = (u8) (block >> 24); + cdb[3] = (u8) (block >> 16); + cdb[4] = (u8) (block >> 8); + cdb[5] = (u8) (block); + cdb[6] = 0; + cdb[7] = (u8) (block_cnt >> 8); + cdb[8] = (u8) (block_cnt); + cdb[9] = 0; + *cdb_len = 10; + break; + } + return 0; +} + +static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, + struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, + u8 *scsi3addr) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; + unsigned int len; + unsigned int total_len = 0; + struct scatterlist *sg; + u64 addr64; + int use_sg, i; + struct SGDescriptor *curr_sg; + u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; + + /* TODO: implement chaining support */ + if (scsi_sg_count(cmd) > h->ioaccel_maxsg) + return IO_ACCEL_INELIGIBLE; + + BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); + + if (fixup_ioaccel_cdb(cdb, &cdb_len)) + return IO_ACCEL_INELIGIBLE; + + c->cmd_type = CMD_IOACCEL1; + + /* Adjust the DMA address to point to the accelerated command buffer */ + c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + + (c->cmdindex * sizeof(*cp)); + BUG_ON(c->busaddr & 0x0000007F); + + use_sg = scsi_dma_map(cmd); + if (use_sg < 0) + return use_sg; + + if (use_sg) { + curr_sg = cp->SG; + scsi_for_each_sg(cmd, sg, use_sg, i) { + addr64 = (u64) sg_dma_address(sg); + len = sg_dma_len(sg); + total_len += len; + curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); + curr_sg->Addr.upper = + (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); + curr_sg->Len = len; + + if (i == (scsi_sg_count(cmd) - 1)) + curr_sg->Ext = HPSA_SG_LAST; + else + curr_sg->Ext = 0; /* we are not chaining */ + curr_sg++; + } + + switch (cmd->sc_data_direction) { + case DMA_TO_DEVICE: + control |= IOACCEL1_CONTROL_DATA_OUT; + break; + case DMA_FROM_DEVICE: + control |= IOACCEL1_CONTROL_DATA_IN; + break; + case DMA_NONE: + control |= IOACCEL1_CONTROL_NODATAXFER; + break; + default: + dev_err(&h->pdev->dev, "unknown data direction: %d\n", + cmd->sc_data_direction); + BUG(); + break; + } + } else { + control |= IOACCEL1_CONTROL_NODATAXFER; + } + + c->Header.SGList = use_sg; + /* Fill out the command structure to submit */ + cp->dev_handle = ioaccel_handle & 0xFFFF; + cp->transfer_len = total_len; + cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ | + (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK); + cp->control = control; + memcpy(cp->CDB, cdb, cdb_len); + memcpy(cp->CISS_LUN, scsi3addr, 8); + /* Tag was already set at init time. */ + enqueue_cmd_and_start_io(h, c); + return 0; +} + +/* + * Queue a command directly to a device behind the controller using the + * I/O accelerator path. + */ +static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, + struct CommandList *c) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + + return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, + cmd->cmnd, cmd->cmd_len, dev->scsi3addr); +} + +/* + * Set encryption parameters for the ioaccel2 request + */ +static void set_encrypt_ioaccel2(struct ctlr_info *h, + struct CommandList *c, struct io_accel2_cmd *cp) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + struct raid_map_data *map = &dev->raid_map; + u64 first_block; + + BUG_ON(!(dev->offload_config && dev->offload_enabled)); + + /* Are we doing encryption on this device */ + if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON)) + return; + /* Set the data encryption key index. 
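
The encryption tweak values set a few lines below follow the rule stated in the comment there: for 512-byte blocks the tweak is the LBA itself; for any other block size it is (LBA * block size) / 512. Factored out of the per-opcode switch, the arithmetic reduces to this sketch (the driver inlines it per CDB type):

#include <stdint.h>

/* Tweak derivation used by the ioaccel2 encryption path. */
static uint64_t encrypt_tweak(uint64_t lba, uint32_t volume_blk_size)
{
	if (volume_blk_size == 512)
		return lba;
	return (lba * volume_blk_size) / 512;
}

/* The driver then splits the 64-bit result:
 *	cp->tweak_lower = (u32)tweak;
 *	cp->tweak_upper = (u32)(tweak >> 32);
 */
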
*/ + cp->dekindex = map->dekindex; + + /* Set the encryption enable flag, encoded into direction field. */ + cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; + + /* Set encryption tweak values based on logical block address + * If block size is 512, tweak value is LBA. + * For other block sizes, tweak is (LBA * block size)/ 512) + */ + switch (cmd->cmnd[0]) { + /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ + case WRITE_6: + case READ_6: + if (map->volume_blk_size == 512) { + cp->tweak_lower = + (((u32) cmd->cmnd[2]) << 8) | + cmd->cmnd[3]; + cp->tweak_upper = 0; + } else { + first_block = + (((u64) cmd->cmnd[2]) << 8) | + cmd->cmnd[3]; + first_block = (first_block * map->volume_blk_size)/512; + cp->tweak_lower = (u32)first_block; + cp->tweak_upper = (u32)(first_block >> 32); + } + break; + case WRITE_10: + case READ_10: + if (map->volume_blk_size == 512) { + cp->tweak_lower = + (((u32) cmd->cmnd[2]) << 24) | + (((u32) cmd->cmnd[3]) << 16) | + (((u32) cmd->cmnd[4]) << 8) | + cmd->cmnd[5]; + cp->tweak_upper = 0; + } else { + first_block = + (((u64) cmd->cmnd[2]) << 24) | + (((u64) cmd->cmnd[3]) << 16) | + (((u64) cmd->cmnd[4]) << 8) | + cmd->cmnd[5]; + first_block = (first_block * map->volume_blk_size)/512; + cp->tweak_lower = (u32)first_block; + cp->tweak_upper = (u32)(first_block >> 32); + } + break; + /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */ + case WRITE_12: + case READ_12: + if (map->volume_blk_size == 512) { + cp->tweak_lower = + (((u32) cmd->cmnd[2]) << 24) | + (((u32) cmd->cmnd[3]) << 16) | + (((u32) cmd->cmnd[4]) << 8) | + cmd->cmnd[5]; + cp->tweak_upper = 0; + } else { + first_block = + (((u64) cmd->cmnd[2]) << 24) | + (((u64) cmd->cmnd[3]) << 16) | + (((u64) cmd->cmnd[4]) << 8) | + cmd->cmnd[5]; + first_block = (first_block * map->volume_blk_size)/512; + cp->tweak_lower = (u32)first_block; + cp->tweak_upper = (u32)(first_block >> 32); + } + break; + case WRITE_16: + case READ_16: + if (map->volume_blk_size == 512) { + cp->tweak_lower = + (((u32) cmd->cmnd[6]) << 24) | + (((u32) cmd->cmnd[7]) << 16) | + (((u32) cmd->cmnd[8]) << 8) | + cmd->cmnd[9]; + cp->tweak_upper = + (((u32) cmd->cmnd[2]) << 24) | + (((u32) cmd->cmnd[3]) << 16) | + (((u32) cmd->cmnd[4]) << 8) | + cmd->cmnd[5]; + } else { + first_block = + (((u64) cmd->cmnd[2]) << 56) | + (((u64) cmd->cmnd[3]) << 48) | + (((u64) cmd->cmnd[4]) << 40) | + (((u64) cmd->cmnd[5]) << 32) | + (((u64) cmd->cmnd[6]) << 24) | + (((u64) cmd->cmnd[7]) << 16) | + (((u64) cmd->cmnd[8]) << 8) | + cmd->cmnd[9]; + first_block = (first_block * map->volume_blk_size)/512; + cp->tweak_lower = (u32)first_block; + cp->tweak_upper = (u32)(first_block >> 32); + } + break; + default: + dev_err(&h->pdev->dev, + "ERROR: %s: IOACCEL request CDB size not supported for encryption\n", + __func__); + BUG(); + break; + } +} + +static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, + struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, + u8 *scsi3addr) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; + struct ioaccel2_sg_element *curr_sg; + int use_sg, i; + struct scatterlist *sg; + u64 addr64; + u32 len; + u32 total_len = 0; + + if (scsi_sg_count(cmd) > h->ioaccel_maxsg) + return IO_ACCEL_INELIGIBLE; + + if (fixup_ioaccel_cdb(cdb, &cdb_len)) + return IO_ACCEL_INELIGIBLE; + c->cmd_type = CMD_IOACCEL2; + /* Adjust the DMA address to point to the accelerated command buffer */ + c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + + (c->cmdindex * 
sizeof(*cp)); + BUG_ON(c->busaddr & 0x0000007F); + + memset(cp, 0, sizeof(*cp)); + cp->IU_type = IOACCEL2_IU_TYPE; + + use_sg = scsi_dma_map(cmd); + if (use_sg < 0) + return use_sg; + + if (use_sg) { + BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); + curr_sg = cp->sg; + scsi_for_each_sg(cmd, sg, use_sg, i) { + addr64 = (u64) sg_dma_address(sg); + len = sg_dma_len(sg); + total_len += len; + curr_sg->address = cpu_to_le64(addr64); + curr_sg->length = cpu_to_le32(len); + curr_sg->reserved[0] = 0; + curr_sg->reserved[1] = 0; + curr_sg->reserved[2] = 0; + curr_sg->chain_indicator = 0; + curr_sg++; + } + + switch (cmd->sc_data_direction) { + case DMA_TO_DEVICE: + cp->direction &= ~IOACCEL2_DIRECTION_MASK; + cp->direction |= IOACCEL2_DIR_DATA_OUT; + break; + case DMA_FROM_DEVICE: + cp->direction &= ~IOACCEL2_DIRECTION_MASK; + cp->direction |= IOACCEL2_DIR_DATA_IN; + break; + case DMA_NONE: + cp->direction &= ~IOACCEL2_DIRECTION_MASK; + cp->direction |= IOACCEL2_DIR_NO_DATA; + break; + default: + dev_err(&h->pdev->dev, "unknown data direction: %d\n", + cmd->sc_data_direction); + BUG(); + break; + } + } else { + cp->direction &= ~IOACCEL2_DIRECTION_MASK; + cp->direction |= IOACCEL2_DIR_NO_DATA; + } + + /* Set encryption parameters, if necessary */ + set_encrypt_ioaccel2(h, c, cp); + + cp->scsi_nexus = ioaccel_handle; + cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) | + DIRECT_LOOKUP_BIT; + memcpy(cp->cdb, cdb, sizeof(cp->cdb)); + + /* fill in sg elements */ + cp->sg_count = (u8) use_sg; + + cp->data_len = cpu_to_le32(total_len); + cp->err_ptr = cpu_to_le64(c->busaddr + + offsetof(struct io_accel2_cmd, error_data)); + cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data)); + + enqueue_cmd_and_start_io(h, c); + return 0; +} + +/* + * Queue a command to the correct I/O accelerator path. + */ +static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, + struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, + u8 *scsi3addr) +{ + if (h->transMethod & CFGTBL_Trans_io_accel1) + return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, + cdb, cdb_len, scsi3addr); + else + return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, + cdb, cdb_len, scsi3addr); +} + +static void raid_map_helper(struct raid_map_data *map, + int offload_to_mirror, u32 *map_index, u32 *current_group) +{ + if (offload_to_mirror == 0) { + /* use physical disk in the first mirrored group. */ + *map_index %= map->data_disks_per_row; + return; + } + do { + /* determine mirror group that *map_index indicates */ + *current_group = *map_index / map->data_disks_per_row; + if (offload_to_mirror == *current_group) + continue; + if (*current_group < (map->layout_map_count - 1)) { + /* select map index from next group */ + *map_index += map->data_disks_per_row; + (*current_group)++; + } else { + /* select map index from first group */ + *map_index %= map->data_disks_per_row; + *current_group = 0; + } + } while (offload_to_mirror != *current_group); +} + +/* + * Attempt to perform offload RAID mapping for a logical volume I/O. 
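
The RAID mapper defined below begins by pulling the LBA and block count out of each READ/WRITE CDB variant with explicit shift-and-or expressions. For the 10-byte case the same extraction can be written with the kernel's unaligned big-endian helpers; an equivalent sketch, not the style the patch uses:

#include <linux/types.h>
#include <asm/unaligned.h>

/* READ(10)/WRITE(10): 32-bit LBA at bytes 2-5, 16-bit count at 7-8. */
static void cdb10_lba_count(const u8 *cdb, u64 *lba, u32 *count)
{
	*lba = get_unaligned_be32(&cdb[2]);
	*count = get_unaligned_be16(&cdb[7]);
}
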
+ */ +static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, + struct CommandList *c) +{ + struct scsi_cmnd *cmd = c->scsi_cmd; + struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + struct raid_map_data *map = &dev->raid_map; + struct raid_map_disk_data *dd = &map->data[0]; + int is_write = 0; + u32 map_index; + u64 first_block, last_block; + u32 block_cnt; + u32 blocks_per_row; + u64 first_row, last_row; + u32 first_row_offset, last_row_offset; + u32 first_column, last_column; + u64 r0_first_row, r0_last_row; + u32 r5or6_blocks_per_row; + u64 r5or6_first_row, r5or6_last_row; + u32 r5or6_first_row_offset, r5or6_last_row_offset; + u32 r5or6_first_column, r5or6_last_column; + u32 total_disks_per_row; + u32 stripesize; + u32 first_group, last_group, current_group; + u32 map_row; + u32 disk_handle; + u64 disk_block; + u32 disk_block_cnt; + u8 cdb[16]; + u8 cdb_len; +#if BITS_PER_LONG == 32 + u64 tmpdiv; +#endif + int offload_to_mirror; + + BUG_ON(!(dev->offload_config && dev->offload_enabled)); + + /* check for valid opcode, get LBA and block count */ + switch (cmd->cmnd[0]) { + case WRITE_6: + is_write = 1; + case READ_6: + first_block = + (((u64) cmd->cmnd[2]) << 8) | + cmd->cmnd[3]; + block_cnt = cmd->cmnd[4]; + break; + case WRITE_10: + is_write = 1; + case READ_10: + first_block = + (((u64) cmd->cmnd[2]) << 24) | + (((u64) cmd->cmnd[3]) << 16) | + (((u64) cmd->cmnd[4]) << 8) | + cmd->cmnd[5]; + block_cnt = + (((u32) cmd->cmnd[7]) << 8) | + cmd->cmnd[8]; + break; + case WRITE_12: + is_write = 1; + case READ_12: + first_block = + (((u64) cmd->cmnd[2]) << 24) | + (((u64) cmd->cmnd[3]) << 16) | + (((u64) cmd->cmnd[4]) << 8) | + cmd->cmnd[5]; + block_cnt = + (((u32) cmd->cmnd[6]) << 24) | + (((u32) cmd->cmnd[7]) << 16) | + (((u32) cmd->cmnd[8]) << 8) | + cmd->cmnd[9]; + break; + case WRITE_16: + is_write = 1; + case READ_16: + first_block = + (((u64) cmd->cmnd[2]) << 56) | + (((u64) cmd->cmnd[3]) << 48) | + (((u64) cmd->cmnd[4]) << 40) | + (((u64) cmd->cmnd[5]) << 32) | + (((u64) cmd->cmnd[6]) << 24) | + (((u64) cmd->cmnd[7]) << 16) | + (((u64) cmd->cmnd[8]) << 8) | + cmd->cmnd[9]; + block_cnt = + (((u32) cmd->cmnd[10]) << 24) | + (((u32) cmd->cmnd[11]) << 16) | + (((u32) cmd->cmnd[12]) << 8) | + cmd->cmnd[13]; + break; + default: + return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ + } + BUG_ON(block_cnt == 0); + last_block = first_block + block_cnt - 1; + + /* check for write to non-RAID-0 */ + if (is_write && dev->raid_level != 0) + return IO_ACCEL_INELIGIBLE; + + /* check for invalid block or wraparound */ + if (last_block >= map->volume_blk_cnt || last_block < first_block) + return IO_ACCEL_INELIGIBLE; + + /* calculate stripe information for the request */ + blocks_per_row = map->data_disks_per_row * map->strip_size; +#if BITS_PER_LONG == 32 + tmpdiv = first_block; + (void) do_div(tmpdiv, blocks_per_row); + first_row = tmpdiv; + tmpdiv = last_block; + (void) do_div(tmpdiv, blocks_per_row); + last_row = tmpdiv; + first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); + last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); + tmpdiv = first_row_offset; + (void) do_div(tmpdiv, map->strip_size); + first_column = tmpdiv; + tmpdiv = last_row_offset; + (void) do_div(tmpdiv, map->strip_size); + last_column = tmpdiv; +#else + first_row = first_block / blocks_per_row; + last_row = last_block / blocks_per_row; + first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); + last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); + 
first_column = first_row_offset / map->strip_size; + last_column = last_row_offset / map->strip_size; +#endif + + /* if this isn't a single row/column then give to the controller */ + if ((first_row != last_row) || (first_column != last_column)) + return IO_ACCEL_INELIGIBLE; + + /* proceeding with driver mapping */ + total_disks_per_row = map->data_disks_per_row + + map->metadata_disks_per_row; + map_row = ((u32)(first_row >> map->parity_rotation_shift)) % + map->row_cnt; + map_index = (map_row * total_disks_per_row) + first_column; + + switch (dev->raid_level) { + case HPSA_RAID_0: + break; /* nothing special to do */ + case HPSA_RAID_1: + /* Handles load balance across RAID 1 members. + * (2-drive R1 and R10 with even # of drives.) + * Appropriate for SSDs, not optimal for HDDs + */ + BUG_ON(map->layout_map_count != 2); + if (dev->offload_to_mirror) + map_index += map->data_disks_per_row; + dev->offload_to_mirror = !dev->offload_to_mirror; + break; + case HPSA_RAID_ADM: + /* Handles N-way mirrors (R1-ADM) + * and R10 with # of drives divisible by 3.) + */ + BUG_ON(map->layout_map_count != 3); + + offload_to_mirror = dev->offload_to_mirror; + raid_map_helper(map, offload_to_mirror, + &map_index, ¤t_group); + /* set mirror group to use next time */ + offload_to_mirror = + (offload_to_mirror >= map->layout_map_count - 1) + ? 0 : offload_to_mirror + 1; + /* FIXME: remove after debug/dev */ + BUG_ON(offload_to_mirror >= map->layout_map_count); + dev_warn(&h->pdev->dev, + "DEBUG: Using physical disk map index %d from mirror group %d\n", + map_index, offload_to_mirror); + dev->offload_to_mirror = offload_to_mirror; + /* Avoid direct use of dev->offload_to_mirror within this + * function since multiple threads might simultaneously + * increment it beyond the range of dev->layout_map_count -1. 
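
The HPSA_RAID_ADM case above advances dev->offload_to_mirror round-robin and deliberately tolerates a racy read-modify-write, because any valid mirror group holds a full copy of the data and therefore produces a correct read. The rotation itself reduces to:

/* Round-robin over mirror groups, wrapping at layout_map_count.
 * A lost update merely repeats a group, which is still correct. */
static int next_mirror_group(int current_group, int layout_map_count)
{
	return (current_group >= layout_map_count - 1)
		? 0 : current_group + 1;
}
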
+ */ + break; + case HPSA_RAID_5: + case HPSA_RAID_6: + if (map->layout_map_count <= 1) + break; + + /* Verify first and last block are in same RAID group */ + r5or6_blocks_per_row = + map->strip_size * map->data_disks_per_row; + BUG_ON(r5or6_blocks_per_row == 0); + stripesize = r5or6_blocks_per_row * map->layout_map_count; +#if BITS_PER_LONG == 32 + tmpdiv = first_block; + first_group = do_div(tmpdiv, stripesize); + tmpdiv = first_group; + (void) do_div(tmpdiv, r5or6_blocks_per_row); + first_group = tmpdiv; + tmpdiv = last_block; + last_group = do_div(tmpdiv, stripesize); + tmpdiv = last_group; + (void) do_div(tmpdiv, r5or6_blocks_per_row); + last_group = tmpdiv; +#else + first_group = (first_block % stripesize) / r5or6_blocks_per_row; + last_group = (last_block % stripesize) / r5or6_blocks_per_row; +#endif + if (first_group != last_group) + return IO_ACCEL_INELIGIBLE; + + /* Verify request is in a single row of RAID 5/6 */ +#if BITS_PER_LONG == 32 + tmpdiv = first_block; + (void) do_div(tmpdiv, stripesize); + first_row = r5or6_first_row = r0_first_row = tmpdiv; + tmpdiv = last_block; + (void) do_div(tmpdiv, stripesize); + r5or6_last_row = r0_last_row = tmpdiv; +#else + first_row = r5or6_first_row = r0_first_row = + first_block / stripesize; + r5or6_last_row = r0_last_row = last_block / stripesize; +#endif + if (r5or6_first_row != r5or6_last_row) + return IO_ACCEL_INELIGIBLE; + + + /* Verify request is in a single column */ +#if BITS_PER_LONG == 32 + tmpdiv = first_block; + first_row_offset = do_div(tmpdiv, stripesize); + tmpdiv = first_row_offset; + first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); + r5or6_first_row_offset = first_row_offset; + tmpdiv = last_block; + r5or6_last_row_offset = do_div(tmpdiv, stripesize); + tmpdiv = r5or6_last_row_offset; + r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); + tmpdiv = r5or6_first_row_offset; + (void) do_div(tmpdiv, map->strip_size); + first_column = r5or6_first_column = tmpdiv; + tmpdiv = r5or6_last_row_offset; + (void) do_div(tmpdiv, map->strip_size); + r5or6_last_column = tmpdiv; +#else + first_row_offset = r5or6_first_row_offset = + (u32)((first_block % stripesize) % + r5or6_blocks_per_row); + + r5or6_last_row_offset = + (u32)((last_block % stripesize) % + r5or6_blocks_per_row); + + first_column = r5or6_first_column = + r5or6_first_row_offset / map->strip_size; + r5or6_last_column = + r5or6_last_row_offset / map->strip_size; +#endif + if (r5or6_first_column != r5or6_last_column) + return IO_ACCEL_INELIGIBLE; + + /* Request is eligible */ + map_row = ((u32)(first_row >> map->parity_rotation_shift)) % + map->row_cnt; + + map_index = (first_group * + (map->row_cnt * total_disks_per_row)) + + (map_row * total_disks_per_row) + first_column; + break; + default: + return IO_ACCEL_INELIGIBLE; + } + + disk_handle = dd[map_index].ioaccel_handle; + disk_block = map->disk_starting_blk + (first_row * map->strip_size) + + (first_row_offset - (first_column * map->strip_size)); + disk_block_cnt = block_cnt; + + /* handle differing logical/physical block sizes */ + if (map->phys_blk_shift) { + disk_block <<= map->phys_blk_shift; + disk_block_cnt <<= map->phys_blk_shift; + } + BUG_ON(disk_block_cnt > 0xffff); + + /* build the new CDB for the physical disk I/O */ + if (disk_block > 0xffffffff) { + cdb[0] = is_write ? 
WRITE_16 : READ_16; + cdb[1] = 0; + cdb[2] = (u8) (disk_block >> 56); + cdb[3] = (u8) (disk_block >> 48); + cdb[4] = (u8) (disk_block >> 40); + cdb[5] = (u8) (disk_block >> 32); + cdb[6] = (u8) (disk_block >> 24); + cdb[7] = (u8) (disk_block >> 16); + cdb[8] = (u8) (disk_block >> 8); + cdb[9] = (u8) (disk_block); + cdb[10] = (u8) (disk_block_cnt >> 24); + cdb[11] = (u8) (disk_block_cnt >> 16); + cdb[12] = (u8) (disk_block_cnt >> 8); + cdb[13] = (u8) (disk_block_cnt); + cdb[14] = 0; + cdb[15] = 0; + cdb_len = 16; + } else { + cdb[0] = is_write ? WRITE_10 : READ_10; + cdb[1] = 0; + cdb[2] = (u8) (disk_block >> 24); + cdb[3] = (u8) (disk_block >> 16); + cdb[4] = (u8) (disk_block >> 8); + cdb[5] = (u8) (disk_block); + cdb[6] = 0; + cdb[7] = (u8) (disk_block_cnt >> 8); + cdb[8] = (u8) (disk_block_cnt); + cdb[9] = 0; + cdb_len = 10; + } + return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, + dev->scsi3addr); +} static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) @@ -2152,7 +3965,7 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, struct hpsa_scsi_dev_t *dev; unsigned char scsi3addr[8]; struct CommandList *c; - unsigned long flags; + int rc = 0; /* Get the ptr to our adapter structure out of cmd->host. */ h = sdev_to_hba(cmd->device); @@ -2164,14 +3977,11 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, } memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); - spin_lock_irqsave(&h->lock, flags); - if (unlikely(h->lockup_detected)) { - spin_unlock_irqrestore(&h->lock, flags); + if (unlikely(lockup_detected(h))) { cmd->result = DID_ERROR << 16; done(cmd); return 0; } - spin_unlock_irqrestore(&h->lock, flags); c = cmd_alloc(h); if (c == NULL) { /* trouble... */ dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); @@ -2187,6 +3997,32 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, c->cmd_type = CMD_SCSI; c->scsi_cmd = cmd; + + /* Call alternate submit routine for I/O accelerated commands. + * Retries always go down the normal I/O path. + */ + if (likely(cmd->retries == 0 && + cmd->request->cmd_type == REQ_TYPE_FS && + h->acciopath_status)) { + if (dev->offload_enabled) { + rc = hpsa_scsi_ioaccel_raid_map(h, c); + if (rc == 0) + return 0; /* Sent on ioaccel path */ + if (rc < 0) { /* scsi_dma_map failed. */ + cmd_free(h, c); + return SCSI_MLQUEUE_HOST_BUSY; + } + } else if (dev->ioaccel_handle) { + rc = hpsa_scsi_ioaccel_direct_map(h, c); + if (rc == 0) + return 0; /* Sent on direct map path */ + if (rc < 0) { /* scsi_dma_map failed. */ + cmd_free(h, c); + return SCSI_MLQUEUE_HOST_BUSY; + } + } + } + c->Header.ReplyQueue = 0; /* unused in simple mode */ memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); @@ -2246,11 +4082,35 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, static DEF_SCSI_QCMD(hpsa_scsi_queue_command) +static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) +{ + unsigned long flags; + + /* + * Don't let rescans be initiated on a controller known + * to be locked up. If the controller locks up *during* + * a rescan, that thread is probably hosed, but at least + * we can prevent new rescan threads from piling up on a + * locked up controller. 
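
do_not_scan_if_controller_locked_up(), whose body continues below, both refuses the scan and marks any pending scan finished so threads sleeping in hpsa_scan_start() are not stranded. Note it is called twice, once before the wait loop and once after, covering a lockup that lands mid-wait. In caller form the pattern is (helper names in the loop body are hypothetical placeholders):

/* Check-wait-recheck: a flag set asynchronously (controller lockup)
 * must be tested again after any blocking wait, or the waiter can
 * proceed on stale state. */
if (do_not_scan_if_controller_locked_up(h))
	return;
wait_for_previous_scan_to_finish(h);	/* may block for seconds */
if (do_not_scan_if_controller_locked_up(h))
	return;
start_new_scan(h);
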
+ */ + if (unlikely(lockup_detected(h))) { + spin_lock_irqsave(&h->scan_lock, flags); + h->scan_finished = 1; + wake_up_all(&h->scan_wait_queue); + spin_unlock_irqrestore(&h->scan_lock, flags); + return 1; + } + return 0; +} + static void hpsa_scan_start(struct Scsi_Host *sh) { struct ctlr_info *h = shost_to_hba(sh); unsigned long flags; + if (do_not_scan_if_controller_locked_up(h)) + return; + /* wait until any scan already in progress is finished. */ while (1) { spin_lock_irqsave(&h->scan_lock, flags); @@ -2267,6 +4127,9 @@ static void hpsa_scan_start(struct Scsi_Host *sh) h->scan_finished = 0; /* mark scan as in progress */ spin_unlock_irqrestore(&h->scan_lock, flags); + if (do_not_scan_if_controller_locked_up(h)) + return; + hpsa_update_scsi_devices(h, h->scsi_host->host_no); spin_lock_irqsave(&h->scan_lock, flags); @@ -2330,7 +4193,10 @@ static int hpsa_register_scsi(struct ctlr_info *h) sh->max_lun = HPSA_MAX_LUN; sh->max_id = HPSA_MAX_LUN; sh->can_queue = h->nr_cmds; - sh->cmd_per_lun = h->nr_cmds; + if (h->hba_mode_enabled) + sh->cmd_per_lun = 7; + else + sh->cmd_per_lun = h->nr_cmds; sh->sg_tablesize = h->maxsgentries; h->scsi_host = sh; sh->hostdata[0] = (unsigned long) h; @@ -2356,7 +4222,7 @@ static int hpsa_register_scsi(struct ctlr_info *h) static int wait_for_device_to_become_ready(struct ctlr_info *h, unsigned char lunaddr[]) { - int rc = 0; + int rc; int count = 0; int waittime = 1; /* seconds */ struct CommandList *c; @@ -2376,6 +4242,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h, */ msleep(1000 * waittime); count++; + rc = 0; /* Device ready. */ /* Increase wait time with each try, up to a point. */ if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) @@ -2432,7 +4299,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", h->scsi_host->host_no, dev->bus, dev->target, dev->lun); /* send a reset to the SCSI LUN which the command was sent to */ - rc = hpsa_send_reset(h, dev->scsi3addr); + rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN); if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) return SUCCESS; @@ -2455,12 +4322,36 @@ static void swizzle_abort_tag(u8 *tag) tag[7] = original_tag[4]; } +static void hpsa_get_tag(struct ctlr_info *h, + struct CommandList *c, u32 *taglower, u32 *tagupper) +{ + if (c->cmd_type == CMD_IOACCEL1) { + struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) + &h->ioaccel_cmd_pool[c->cmdindex]; + *tagupper = cm1->Tag.upper; + *taglower = cm1->Tag.lower; + return; + } + if (c->cmd_type == CMD_IOACCEL2) { + struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) + &h->ioaccel2_cmd_pool[c->cmdindex]; + /* upper tag not used in ioaccel2 mode */ + memset(tagupper, 0, sizeof(*tagupper)); + *taglower = cm2->Tag; + return; + } + *tagupper = c->Header.Tag.upper; + *taglower = c->Header.Tag.lower; +} + + static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, struct CommandList *abort, int swizzle) { int rc = IO_OK; struct CommandList *c; struct ErrorInfo *ei; + u32 tagupper, taglower; c = cmd_special_alloc(h); if (c == NULL) { /* trouble... 
*/ @@ -2474,8 +4365,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, if (swizzle) swizzle_abort_tag(&c->Request.CDB[4]); hpsa_scsi_do_simple_cmd_core(h, c); + hpsa_get_tag(h, abort, &taglower, &tagupper); dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", - __func__, abort->Header.Tag.upper, abort->Header.Tag.lower); + __func__, tagupper, taglower); /* no unmap needed here because no data xfer. */ ei = c->err_info; @@ -2487,15 +4379,14 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, break; default: dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", - __func__, abort->Header.Tag.upper, - abort->Header.Tag.lower); - hpsa_scsi_interpret_error(c); + __func__, tagupper, taglower); + hpsa_scsi_interpret_error(h, c); rc = -1; break; } cmd_special_free(h, c); - dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, - abort->Header.Tag.upper, abort->Header.Tag.lower); + dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", + __func__, tagupper, taglower); return rc; } @@ -2549,6 +4440,83 @@ static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h, return NULL; } +/* ioaccel2 path firmware cannot handle abort task requests. + * Change abort requests to physical target reset, and send to the + * address of the physical disk used for the ioaccel 2 command. + * Return 0 on success (IO_OK) + * -1 on failure + */ + +static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, + unsigned char *scsi3addr, struct CommandList *abort) +{ + int rc = IO_OK; + struct scsi_cmnd *scmd; /* scsi command within request being aborted */ + struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ + unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ + unsigned char *psa = &phys_scsi3addr[0]; + + /* Get a pointer to the hpsa logical device. */ + scmd = (struct scsi_cmnd *) abort->scsi_cmd; + dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); + if (dev == NULL) { + dev_warn(&h->pdev->dev, + "Cannot abort: no device pointer for command.\n"); + return -1; /* not abortable */ + } + + if (h->raid_offload_debug > 0) + dev_info(&h->pdev->dev, + "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + h->scsi_host->host_no, dev->bus, dev->target, dev->lun, + scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], + scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); + + if (!dev->offload_enabled) { + dev_warn(&h->pdev->dev, + "Can't abort: device is not operating in HP SSD Smart Path mode.\n"); + return -1; /* not abortable */ + } + + /* Incoming scsi3addr is logical addr. We need physical disk addr. 
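
hpsa_send_reset_as_abort_ioaccel2(), whose body continues below, performs a three-step fallback: translate the logical address to the physical disk behind the ioaccel2 command, reset that target, then poll until it answers TEST UNIT READY again. Its control flow, reduced to a skeleton (all three helpers appear in this patch; the wrapper name is illustrative):

static int abort_by_reset(struct ctlr_info *h, struct CommandList *abort)
{
	unsigned char psa[8];	/* physical disk address */

	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa))
		return -1;			/* can't translate */
	if (hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET))
		return -1;			/* reset failed */
	if (wait_for_device_to_become_ready(h, psa))
		return -1;			/* never came back */
	return 0;				/* IO_OK */
}
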
*/ + if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { + dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); + return -1; /* not abortable */ + } + + /* send the reset */ + if (h->raid_offload_debug > 0) + dev_info(&h->pdev->dev, + "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + psa[0], psa[1], psa[2], psa[3], + psa[4], psa[5], psa[6], psa[7]); + rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET); + if (rc != 0) { + dev_warn(&h->pdev->dev, + "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + psa[0], psa[1], psa[2], psa[3], + psa[4], psa[5], psa[6], psa[7]); + return rc; /* failed to reset */ + } + + /* wait for device to recover */ + if (wait_for_device_to_become_ready(h, psa) != 0) { + dev_warn(&h->pdev->dev, + "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + psa[0], psa[1], psa[2], psa[3], + psa[4], psa[5], psa[6], psa[7]); + return -1; /* failed to recover */ + } + + /* device recovered */ + dev_info(&h->pdev->dev, + "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + psa[0], psa[1], psa[2], psa[3], + psa[4], psa[5], psa[6], psa[7]); + + return rc; /* success */ +} + /* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to * tell which kind we're dealing with, so we send the abort both ways. There * shouldn't be any collisions between swizzled and unswizzled tags due to the @@ -2562,6 +4530,14 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h, struct CommandList *c; int rc = 0, rc2 = 0; + /* io accelerator mode 2 commands should be aborted via the + * accelerated path, since the RAID path is unaware of these commands, + * but the underlying firmware can't handle the abort TMF. + * Change the abort to a physical device reset. + */ + if (abort->cmd_type == CMD_IOACCEL2) + return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); + /* we do not expect to find the swizzled tag in our queue, but * check anyway just to be sure the assumptions which make this * the case haven't become wrong. @@ -2600,6 +4576,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ char msg[256]; /* For debug messaging. 
*/ int ml = 0; + u32 tagupper, taglower; /* Find the controller of the command to be aborted */ h = sdev_to_hba(sc->device); @@ -2632,9 +4609,8 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) msg); return FAILED; } - - ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", - abort->Header.Tag.upper, abort->Header.Tag.lower); + hpsa_get_tag(h, abort, &taglower, &tagupper); + ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); as = (struct scsi_cmnd *) abort->scsi_cmd; if (as != NULL) ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", @@ -2721,7 +4697,6 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) } while (test_and_set_bit (i & (BITS_PER_LONG - 1), h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); - h->nr_allocs++; spin_unlock_irqrestore(&h->lock, flags); c = h->cmd_pool + i; @@ -2761,6 +4736,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h) return NULL; memset(c, 0, sizeof(*c)); + c->cmd_type = CMD_SCSI; c->cmdindex = -1; c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), @@ -2793,7 +4769,6 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c) spin_lock_irqsave(&h->lock, flags); clear_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits + (i / BITS_PER_LONG)); - h->nr_frees++; spin_unlock_irqrestore(&h->lock, flags); } @@ -2975,7 +4950,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) buff = kmalloc(iocommand.buf_size, GFP_KERNEL); if (buff == NULL) return -EFAULT; - if (iocommand.Request.Type.Direction == XFER_WRITE) { + if (iocommand.Request.Type.Direction & XFER_WRITE) { /* Copy the data into the buffer we created */ if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) { @@ -3024,7 +4999,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) c->SG[0].Addr.lower = temp64.val32.lower; c->SG[0].Addr.upper = temp64.val32.upper; c->SG[0].Len = iocommand.buf_size; - c->SG[0].Ext = 0; /* we are not chaining*/ + c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/ } hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); if (iocommand.buf_size > 0) @@ -3038,7 +5013,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) rc = -EFAULT; goto out; } - if (iocommand.Request.Type.Direction == XFER_READ && + if ((iocommand.Request.Type.Direction & XFER_READ) && iocommand.buf_size > 0) { /* Copy the data out of the buffer we created */ if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { @@ -3115,7 +5090,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) status = -ENOMEM; goto cleanup1; } - if (ioc->Request.Type.Direction == XFER_WRITE) { + if (ioc->Request.Type.Direction & XFER_WRITE) { if (copy_from_user(buff[sg_used], data_ptr, sz)) { status = -ENOMEM; goto cleanup1; @@ -3149,13 +5124,12 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) hpsa_pci_unmap(h->pdev, c, i, PCI_DMA_BIDIRECTIONAL); status = -ENOMEM; - goto cleanup1; + goto cleanup0; } c->SG[i].Addr.lower = temp64.val32.lower; c->SG[i].Addr.upper = temp64.val32.upper; c->SG[i].Len = buff_size[i]; - /* we are not chaining */ - c->SG[i].Ext = 0; + c->SG[i].Ext = i < sg_used - 1 ? 
0 : HPSA_SG_LAST; } } hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); @@ -3165,24 +5139,23 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) /* Copy the error information out */ memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); if (copy_to_user(argp, ioc, sizeof(*ioc))) { - cmd_special_free(h, c); status = -EFAULT; - goto cleanup1; + goto cleanup0; } - if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { + if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { /* Copy the data out of the buffer we created */ BYTE __user *ptr = ioc->buf; for (i = 0; i < sg_used; i++) { if (copy_to_user(ptr, buff[i], buff_size[i])) { - cmd_special_free(h, c); status = -EFAULT; - goto cleanup1; + goto cleanup0; } ptr += buff_size[i]; } } - cmd_special_free(h, c); status = 0; +cleanup0: + cmd_special_free(h, c); cleanup1: if (buff) { for (i = 0; i < sg_used; i++) @@ -3201,6 +5174,36 @@ static void check_ioctl_unit_attention(struct ctlr_info *h, c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) (void) check_for_unit_attention(h, c); } + +static int increment_passthru_count(struct ctlr_info *h) +{ + unsigned long flags; + + spin_lock_irqsave(&h->passthru_count_lock, flags); + if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) { + spin_unlock_irqrestore(&h->passthru_count_lock, flags); + return -1; + } + h->passthru_count++; + spin_unlock_irqrestore(&h->passthru_count_lock, flags); + return 0; +} + +static void decrement_passthru_count(struct ctlr_info *h) +{ + unsigned long flags; + + spin_lock_irqsave(&h->passthru_count_lock, flags); + if (h->passthru_count <= 0) { + spin_unlock_irqrestore(&h->passthru_count_lock, flags); + /* not expecting to get here. */ + dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n"); + return; + } + h->passthru_count--; + spin_unlock_irqrestore(&h->passthru_count_lock, flags); +} + /* * ioctl */ @@ -3208,6 +5211,7 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) { struct ctlr_info *h; void __user *argp = (void __user *)arg; + int rc; h = sdev_to_hba(dev); @@ -3222,9 +5226,17 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) case CCISS_GETDRIVVER: return hpsa_getdrivver_ioctl(h, argp); case CCISS_PASSTHRU: - return hpsa_passthru_ioctl(h, argp); + if (increment_passthru_count(h)) + return -EAGAIN; + rc = hpsa_passthru_ioctl(h, argp); + decrement_passthru_count(h); + return rc; case CCISS_BIG_PASSTHRU: - return hpsa_big_passthru_ioctl(h, argp); + if (increment_passthru_count(h)) + return -EAGAIN; + rc = hpsa_big_passthru_ioctl(h, argp); + decrement_passthru_count(h); + return rc; default: return -ENOTTY; } @@ -3252,7 +5264,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, } static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, - void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, + void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, int cmd_type) { int pci_dir = XFER_NONE; @@ -3275,9 +5287,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, switch (cmd) { case HPSA_INQUIRY: /* are we trying to read a vital product page */ - if (page_code != 0) { + if (page_code & VPD_PAGE) { c->Request.CDB[1] = 0x01; - c->Request.CDB[2] = page_code; + c->Request.CDB[2] = (page_code & 0xff); } c->Request.CDBLen = 6; c->Request.Type.Attribute = ATTR_SIMPLE; @@ -3317,6 +5329,28 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 
c->Request.Type.Direction = XFER_NONE; c->Request.Timeout = 0; break; + case HPSA_GET_RAID_MAP: + c->Request.CDBLen = 12; + c->Request.Type.Attribute = ATTR_SIMPLE; + c->Request.Type.Direction = XFER_READ; + c->Request.Timeout = 0; + c->Request.CDB[0] = HPSA_CISS_READ; + c->Request.CDB[1] = cmd; + c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0xFF; + c->Request.CDB[9] = size & 0xFF; + break; + case BMIC_SENSE_CONTROLLER_PARAMETERS: + c->Request.CDBLen = 10; + c->Request.Type.Attribute = ATTR_SIMPLE; + c->Request.Type.Direction = XFER_READ; + c->Request.Timeout = 0; + c->Request.CDB[0] = BMIC_READ; + c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; + c->Request.CDB[7] = (size >> 16) & 0xFF; + c->Request.CDB[8] = (size >> 8) & 0xFF; + break; default: dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd); BUG(); @@ -3412,20 +5446,21 @@ static void __iomem *remap_pci_mem(ulong base, ulong size) /* Takes cmds off the submission queue and sends them to the hardware, * then puts them on the queue of cmds waiting for completion. + * Assumes h->lock is held */ -static void start_io(struct ctlr_info *h) +static void start_io(struct ctlr_info *h, unsigned long *flags) { struct CommandList *c; - unsigned long flags; - spin_lock_irqsave(&h->lock, flags); while (!list_empty(&h->reqQ)) { c = list_entry(h->reqQ.next, struct CommandList, list); /* can't do anything if fifo is full */ if ((h->access.fifo_full(h))) { + h->fifo_recently_full = 1; dev_warn(&h->pdev->dev, "fifo full\n"); break; } + h->fifo_recently_full = 0; /* Get the first entry from the Request Q */ removeQ(c); @@ -3439,14 +5474,20 @@ static void start_io(struct ctlr_info *h) * condition. */ h->commands_outstanding++; - if (h->commands_outstanding > h->max_outstanding) - h->max_outstanding = h->commands_outstanding; /* Tell the controller execute command */ - spin_unlock_irqrestore(&h->lock, flags); + spin_unlock_irqrestore(&h->lock, *flags); h->access.submit_command(h, c); - spin_lock_irqsave(&h->lock, flags); + spin_lock_irqsave(&h->lock, *flags); } +} + +static void lock_and_start_io(struct ctlr_info *h) +{ + unsigned long flags; + + spin_lock_irqsave(&h->lock, flags); + start_io(h, &flags); spin_unlock_irqrestore(&h->lock, flags); } @@ -3479,15 +5520,42 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index, static inline void finish_cmd(struct CommandList *c) { unsigned long flags; + int io_may_be_stalled = 0; + struct ctlr_info *h = c->h; - spin_lock_irqsave(&c->h->lock, flags); + spin_lock_irqsave(&h->lock, flags); removeQ(c); - spin_unlock_irqrestore(&c->h->lock, flags); + + /* + * Check for possibly stalled i/o. + * + * If a fifo_full condition is encountered, requests will back up + * in h->reqQ. This queue is only emptied out by start_io which is + * only called when a new i/o request comes in. If no i/o's are + * forthcoming, the i/o's in h->reqQ can get stuck. So we call + * start_io from here if we detect such a danger. + * + * Normally, we shouldn't hit this case, but pounding on the + * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if + * commands_outstanding is low. We want to avoid calling + * start_io from in here as much as possible, and esp. don't + * want to get in a cycle where we call start_io every time + * through here. 
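+ * + * Illustrative worst case: the fifo fills while requests sit on + * h->reqQ, every outstanding command then completes, and no new + * request arrives to trigger start_io; without this check those + * queued requests would wait indefinitely.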
+ */ + if (unlikely(h->fifo_recently_full) && + h->commands_outstanding < 5) + io_may_be_stalled = 1; + + spin_unlock_irqrestore(&h->lock, flags); + dial_up_lockup_detection_on_fw_flash_complete(c->h, c); - if (likely(c->cmd_type == CMD_SCSI)) + if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI + || c->cmd_type == CMD_IOACCEL2)) complete_scsi_command(c); else if (c->cmd_type == CMD_IOCTL_PEND) complete(c->waiting); + if (unlikely(io_may_be_stalled)) + lock_and_start_io(h); } static inline u32 hpsa_tag_contains_index(u32 tag) @@ -3763,6 +5831,13 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev, */ dev_info(&pdev->dev, "using doorbell to reset controller\n"); writel(use_doorbell, vaddr + SA5_DOORBELL); + + /* PMC hardware guys tell us we need a 10 second delay after + * doorbell reset and before any attempt to talk to the board + * at all to ensure that this actually works and doesn't fall + * over in some weird corner cases. + */ + msleep(10000); } else { /* Try to do it the PCI power state way */ /* Quoting from the Open CISS Specification: "The Power @@ -3959,16 +6034,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) need a little pause here */ msleep(HPSA_POST_RESET_PAUSE_MSECS); - /* Wait for board to become not ready, then ready. */ - dev_info(&pdev->dev, "Waiting for board to reset.\n"); - rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); - if (rc) { - dev_warn(&pdev->dev, - "failed waiting for board to reset." - " Will try soft reset.\n"); - rc = -ENOTSUPP; /* Not expected, but try soft reset later */ - goto unmap_cfgtable; - } rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); if (rc) { dev_warn(&pdev->dev, @@ -4092,21 +6157,26 @@ static void hpsa_interrupt_mode(struct ctlr_info *h) goto default_int_mode; if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { dev_info(&h->pdev->dev, "MSIX\n"); + h->msix_vector = MAX_REPLY_QUEUES; + if (h->msix_vector > num_online_cpus()) + h->msix_vector = num_online_cpus(); err = pci_enable_msix(h->pdev, hpsa_msix_entries, - MAX_REPLY_QUEUES); - if (!err) { - for (i = 0; i < MAX_REPLY_QUEUES; i++) - h->intr[i] = hpsa_msix_entries[i].vector; - h->msix_vector = 1; - return; - } + h->msix_vector); if (err > 0) { dev_warn(&h->pdev->dev, "only %d MSI-X vectors " "available\n", err); - goto default_int_mode; + h->msix_vector = err; + err = pci_enable_msix(h->pdev, hpsa_msix_entries, + h->msix_vector); + } + if (!err) { + for (i = 0; i < h->msix_vector; i++) + h->intr[i] = hpsa_msix_entries[i].vector; + return; } else { dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); + h->msix_vector = 0; goto default_int_mode; } } @@ -4259,6 +6329,7 @@ static void hpsa_find_board_params(struct ctlr_info *h) hpsa_get_max_perf_mode_cmds(h); h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); + h->fw_support = readl(&(h->cfgtable->misc_fw_support)); /* * Limit in-command s/g elements to 32 to save dma'able memory. 
* However spec says if 0, use 31 @@ -4275,6 +6346,10 @@ /* Find out what task management functions are supported and cache */ h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); + if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) + dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); + if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) + dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); } static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) @@ -4286,16 +6361,17 @@ static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) return true; } -/* Need to enable prefetch in the SCSI core for 6400 in x86 */ -static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h) +static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) { -#ifdef CONFIG_X86 - u32 prefetch; + u32 driver_support; - prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); - prefetch |= 0x100; - writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); +#ifdef CONFIG_X86 + /* Need to enable prefetch in the SCSI core for 6400 in x86 */ + driver_support = readl(&(h->cfgtable->driver_support)); + driver_support |= ENABLE_SCSI_PREFETCH; #endif + driver_support |= ENABLE_UNIT_ATTN; + writel(driver_support, &(h->cfgtable->driver_support)); } /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result @@ -4312,6 +6388,23 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); } +static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) +{ + int i; + u32 doorbell_value; + unsigned long flags; + /* wait until the clear_event_notify bit 6 is cleared by controller. */ + for (i = 0; i < MAX_CONFIG_WAIT; i++) { + spin_lock_irqsave(&h->lock, flags); + doorbell_value = readl(h->vaddr + SA5_DOORBELL); + spin_unlock_irqrestore(&h->lock, flags); + if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) + break; + /* delay and try again */ + msleep(20); + } +} + static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h) { int i; @@ -4342,18 +6435,20 @@ static int hpsa_enter_simple_mode(struct ctlr_info *h) return -ENOTSUPP; h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); + /* Update the field, and then ring the doorbell */ writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); + writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); hpsa_wait_for_mode_change_ack(h); print_cfg_table(&h->pdev->dev, h->cfgtable); - if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { - dev_warn(&h->pdev->dev, - "unable to get board into simple mode\n"); - return -ENODEV; - } + if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) + goto error; h->transMethod = CFGTBL_Trans_Simple; return 0; +error: + dev_warn(&h->pdev->dev, "unable to get board into simple mode\n"); + return -ENODEV; } static int hpsa_pci_init(struct ctlr_info *h) @@ -4405,7 +6500,7 @@ static int hpsa_pci_init(struct ctlr_info *h) err = -ENODEV; goto err_out_free_res; } - hpsa_enable_scsi_prefetch(h); + hpsa_set_driver_support_bits(h); hpsa_p600_dma_prefetch_quirk(h); err = hpsa_enter_simple_mode(h); if (err) @@ -4499,11 +6594,30 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h) pci_free_consistent(h->pdev, h->nr_cmds * sizeof(struct CommandList), h->cmd_pool, h->cmd_pool_dhandle); + if (h->ioaccel2_cmd_pool) + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), + 
h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); if (h->errinfo_pool) pci_free_consistent(h->pdev, h->nr_cmds * sizeof(struct ErrorInfo), h->errinfo_pool, h->errinfo_pool_dhandle); + if (h->ioaccel_cmd_pool) + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(struct io_accel1_cmd), + h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); +} + +static void hpsa_irq_affinity_hints(struct ctlr_info *h) +{ + int i, cpu, rc; + + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < h->msix_vector; i++) { + rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); + cpu = cpumask_next(cpu, cpu_online_mask); + } } static int hpsa_request_irq(struct ctlr_info *h, @@ -4519,15 +6633,16 @@ static int hpsa_request_irq(struct ctlr_info *h, for (i = 0; i < MAX_REPLY_QUEUES; i++) h->q[i] = (u8) i; - if (h->intr_mode == PERF_MODE_INT && h->msix_vector) { + if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { /* If performant mode and MSI-X, use multiple reply queues */ - for (i = 0; i < MAX_REPLY_QUEUES; i++) + for (i = 0; i < h->msix_vector; i++) rc = request_irq(h->intr[i], msixhandler, 0, h->devname, &h->q[i]); + hpsa_irq_affinity_hints(h); } else { /* Use single reply pool */ - if (h->msix_vector || h->msi_vector) { + if (h->msix_vector > 0 || h->msi_vector) { rc = request_irq(h->intr[h->intr_mode], msixhandler, 0, h->devname, &h->q[h->intr_mode]); @@ -4576,12 +6691,15 @@ static void free_irqs(struct ctlr_info *h) if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { /* Single reply queue, only one irq to free */ i = h->intr_mode; + irq_set_affinity_hint(h->intr[i], NULL); free_irq(h->intr[i], &h->q[i]); return; } - for (i = 0; i < MAX_REPLY_QUEUES; i++) + for (i = 0; i < h->msix_vector; i++) { + irq_set_affinity_hint(h->intr[i], NULL); free_irq(h->intr[i], &h->q[i]); + } } static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) @@ -4598,14 +6716,28 @@ static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) #endif /* CONFIG_PCI_MSI */ } +static void hpsa_free_reply_queues(struct ctlr_info *h) +{ + int i; + + for (i = 0; i < h->nreply_queues; i++) { + if (!h->reply_queue[i].head) + continue; + pci_free_consistent(h->pdev, h->reply_queue_size, + h->reply_queue[i].head, h->reply_queue[i].busaddr); + h->reply_queue[i].head = NULL; + h->reply_queue[i].busaddr = 0; + } +} + static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) { hpsa_free_irqs_and_disable_msix(h); hpsa_free_sg_chain_blocks(h); hpsa_free_cmd_pool(h); + kfree(h->ioaccel1_blockFetchTable); kfree(h->blockFetchTable); - pci_free_consistent(h->pdev, h->reply_pool_size, - h->reply_pool, h->reply_pool_dhandle); + hpsa_free_reply_queues(h); if (h->vaddr) iounmap(h->vaddr); if (h->transtable) @@ -4616,16 +6748,6 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) kfree(h); } -static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h) -{ - assert_spin_locked(&lockup_detector_lock); - if (!hpsa_lockup_detector) - return; - if (h->lockup_detected) - return; /* already stopped the lockup detector */ - list_del(&h->lockup_list); -} - /* Called when controller lockup detected. 
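 * Every command still sitting on the given queue is failed and * completed so the upper layers stop waiting on it. * controller_lockup_detected() below masks interrupts and latches * the scratchpad value into the per-cpu lockup_detected flags * before failing the queued commands.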
*/ static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list) { @@ -4640,18 +6762,38 @@ static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list) } } +static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) +{ + int i, cpu; + + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < num_online_cpus(); i++) { + u32 *lockup_detected; + lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); + *lockup_detected = value; + cpu = cpumask_next(cpu, cpu_online_mask); + } + wmb(); /* be sure the per-cpu variables are out to memory */ +} + static void controller_lockup_detected(struct ctlr_info *h) { unsigned long flags; + u32 lockup_detected; - assert_spin_locked(&lockup_detector_lock); - remove_ctlr_from_lockup_detector_list(h); h->access.set_intr_mask(h, HPSA_INTR_OFF); spin_lock_irqsave(&h->lock, flags); - h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); + lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); + if (!lockup_detected) { + /* no heartbeat, but controller gave us a zero. */ + dev_warn(&h->pdev->dev, + "lockup detected but scratchpad register is zero\n"); + lockup_detected = 0xffffffff; + } + set_lockup_detected_for_all_cpus(h, lockup_detected); spin_unlock_irqrestore(&h->lock, flags); dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", - h->lockup_detected); + lockup_detected); pci_disable_device(h->pdev); spin_lock_irqsave(&h->lock, flags); fail_all_cmds_on_list(h, &h->cmpQ); @@ -4665,7 +6807,6 @@ static void detect_controller_lockup(struct ctlr_info *h) u32 heartbeat; unsigned long flags; - assert_spin_locked(&lockup_detector_lock); now = get_jiffies_64(); /* If we've received an interrupt recently, we're ok. */ if (time_after64(h->last_intr_timestamp + @@ -4695,68 +6836,117 @@ static void detect_controller_lockup(struct ctlr_info *h) h->last_heartbeat_timestamp = now; } -static int detect_controller_lockup_thread(void *notused) +static void hpsa_ack_ctlr_events(struct ctlr_info *h) { - struct ctlr_info *h; - unsigned long flags; - - while (1) { - struct list_head *this, *tmp; - - schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL); - if (kthread_should_stop()) - break; - spin_lock_irqsave(&lockup_detector_lock, flags); - list_for_each_safe(this, tmp, &hpsa_ctlr_list) { - h = list_entry(this, struct ctlr_info, lockup_list); - detect_controller_lockup(h); - } - spin_unlock_irqrestore(&lockup_detector_lock, flags); + int i; + char *event_type; + + /* Clear the driver-requested rescan flag */ + h->drv_req_rescan = 0; + + /* Ask the controller to clear the events we're handling. 
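+ * The ack handshake: write the event bits to + * cfgtable->clear_event_notify, set DOORBELL_CLEAR_EVENTS (bit 6) + * in the doorbell register, then poll until the controller clears + * that bit again (hpsa_wait_for_clear_event_notify_ack).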
*/ + if ((h->transMethod & (CFGTBL_Trans_io_accel1 + | CFGTBL_Trans_io_accel2)) && + (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || + h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { + + if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) + event_type = "state change"; + if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) + event_type = "configuration change"; + /* Stop sending new RAID offload reqs via the IO accelerator */ + scsi_block_requests(h->scsi_host); + for (i = 0; i < h->ndevices; i++) + h->dev[i]->offload_enabled = 0; + hpsa_drain_accel_commands(h); + /* Set 'accelerator path config change' bit */ + dev_warn(&h->pdev->dev, + "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", + h->events, event_type); + writel(h->events, &(h->cfgtable->clear_event_notify)); + /* Set the "clear event notify field update" bit 6 */ + writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); + /* Wait until ctlr clears 'clear event notify field', bit 6 */ + hpsa_wait_for_clear_event_notify_ack(h); + scsi_unblock_requests(h->scsi_host); + } else { + /* Acknowledge controller notification events. */ + writel(h->events, &(h->cfgtable->clear_event_notify)); + writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); + hpsa_wait_for_clear_event_notify_ack(h); +#if 0 + writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); + hpsa_wait_for_mode_change_ack(h); +#endif } - return 0; + return; } -static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h) +/* Check a register on the controller to see if there are configuration + * changes (added/changed/removed logical drives, etc.) which mean that + * we should rescan the controller for devices. + * Also check flag for driver-initiated rescan. + */ +static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) { - unsigned long flags; + if (h->drv_req_rescan) + return 1; - h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; - spin_lock_irqsave(&lockup_detector_lock, flags); - list_add_tail(&h->lockup_list, &hpsa_ctlr_list); - spin_unlock_irqrestore(&lockup_detector_lock, flags); + if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) + return 0; + + h->events = readl(&(h->cfgtable->event_notify)); + return h->events & RESCAN_REQUIRED_EVENT_BITS; } -static void start_controller_lockup_detector(struct ctlr_info *h) +/* + * Check if any of the offline devices have become ready + */ +static int hpsa_offline_devices_ready(struct ctlr_info *h) { - /* Start the lockup detector thread if not already started */ - if (!hpsa_lockup_detector) { - spin_lock_init(&lockup_detector_lock); - hpsa_lockup_detector = - kthread_run(detect_controller_lockup_thread, - NULL, HPSA); - } - if (!hpsa_lockup_detector) { - dev_warn(&h->pdev->dev, - "Could not start lockup detector thread\n"); - return; + unsigned long flags; + struct offline_device_entry *d; + struct list_head *this, *tmp; + + spin_lock_irqsave(&h->offline_device_lock, flags); + list_for_each_safe(this, tmp, &h->offline_device_list) { + d = list_entry(this, struct offline_device_entry, + offline_list); + spin_unlock_irqrestore(&h->offline_device_lock, flags); + if (!hpsa_volume_offline(h, d->scsi3addr)) + return 1; + spin_lock_irqsave(&h->offline_device_lock, flags); } - add_ctlr_to_lockup_detector_list(h); + spin_unlock_irqrestore(&h->offline_device_lock, flags); + return 0; } -static void stop_controller_lockup_detector(struct ctlr_info *h) + +static void hpsa_monitor_ctlr_worker(struct work_struct *work) { unsigned long flags; + struct ctlr_info *h = 
container_of(to_delayed_work(work), + struct ctlr_info, monitor_ctlr_work); + detect_controller_lockup(h); + if (lockup_detected(h)) + return; + + if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { + scsi_host_get(h->scsi_host); + h->drv_req_rescan = 0; + hpsa_ack_ctlr_events(h); + hpsa_scan_start(h->scsi_host); + scsi_host_put(h->scsi_host); + } - spin_lock_irqsave(&lockup_detector_lock, flags); - remove_ctlr_from_lockup_detector_list(h); - /* If the list of ctlr's to monitor is empty, stop the thread */ - if (list_empty(&hpsa_ctlr_list)) { - spin_unlock_irqrestore(&lockup_detector_lock, flags); - kthread_stop(hpsa_lockup_detector); - spin_lock_irqsave(&lockup_detector_lock, flags); - hpsa_lockup_detector = NULL; + spin_lock_irqsave(&h->lock, flags); + if (h->remove_in_progress) { + spin_unlock_irqrestore(&h->lock, flags); + return; } - spin_unlock_irqrestore(&lockup_detector_lock, flags); + schedule_delayed_work(&h->monitor_ctlr_work, + h->heartbeat_sample_interval); + spin_unlock_irqrestore(&h->lock, flags); } static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -4788,7 +6978,6 @@ reinit_after_soft_reset: * the 5 lower bits of the address are used by the hardware. and by * the driver. See comments in hpsa.h for more info. */ -#define COMMANDLIST_ALIGNMENT 32 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); h = kzalloc(sizeof(*h), GFP_KERNEL); if (!h) @@ -4798,8 +6987,18 @@ reinit_after_soft_reset: h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; INIT_LIST_HEAD(&h->cmpQ); INIT_LIST_HEAD(&h->reqQ); + INIT_LIST_HEAD(&h->offline_device_list); spin_lock_init(&h->lock); + spin_lock_init(&h->offline_device_lock); spin_lock_init(&h->scan_lock); + spin_lock_init(&h->passthru_count_lock); + + /* Allocate and clear per-cpu variable lockup_detected */ + h->lockup_detected = alloc_percpu(u32); + if (!h->lockup_detected) + goto clean1; + set_lockup_detected_for_all_cpus(h, 0); + rc = hpsa_pci_init(h); if (rc != 0) goto clean1; @@ -4839,6 +7038,7 @@ reinit_after_soft_reset: pci_set_drvdata(pdev, h); h->ndevices = 0; + h->hba_mode_enabled = 0; h->scsi_host = NULL; spin_lock_init(&h->devlock); hpsa_put_ctlr_into_performant_mode(h); @@ -4898,13 +7098,23 @@ reinit_after_soft_reset: goto reinit_after_soft_reset; } + /* Enable Accelerated IO path at driver layer */ + h->acciopath_status = 1; + + h->drv_req_rescan = 0; + /* Turn the interrupts on so we can service requests */ h->access.set_intr_mask(h, HPSA_INTR_ON); hpsa_hba_inquiry(h); hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ - start_controller_lockup_detector(h); - return 1; + + /* Monitor the controller for firmware lockups */ + h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; + INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); + schedule_delayed_work(&h->monitor_ctlr_work, + h->heartbeat_sample_interval); + return 0; clean4: hpsa_free_sg_chain_blocks(h); @@ -4912,6 +7122,8 @@ clean4: free_irqs(h); clean2: clean1: + if (h->lockup_detected) + free_percpu(h->lockup_detected); kfree(h); return rc; } @@ -4921,6 +7133,9 @@ static void hpsa_flush_cache(struct ctlr_info *h) char *flush_buf; struct CommandList *c; + /* Don't bother trying to flush the cache if locked up */ + if (unlikely(lockup_detected(h))) + return; flush_buf = kzalloc(4, GFP_KERNEL); if (!flush_buf) return; @@ -4969,13 +7184,20 @@ static void hpsa_free_device_info(struct ctlr_info *h) static void hpsa_remove_one(struct pci_dev *pdev) { struct ctlr_info *h; + 
unsigned long flags; if (pci_get_drvdata(pdev) == NULL) { dev_err(&pdev->dev, "unable to remove device\n"); return; } h = pci_get_drvdata(pdev); - stop_controller_lockup_detector(h); + + /* Get rid of any controller monitoring work items */ + spin_lock_irqsave(&h->lock, flags); + h->remove_in_progress = 1; + cancel_delayed_work(&h->monitor_ctlr_work); + spin_unlock_irqrestore(&h->lock, flags); + hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ hpsa_shutdown(pdev); iounmap(h->vaddr); @@ -4989,14 +7211,15 @@ static void hpsa_remove_one(struct pci_dev *pdev) pci_free_consistent(h->pdev, h->nr_cmds * sizeof(struct ErrorInfo), h->errinfo_pool, h->errinfo_pool_dhandle); - pci_free_consistent(h->pdev, h->reply_pool_size, - h->reply_pool, h->reply_pool_dhandle); + hpsa_free_reply_queues(h); kfree(h->cmd_pool_bits); kfree(h->blockFetchTable); + kfree(h->ioaccel1_blockFetchTable); + kfree(h->ioaccel2_blockFetchTable); kfree(h->hba_inquiry_data); pci_disable_device(pdev); pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); + free_percpu(h->lockup_detected); kfree(h); } @@ -5034,20 +7257,17 @@ static struct pci_driver hpsa_pci_driver = { * bits of the command address. */ static void calc_bucket_map(int bucket[], int num_buckets, - int nsgs, int *bucket_map) + int nsgs, int min_blocks, int *bucket_map) { int i, j, b, size; - /* even a command with 0 SGs requires 4 blocks */ -#define MINIMUM_TRANSFER_BLOCKS 4 -#define NUM_BUCKETS 8 /* Note, bucket_map must have nsgs+1 entries. */ for (i = 0; i <= nsgs; i++) { /* Compute size of a command with i SG entries */ - size = i + MINIMUM_TRANSFER_BLOCKS; + size = i + min_blocks; b = num_buckets; /* Assume the biggest bucket */ /* Find the bucket that is just big enough */ - for (j = 0; j < 8; j++) { + for (j = 0; j < num_buckets; j++) { if (bucket[j] >= size) { b = j; break; @@ -5058,10 +7278,16 @@ static void calc_bucket_map(int bucket[], int num_buckets, } } -static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags) +static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) { int i; unsigned long register_value; + unsigned long transMethod = CFGTBL_Trans_Performant | + (trans_support & CFGTBL_Trans_use_short_tags) | + CFGTBL_Trans_enable_directed_msix | + (trans_support & (CFGTBL_Trans_io_accel1 | + CFGTBL_Trans_io_accel2)); + struct access_method access = SA5_performant_access; /* This is a bit complicated. There are 8 registers on * the controller which we write to to tell it 8 different @@ -5081,6 +7307,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags) * sizes for small commands, and fewer sizes for larger commands. 
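 * Worked example (illustrative): with bft[] = {5, 6, 8, 10, 12, 20, * 28, 36} and min_blocks = 4, a command carrying 3 s/g entries needs * 3 + 4 = 7 blocks; the smallest bucket that covers it is bft[2] = 8, * so calc_bucket_map() records bucket_map[3] = 2.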
*/ int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}; +#define MIN_IOACCEL2_BFT_ENTRY 5 +#define HPSA_IOACCEL2_HEADER_SZ 4 + int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, + HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES}; + BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16); + BUILD_BUG_ON(ARRAY_SIZE(bft) != 8); + BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) > + 16 * MIN_IOACCEL2_BFT_ENTRY); + BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16); BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4); /* 5 = 1 s/g entry or 4k * 6 = 2 s/g entry or 8k @@ -5088,12 +7324,20 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags) * 10 = 6 s/g entry or 24k */ + /* If the controller supports either ioaccel method then + * we can also use the RAID stack submit path that does not + * perform the superfluous readl() after each command submission. + */ + if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)) + access = SA5_performant_access_no_read; + /* Controller spec: zero out this buffer. */ - memset(h->reply_pool, 0, h->reply_pool_size); + for (i = 0; i < h->nreply_queues; i++) + memset(h->reply_queue[i].head, 0, h->reply_queue_size); bft[7] = SG_ENTRIES_IN_CMD + 4; calc_bucket_map(bft, ARRAY_SIZE(bft), - SG_ENTRIES_IN_CMD, h->blockFetchTable); + SG_ENTRIES_IN_CMD, 4, h->blockFetchTable); for (i = 0; i < 8; i++) writel(bft[i], &h->transtable->BlockFetch[i]); @@ -5105,14 +7349,26 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags) for (i = 0; i < h->nreply_queues; i++) { writel(0, &h->transtable->RepQAddr[i].upper); - writel(h->reply_pool_dhandle + - (h->max_commands * sizeof(u64) * i), + writel(h->reply_queue[i].busaddr, &h->transtable->RepQAddr[i].lower); } - writel(CFGTBL_Trans_Performant | use_short_tags | - CFGTBL_Trans_enable_directed_msix, - &(h->cfgtable->HostWrite.TransportRequest)); + writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); + writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); + /* + * enable outbound interrupt coalescing in accelerator mode; + */ + if (trans_support & CFGTBL_Trans_io_accel1) { + access = SA5_ioaccel_mode1_access; + writel(10, &h->cfgtable->HostWrite.CoalIntDelay); + writel(4, &h->cfgtable->HostWrite.CoalIntCount); + } else { + if (trans_support & CFGTBL_Trans_io_accel2) { + access = SA5_ioaccel_mode2_access; + writel(10, &h->cfgtable->HostWrite.CoalIntDelay); + writel(4, &h->cfgtable->HostWrite.CoalIntCount); + } + } writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); hpsa_wait_for_mode_change_ack(h); register_value = readl(&(h->cfgtable->TransportActive)); @@ -5122,13 +7378,160 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags) return; } /* Change the access methods to the performant access methods */ - h->access = SA5_performant_access; - h->transMethod = CFGTBL_Trans_Performant; + h->access = access; + h->transMethod = transMethod; + + if (!((trans_support & CFGTBL_Trans_io_accel1) || + (trans_support & CFGTBL_Trans_io_accel2))) + return; + + if (trans_support & CFGTBL_Trans_io_accel1) { + /* Set up I/O accelerator mode */ + for (i = 0; i < h->nreply_queues; i++) { + writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); + h->reply_queue[i].current_entry = + readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); + } + bft[7] = h->ioaccel_maxsg + 8; + calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, + h->ioaccel1_blockFetchTable); + + /* initialize all reply queue entries to unused */ + for (i 
= 0; i < h->nreply_queues; i++) + memset(h->reply_queue[i].head, + (u8) IOACCEL_MODE1_REPLY_UNUSED, + h->reply_queue_size); + + /* set all the constant fields in the accelerator command + * frames once at init time to save CPU cycles later. + */ + for (i = 0; i < h->nr_cmds; i++) { + struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; + + cp->function = IOACCEL1_FUNCTION_SCSIIO; + cp->err_info = (u32) (h->errinfo_pool_dhandle + + (i * sizeof(struct ErrorInfo))); + cp->err_info_len = sizeof(struct ErrorInfo); + cp->sgl_offset = IOACCEL1_SGLOFFSET; + cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT; + cp->timeout_sec = 0; + cp->ReplyQueue = 0; + cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) | + DIRECT_LOOKUP_BIT; + cp->Tag.upper = 0; + cp->host_addr.lower = + (u32) (h->ioaccel_cmd_pool_dhandle + + (i * sizeof(struct io_accel1_cmd))); + cp->host_addr.upper = 0; + } + } else if (trans_support & CFGTBL_Trans_io_accel2) { + u64 cfg_offset, cfg_base_addr_index; + u32 bft2_offset, cfg_base_addr; + int rc; + + rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, + &cfg_base_addr_index, &cfg_offset); + BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64); + bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; + calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, + 4, h->ioaccel2_blockFetchTable); + bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); + BUILD_BUG_ON(offsetof(struct CfgTable, + io_accel_request_size_offset) != 0xb8); + h->ioaccel2_bft2_regs = + remap_pci_mem(pci_resource_start(h->pdev, + cfg_base_addr_index) + + cfg_offset + bft2_offset, + ARRAY_SIZE(bft2) * + sizeof(*h->ioaccel2_bft2_regs)); + for (i = 0; i < ARRAY_SIZE(bft2); i++) + writel(bft2[i], &h->ioaccel2_bft2_regs[i]); + } + writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); + hpsa_wait_for_mode_change_ack(h); +} + +static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) +{ + h->ioaccel_maxsg = + readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); + if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) + h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; + + /* Command structures must be aligned on a 128-byte boundary + * because the 7 lower bits of the address are used by the + * hardware. 
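+ * (A 128-byte-aligned bus address has bits 0-6 clear for the + * controller's use, analogous to the 32-byte alignment of struct + * CommandList, which frees only the low 5 bits.)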
+ */ + BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % + IOACCEL1_COMMANDLIST_ALIGNMENT); + h->ioaccel_cmd_pool = + pci_alloc_consistent(h->pdev, + h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), + &(h->ioaccel_cmd_pool_dhandle)); + + h->ioaccel1_blockFetchTable = + kmalloc(((h->ioaccel_maxsg + 1) * + sizeof(u32)), GFP_KERNEL); + + if ((h->ioaccel_cmd_pool == NULL) || + (h->ioaccel1_blockFetchTable == NULL)) + goto clean_up; + + memset(h->ioaccel_cmd_pool, 0, + h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); + return 0; + +clean_up: + if (h->ioaccel_cmd_pool) + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), + h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); + kfree(h->ioaccel1_blockFetchTable); + return 1; +} + +static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) +{ + /* Allocate ioaccel2 mode command blocks and block fetch table */ + + h->ioaccel_maxsg = + readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); + if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) + h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; + + BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % + IOACCEL2_COMMANDLIST_ALIGNMENT); + h->ioaccel2_cmd_pool = + pci_alloc_consistent(h->pdev, + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), + &(h->ioaccel2_cmd_pool_dhandle)); + + h->ioaccel2_blockFetchTable = + kmalloc(((h->ioaccel_maxsg + 1) * + sizeof(u32)), GFP_KERNEL); + + if ((h->ioaccel2_cmd_pool == NULL) || + (h->ioaccel2_blockFetchTable == NULL)) + goto clean_up; + + memset(h->ioaccel2_cmd_pool, 0, + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); + return 0; + +clean_up: + if (h->ioaccel2_cmd_pool) + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), + h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); + kfree(h->ioaccel2_blockFetchTable); + return 1; } static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) { u32 trans_support; + unsigned long transMethod = CFGTBL_Trans_Performant | + CFGTBL_Trans_use_short_tags; int i; if (hpsa_simple_mode) @@ -5138,15 +7541,32 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) if (!(trans_support & PERFORMANT_MODE)) return; - h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1; + /* Check for I/O accelerator mode support */ + if (trans_support & CFGTBL_Trans_io_accel1) { + transMethod |= CFGTBL_Trans_io_accel1 | + CFGTBL_Trans_enable_directed_msix; + if (hpsa_alloc_ioaccel_cmd_and_bft(h)) + goto clean_up; + } else { + if (trans_support & CFGTBL_Trans_io_accel2) { + transMethod |= CFGTBL_Trans_io_accel2 | + CFGTBL_Trans_enable_directed_msix; + if (ioaccel2_alloc_cmds_and_bft(h)) + goto clean_up; + } + } + + h->nreply_queues = h->msix_vector > 0 ? 
h->msix_vector : 1; hpsa_get_max_perf_mode_cmds(h); /* Performant mode ring buffer and supporting data structures */ - h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues; - h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, - &(h->reply_pool_dhandle)); + h->reply_queue_size = h->max_commands * sizeof(u64); for (i = 0; i < h->nreply_queues; i++) { - h->reply_queue[i].head = &h->reply_pool[h->max_commands * i]; + h->reply_queue[i].head = pci_alloc_consistent(h->pdev, + h->reply_queue_size, + &(h->reply_queue[i].busaddr)); + if (!h->reply_queue[i].head) + goto clean_up; h->reply_queue[i].size = h->max_commands; h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ h->reply_queue[i].current_entry = 0; @@ -5155,23 +7575,42 @@ /* Need a block fetch table for performant mode */ h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * sizeof(u32)), GFP_KERNEL); - - if ((h->reply_pool == NULL) - || (h->blockFetchTable == NULL)) + if (!h->blockFetchTable) goto clean_up; - hpsa_enter_performant_mode(h, - trans_support & CFGTBL_Trans_use_short_tags); - + hpsa_enter_performant_mode(h, trans_support); return; clean_up: - if (h->reply_pool) - pci_free_consistent(h->pdev, h->reply_pool_size, - h->reply_pool, h->reply_pool_dhandle); + hpsa_free_reply_queues(h); kfree(h->blockFetchTable); } +static int is_accelerated_cmd(struct CommandList *c) +{ + return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2; +} + +static void hpsa_drain_accel_commands(struct ctlr_info *h) +{ + struct CommandList *c = NULL; + unsigned long flags; + int accel_cmds_out; + + do { /* wait for all outstanding commands to drain out */ + accel_cmds_out = 0; + spin_lock_irqsave(&h->lock, flags); + list_for_each_entry(c, &h->cmpQ, list) + accel_cmds_out += is_accelerated_cmd(c); + list_for_each_entry(c, &h->reqQ, list) + accel_cmds_out += is_accelerated_cmd(c); + spin_unlock_irqrestore(&h->lock, flags); + if (accel_cmds_out <= 0) + break; + msleep(100); + } while (1); +} + /* * This is it. Register the PCI driver information for the cards we control; * the OS will call our registered routines when it finds one of our cards. 
@@ -5186,5 +7625,83 @@ static void __exit hpsa_cleanup(void) pci_unregister_driver(&hpsa_pci_driver); } +static void __attribute__((unused)) verify_offsets(void) +{ +#define VERIFY_OFFSET(member, offset) \ + BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset) + + VERIFY_OFFSET(structure_size, 0); + VERIFY_OFFSET(volume_blk_size, 4); + VERIFY_OFFSET(volume_blk_cnt, 8); + VERIFY_OFFSET(phys_blk_shift, 16); + VERIFY_OFFSET(parity_rotation_shift, 17); + VERIFY_OFFSET(strip_size, 18); + VERIFY_OFFSET(disk_starting_blk, 20); + VERIFY_OFFSET(disk_blk_cnt, 28); + VERIFY_OFFSET(data_disks_per_row, 36); + VERIFY_OFFSET(metadata_disks_per_row, 38); + VERIFY_OFFSET(row_cnt, 40); + VERIFY_OFFSET(layout_map_count, 42); + VERIFY_OFFSET(flags, 44); + VERIFY_OFFSET(dekindex, 46); + /* VERIFY_OFFSET(reserved, 48 */ + VERIFY_OFFSET(data, 64); + +#undef VERIFY_OFFSET + +#define VERIFY_OFFSET(member, offset) \ + BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset) + + VERIFY_OFFSET(IU_type, 0); + VERIFY_OFFSET(direction, 1); + VERIFY_OFFSET(reply_queue, 2); + /* VERIFY_OFFSET(reserved1, 3); */ + VERIFY_OFFSET(scsi_nexus, 4); + VERIFY_OFFSET(Tag, 8); + VERIFY_OFFSET(cdb, 16); + VERIFY_OFFSET(cciss_lun, 32); + VERIFY_OFFSET(data_len, 40); + VERIFY_OFFSET(cmd_priority_task_attr, 44); + VERIFY_OFFSET(sg_count, 45); + /* VERIFY_OFFSET(reserved3 */ + VERIFY_OFFSET(err_ptr, 48); + VERIFY_OFFSET(err_len, 56); + /* VERIFY_OFFSET(reserved4 */ + VERIFY_OFFSET(sg, 64); + +#undef VERIFY_OFFSET + +#define VERIFY_OFFSET(member, offset) \ + BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset) + + VERIFY_OFFSET(dev_handle, 0x00); + VERIFY_OFFSET(reserved1, 0x02); + VERIFY_OFFSET(function, 0x03); + VERIFY_OFFSET(reserved2, 0x04); + VERIFY_OFFSET(err_info, 0x0C); + VERIFY_OFFSET(reserved3, 0x10); + VERIFY_OFFSET(err_info_len, 0x12); + VERIFY_OFFSET(reserved4, 0x13); + VERIFY_OFFSET(sgl_offset, 0x14); + VERIFY_OFFSET(reserved5, 0x15); + VERIFY_OFFSET(transfer_len, 0x1C); + VERIFY_OFFSET(reserved6, 0x20); + VERIFY_OFFSET(io_flags, 0x24); + VERIFY_OFFSET(reserved7, 0x26); + VERIFY_OFFSET(LUN, 0x34); + VERIFY_OFFSET(control, 0x3C); + VERIFY_OFFSET(CDB, 0x40); + VERIFY_OFFSET(reserved8, 0x50); + VERIFY_OFFSET(host_context_flags, 0x60); + VERIFY_OFFSET(timeout_sec, 0x62); + VERIFY_OFFSET(ReplyQueue, 0x64); + VERIFY_OFFSET(reserved9, 0x65); + VERIFY_OFFSET(Tag, 0x68); + VERIFY_OFFSET(host_addr, 0x70); + VERIFY_OFFSET(CISS_LUN, 0x78); + VERIFY_OFFSET(SG, 0x78 + 8); +#undef VERIFY_OFFSET +} + module_init(hpsa_init); module_exit(hpsa_cleanup); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 981647989bf..24472cec7de 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -1,6 +1,6 @@ /* * Disk Array driver for HP Smart Array SAS controllers - * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. + * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -46,14 +46,65 @@ struct hpsa_scsi_dev_t { unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ unsigned char model[16]; /* bytes 16-31 of inquiry data */ unsigned char raid_level; /* from inquiry page 0xC1 */ + unsigned char volume_offline; /* discovered via TUR or VPD */ + u32 ioaccel_handle; + int offload_config; /* I/O accel RAID offload configured */ + int offload_enabled; /* I/O accel RAID offload enabled */ + int offload_to_mirror; /* Send next I/O accelerator RAID + * offload request to mirror drive + */ + struct raid_map_data raid_map; /* I/O accelerator RAID map */ + }; -struct reply_pool { +struct reply_queue_buffer { u64 *head; size_t size; u8 wraparound; u32 current_entry; + dma_addr_t busaddr; +}; + +#pragma pack(1) +struct bmic_controller_parameters { + u8 led_flags; + u8 enable_command_list_verification; + u8 backed_out_write_drives; + u16 stripes_for_parity; + u8 parity_distribution_mode_flags; + u16 max_driver_requests; + u16 elevator_trend_count; + u8 disable_elevator; + u8 force_scan_complete; + u8 scsi_transfer_mode; + u8 force_narrow; + u8 rebuild_priority; + u8 expand_priority; + u8 host_sdb_asic_fix; + u8 pdpi_burst_from_host_disabled; + char software_name[64]; + char hardware_name[32]; + u8 bridge_revision; + u8 snapshot_priority; + u32 os_specific; + u8 post_prompt_timeout; + u8 automatic_drive_slamming; + u8 reserved1; + u8 nvram_flags; +#define HBA_MODE_ENABLED_FLAG (1 << 3) + u8 cache_nvram_flags; + u8 drive_config_flags; + u16 reserved2; + u8 temp_warning_level; + u8 temp_shutdown_level; + u8 temp_condition_reset; + u8 max_coalesce_commands; + u32 max_coalesce_delay; + u8 orca_password[4]; + u8 access_id[16]; + u8 reserved[356]; }; +#pragma pack() struct ctlr_info { int ctlr; @@ -66,11 +117,8 @@ struct ctlr_info { int nr_cmds; /* Number of commands allowed on this controller */ struct CfgTable __iomem *cfgtable; int interrupts_enabled; - int major; int max_commands; int commands_outstanding; - int max_outstanding; /* Debug */ - int usage_count; /* number of opens all all minor devices */ # define PERF_MODE_INT 0 # define DOORBELL_INT 1 # define SIMPLE_MODE_INT 2 @@ -80,6 +128,7 @@ struct ctlr_info { unsigned int msi_vector; int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ struct access_method access; + char hba_mode_enabled; /* queue and queue Info */ struct list_head reqQ; @@ -95,11 +144,13 @@ struct ctlr_info { /* pointers to command and error info pool */ struct CommandList *cmd_pool; dma_addr_t cmd_pool_dhandle; + struct io_accel1_cmd *ioaccel_cmd_pool; + dma_addr_t ioaccel_cmd_pool_dhandle; + struct io_accel2_cmd *ioaccel2_cmd_pool; + dma_addr_t ioaccel2_cmd_pool_dhandle; struct ErrorInfo *errinfo_pool; dma_addr_t errinfo_pool_dhandle; unsigned long *cmd_pool_bits; - int nr_allocs; - int nr_frees; int scan_finished; spinlock_t scan_lock; wait_queue_head_t scan_wait_queue; @@ -116,23 +167,35 @@ struct ctlr_info { struct TransTable_struct *transtable; unsigned long transMethod; + /* cap concurrent passthrus at some reasonable maximum */ +#define HPSA_MAX_CONCURRENT_PASSTHRUS (20) + spinlock_t passthru_count_lock; /* protects passthru_count */ + int passthru_count; + /* * Performant mode completion buffers */ - u64 *reply_pool; - size_t reply_pool_size; - struct reply_pool reply_queue[MAX_REPLY_QUEUES]; + size_t reply_queue_size; + struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES]; u8 
nreply_queues; - dma_addr_t reply_pool_dhandle; u32 *blockFetchTable; + u32 *ioaccel1_blockFetchTable; + u32 *ioaccel2_blockFetchTable; + u32 *ioaccel2_bft2_regs; unsigned char *hba_inquiry_data; + u32 driver_support; + u32 fw_support; + int ioaccel_support; + int ioaccel_maxsg; u64 last_intr_timestamp; u32 last_heartbeat; u64 last_heartbeat_timestamp; u32 heartbeat_sample_interval; atomic_t firmware_flash_in_progress; - u32 lockup_detected; - struct list_head lockup_list; + u32 *lockup_detected; + struct delayed_work monitor_ctlr_work; + int remove_in_progress; + u32 fifo_recently_full; /* Address of h->q[x] is passed to intr handler to know which queue */ u8 q[MAX_REPLY_QUEUES]; u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */ @@ -156,7 +219,33 @@ struct ctlr_info { #define HPSATMF_LOG_QRY_TASK (1 << 23) #define HPSATMF_LOG_QRY_TSET (1 << 24) #define HPSATMF_LOG_QRY_ASYNC (1 << 25) + u32 events; +#define CTLR_STATE_CHANGE_EVENT (1 << 0) +#define CTLR_ENCLOSURE_HOT_PLUG_EVENT (1 << 1) +#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV (1 << 4) +#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV (1 << 5) +#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL (1 << 6) +#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED (1 << 30) +#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31) + +#define RESCAN_REQUIRED_EVENT_BITS \ + (CTLR_ENCLOSURE_HOT_PLUG_EVENT | \ + CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \ + CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \ + CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \ + CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE) + spinlock_t offline_device_lock; + struct list_head offline_device_list; + int acciopath_status; + int drv_req_rescan; /* flag for driver to request rescan event */ + int raid_offload_debug; }; + +struct offline_device_entry { + unsigned char scsi3addr[8]; + struct list_head offline_list; +}; + #define HPSA_ABORT_MSG 0 #define HPSA_DEVICE_RESET_MSG 1 #define HPSA_RESET_TYPE_CONTROLLER 0x00 @@ -237,18 +326,39 @@ struct ctlr_info { #define HPSA_INTR_ON 1 #define HPSA_INTR_OFF 0 + +/* + * Inbound Post Queue offsets for IO Accelerator Mode 2 + */ +#define IOACCEL2_INBOUND_POSTQ_32 0x48 +#define IOACCEL2_INBOUND_POSTQ_64_LOW 0xd0 +#define IOACCEL2_INBOUND_POSTQ_64_HI 0xd4 + /* Send the command to the hardware */ static void SA5_submit_command(struct ctlr_info *h, struct CommandList *c) { - dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, - c->Header.Tag.lower); writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); } +static void SA5_submit_command_no_read(struct ctlr_info *h, + struct CommandList *c) +{ + writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); +} + +static void SA5_submit_command_ioaccel2(struct ctlr_info *h, + struct CommandList *c) +{ + if (c->cmd_type == CMD_IOACCEL2) + writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); + else + writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); +} + /* * This card is the opposite of the other cards. * 0 turns interrupts on... @@ -284,7 +394,7 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val) static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) { - struct reply_pool *rq = &h->reply_queue[q]; + struct reply_queue_buffer *rq = &h->reply_queue[q]; unsigned long flags, register_value = FIFO_EMPTY; /* msi auto clears the interrupt pending bit. 
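 * (when running without MSI/MSI-X the handler must instead clear * the outbound doorbell explicitly; that code is unchanged and * therefore elided from this hunk)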
*/ @@ -363,7 +473,6 @@ static bool SA5_intr_pending(struct ctlr_info *h) { unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); - dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value); return register_value & SA5_INTR_PENDING; } @@ -382,6 +491,50 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h) return register_value & SA5_OUTDB_STATUS_PERF_BIT; } +#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100 + +static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h) +{ + unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); + + return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ? + true : false; +} + +#define IOACCEL_MODE1_REPLY_QUEUE_INDEX 0x1A0 +#define IOACCEL_MODE1_PRODUCER_INDEX 0x1B8 +#define IOACCEL_MODE1_CONSUMER_INDEX 0x1BC +#define IOACCEL_MODE1_REPLY_UNUSED 0xFFFFFFFFFFFFFFFFULL + +static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) +{ + u64 register_value; + struct reply_queue_buffer *rq = &h->reply_queue[q]; + unsigned long flags; + + BUG_ON(q >= h->nreply_queues); + + register_value = rq->head[rq->current_entry]; + if (register_value != IOACCEL_MODE1_REPLY_UNUSED) { + rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED; + if (++rq->current_entry == rq->size) + rq->current_entry = 0; + /* + * @todo + * + * Don't really need to write the new index after each command, + * but with current driver design this is easiest. + */ + wmb(); + writel((q << 24) | rq->current_entry, h->vaddr + + IOACCEL_MODE1_CONSUMER_INDEX); + spin_lock_irqsave(&h->lock, flags); + h->commands_outstanding--; + spin_unlock_irqrestore(&h->lock, flags); + } + return (unsigned long) register_value; +} + static struct access_method SA5_access = { SA5_submit_command, SA5_intr_mask, @@ -390,6 +543,22 @@ static struct access_method SA5_access = { SA5_completed, }; +static struct access_method SA5_ioaccel_mode1_access = { + SA5_submit_command, + SA5_performant_intr_mask, + SA5_fifo_full, + SA5_ioaccel_mode1_intr_pending, + SA5_ioaccel_mode1_completed, +}; + +static struct access_method SA5_ioaccel_mode2_access = { + SA5_submit_command_ioaccel2, + SA5_performant_intr_mask, + SA5_fifo_full, + SA5_performant_intr_pending, + SA5_performant_completed, +}; + static struct access_method SA5_performant_access = { SA5_submit_command, SA5_performant_intr_mask, @@ -398,6 +567,14 @@ static struct access_method SA5_performant_access = { SA5_performant_completed, }; +static struct access_method SA5_performant_access_no_read = { + SA5_submit_command_no_read, + SA5_performant_intr_mask, + SA5_fifo_full, + SA5_performant_intr_pending, + SA5_performant_completed, +}; + struct board_type { u32 board_id; char *product_name; diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index a894f2eca7a..b5125dc3143 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -1,6 +1,6 @@ /* * Disk Array driver for HP Smart Array SAS controllers - * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. + * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,6 +25,7 @@ #define SENSEINFOBYTES 32 /* may vary between hbas */ #define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */ #define HPSA_SG_CHAIN 0x80000000 +#define HPSA_SG_LAST 0x40000000 #define MAXREPLYQS 256 /* Command Status value */ @@ -41,6 +42,8 @@ #define CMD_UNSOLICITED_ABORT 0x000A #define CMD_TIMEOUT 0x000B #define CMD_UNABORTABLE 0x000C +#define CMD_IOACCEL_DISABLED 0x000E + /* Unit Attentions ASC's as defined for the MSA2012sa */ #define POWER_OR_RESET 0x29 @@ -79,8 +82,9 @@ #define ATTR_ACA 0x07 /* cdb type */ -#define TYPE_CMD 0x00 -#define TYPE_MSG 0x01 +#define TYPE_CMD 0x00 +#define TYPE_MSG 0x01 +#define TYPE_IOACCEL2_CMD 0x81 /* 0x81 is not used by hardware */ /* Message Types */ #define HPSA_TASK_MANAGEMENT 0x00 @@ -125,9 +129,12 @@ #define CFGTBL_AccCmds 0x00000001l #define DOORBELL_CTLR_RESET 0x00000004l #define DOORBELL_CTLR_RESET2 0x00000020l +#define DOORBELL_CLEAR_EVENTS 0x00000040l #define CFGTBL_Trans_Simple 0x00000002l #define CFGTBL_Trans_Performant 0x00000004l +#define CFGTBL_Trans_io_accel1 0x00000080l +#define CFGTBL_Trans_io_accel2 0x00000100l #define CFGTBL_Trans_use_short_tags 0x20000000l #define CFGTBL_Trans_enable_directed_msix (1 << 30) @@ -135,6 +142,28 @@ #define CFGTBL_BusType_Ultra3 0x00000002l #define CFGTBL_BusType_Fibre1G 0x00000100l #define CFGTBL_BusType_Fibre2G 0x00000200l + +/* VPD Inquiry types */ +#define HPSA_VPD_SUPPORTED_PAGES 0x00 +#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1 +#define HPSA_VPD_LV_IOACCEL_STATUS 0xC2 +#define HPSA_VPD_LV_STATUS 0xC3 +#define HPSA_VPD_HEADER_SZ 4 + +/* Logical volume states */ +#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff +#define HPSA_LV_OK 0x0 +#define HPSA_LV_UNDERGOING_ERASE 0x0F +#define HPSA_LV_UNDERGOING_RPI 0x12 +#define HPSA_LV_PENDING_RPI 0x13 +#define HPSA_LV_ENCRYPTED_NO_KEY 0x14 +#define HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER 0x15 +#define HPSA_LV_UNDERGOING_ENCRYPTION 0x16 +#define HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING 0x17 +#define HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 0x18 +#define HPSA_LV_PENDING_ENCRYPTION 0x19 +#define HPSA_LV_PENDING_ENCRYPTION_REKEYING 0x1A + struct vals32 { u32 lower; u32 upper; @@ -162,17 +191,68 @@ struct InquiryData { #define HPSA_REPORT_LOG 0xc2 /* Report Logical LUNs */ #define HPSA_REPORT_PHYS 0xc3 /* Report Physical LUNs */ +#define HPSA_REPORT_PHYS_EXTENDED 0x02 +#define HPSA_CISS_READ 0xc0 /* CISS Read */ +#define HPSA_GET_RAID_MAP 0xc8 /* CISS Get RAID Layout Map */ + +#define RAID_MAP_MAX_ENTRIES 256 + +struct raid_map_disk_data { + u32 ioaccel_handle; /**< Handle to access this disk via the + * I/O accelerator */ + u8 xor_mult[2]; /**< XOR multipliers for this position, + * valid for data disks only */ + u8 reserved[2]; +}; + +struct raid_map_data { + u32 structure_size; /* Size of entire structure in bytes */ + u32 volume_blk_size; /* bytes / block in the volume */ + u64 volume_blk_cnt; /* logical blocks on the volume */ + u8 phys_blk_shift; /* Shift factor to convert between + * units of logical blocks and physical + * disk blocks */ + u8 parity_rotation_shift; /* Shift factor to convert between units + * of logical stripes and physical + * stripes */ + u16 strip_size; /* blocks used on each disk / stripe */ + u64 disk_starting_blk; /* First disk block used in volume */ + u64 disk_blk_cnt; /* disk blocks used by volume / disk */ + u16 data_disks_per_row; /* data disk entries / 
row in the map */ + u16 metadata_disks_per_row; /* mirror/parity disk entries / row + * in the map */ + u16 row_cnt; /* rows in each layout map */ + u16 layout_map_count; /* layout maps (1 map per mirror/parity + * group) */ + u16 flags; /* Bit 0 set if encryption enabled */ +#define RAID_MAP_FLAG_ENCRYPT_ON 0x01 + u16 dekindex; /* Data encryption key index. */ + u8 reserved[16]; + struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES]; +}; + struct ReportLUNdata { u8 LUNListLength[4]; - u32 reserved; + u8 extended_response_flag; + u8 reserved[3]; u8 LUN[HPSA_MAX_LUN][8]; }; +struct ext_report_lun_entry { + u8 lunid[8]; + u8 wwid[8]; + u8 device_type; + u8 device_flags; + u8 lun_count; /* multi-lun device, how many luns */ + u8 redundant_paths; + u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */ +}; + struct ReportExtendedLUNdata { u8 LUNListLength[4]; u8 extended_response_flag; u8 reserved[3]; - u8 LUN[HPSA_MAX_LUN][24]; + struct ext_report_lun_entry LUN[HPSA_MAX_LUN]; }; struct SenseSubsystem_info { @@ -187,6 +267,7 @@ struct SenseSubsystem_info { #define BMIC_CACHE_FLUSH 0xc2 #define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */ #define BMIC_FLASH_FIRMWARE 0xF7 +#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64 /* Command List Structure */ union SCSI3Addr { @@ -283,6 +364,8 @@ struct ErrorInfo { /* Command types */ #define CMD_IOCTL_PEND 0x01 #define CMD_SCSI 0x03 +#define CMD_IOACCEL1 0x04 +#define CMD_IOACCEL2 0x05 #define DIRECT_LOOKUP_SHIFT 5 #define DIRECT_LOOKUP_BIT 0x10 @@ -302,6 +385,7 @@ struct ctlr_info; /* defined in hpsa.h */ * or a bus address. */ +#define COMMANDLIST_ALIGNMENT 128 struct CommandList { struct CommandListHeader Header; struct RequestBlock Request; @@ -314,29 +398,173 @@ struct CommandList { int cmd_type; long cmdindex; struct list_head list; - struct request *rq; struct completion *waiting; void *scsi_cmd; +} __aligned(COMMANDLIST_ALIGNMENT); + +/* Max S/G elements in I/O accelerator command */ +#define IOACCEL1_MAXSGENTRIES 24 +#define IOACCEL2_MAXSGENTRIES 28 -/* on 64 bit architectures, to get this to be 32-byte-aligned - * it so happens we need PAD_64 bytes of padding, on 32 bit systems, - * we need PAD_32 bytes of padding (see below). This does that. - * If it happens that 64 bit and 32 bit systems need different - * padding, PAD_32 and PAD_64 can be set independently, and. - * the code below will do the right thing. +/* + * Structure for I/O accelerator (mode 1) commands. + * Note that this structure must be 128-byte aligned in size. 
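+ *
+ * (Editor's aside, not part of the original comment: __aligned(128) on
+ * the definition below pads sizeof(struct io_accel1_cmd) to a 128-byte
+ * multiple as well as aligning its start, so a command's bus address
+ * falls out of plain array indexing into a coherent pool. A hedged
+ * sketch of that arithmetic, with assumed pool/handle field names:
+ *
+ *	struct io_accel1_cmd *cp = h->ioaccel_cmd_pool + i;
+ *	dma_addr_t busaddr = h->ioaccel_cmd_pool_dhandle +
+ *				i * sizeof(struct io_accel1_cmd);
+ *
+ * Both cp and busaddr then stay 128-byte aligned for every i.)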
*/ -#define IS_32_BIT ((8 - sizeof(long))/4) -#define IS_64_BIT (!IS_32_BIT) -#define PAD_32 (4) -#define PAD_64 (4) -#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) - u8 pad[COMMANDLIST_PAD]; +#define IOACCEL1_COMMANDLIST_ALIGNMENT 128 +struct io_accel1_cmd { + u16 dev_handle; /* 0x00 - 0x01 */ + u8 reserved1; /* 0x02 */ + u8 function; /* 0x03 */ + u8 reserved2[8]; /* 0x04 - 0x0B */ + u32 err_info; /* 0x0C - 0x0F */ + u8 reserved3[2]; /* 0x10 - 0x11 */ + u8 err_info_len; /* 0x12 */ + u8 reserved4; /* 0x13 */ + u8 sgl_offset; /* 0x14 */ + u8 reserved5[7]; /* 0x15 - 0x1B */ + u32 transfer_len; /* 0x1C - 0x1F */ + u8 reserved6[4]; /* 0x20 - 0x23 */ + u16 io_flags; /* 0x24 - 0x25 */ + u8 reserved7[14]; /* 0x26 - 0x33 */ + u8 LUN[8]; /* 0x34 - 0x3B */ + u32 control; /* 0x3C - 0x3F */ + u8 CDB[16]; /* 0x40 - 0x4F */ + u8 reserved8[16]; /* 0x50 - 0x5F */ + u16 host_context_flags; /* 0x60 - 0x61 */ + u16 timeout_sec; /* 0x62 - 0x63 */ + u8 ReplyQueue; /* 0x64 */ + u8 reserved9[3]; /* 0x65 - 0x67 */ + struct vals32 Tag; /* 0x68 - 0x6F */ + struct vals32 host_addr; /* 0x70 - 0x77 */ + u8 CISS_LUN[8]; /* 0x78 - 0x7F */ + struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; +} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); + +#define IOACCEL1_FUNCTION_SCSIIO 0x00 +#define IOACCEL1_SGLOFFSET 32 + +#define IOACCEL1_IOFLAGS_IO_REQ 0x4000 +#define IOACCEL1_IOFLAGS_CDBLEN_MASK 0x001F +#define IOACCEL1_IOFLAGS_CDBLEN_MAX 16 + +#define IOACCEL1_CONTROL_NODATAXFER 0x00000000 +#define IOACCEL1_CONTROL_DATA_OUT 0x01000000 +#define IOACCEL1_CONTROL_DATA_IN 0x02000000 +#define IOACCEL1_CONTROL_TASKPRIO_MASK 0x00007800 +#define IOACCEL1_CONTROL_TASKPRIO_SHIFT 11 +#define IOACCEL1_CONTROL_SIMPLEQUEUE 0x00000000 +#define IOACCEL1_CONTROL_HEADOFQUEUE 0x00000100 +#define IOACCEL1_CONTROL_ORDEREDQUEUE 0x00000200 +#define IOACCEL1_CONTROL_ACA 0x00000400 + +#define IOACCEL1_HCFLAGS_CISS_FORMAT 0x0013 + +#define IOACCEL1_BUSADDR_CMDTYPE 0x00000060 + +struct ioaccel2_sg_element { + u64 address; + u32 length; + u8 reserved[3]; + u8 chain_indicator; +#define IOACCEL2_CHAIN 0x80 +}; + +/* + * SCSI Response Format structure for IO Accelerator Mode 2 + */ +struct io_accel2_scsi_response { + u8 IU_type; +#define IOACCEL2_IU_TYPE_SRF 0x60 + u8 reserved1[3]; + u8 req_id[4]; /* request identifier */ + u8 reserved2[4]; + u8 serv_response; /* service response */ +#define IOACCEL2_SERV_RESPONSE_COMPLETE 0x000 +#define IOACCEL2_SERV_RESPONSE_FAILURE 0x001 +#define IOACCEL2_SERV_RESPONSE_TMF_COMPLETE 0x002 +#define IOACCEL2_SERV_RESPONSE_TMF_SUCCESS 0x003 +#define IOACCEL2_SERV_RESPONSE_TMF_REJECTED 0x004 +#define IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN 0x005 + u8 status; /* status */ +#define IOACCEL2_STATUS_SR_TASK_COMP_GOOD 0x00 +#define IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND 0x02 +#define IOACCEL2_STATUS_SR_TASK_COMP_BUSY 0x08 +#define IOACCEL2_STATUS_SR_TASK_COMP_RES_CON 0x18 +#define IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL 0x28 +#define IOACCEL2_STATUS_SR_TASK_COMP_ABORTED 0x40 +#define IOACCEL2_STATUS_SR_IOACCEL_DISABLED 0x0E + u8 data_present; /* low 2 bits */ +#define IOACCEL2_NO_DATAPRESENT 0x000 +#define IOACCEL2_RESPONSE_DATAPRESENT 0x001 +#define IOACCEL2_SENSE_DATA_PRESENT 0x002 +#define IOACCEL2_RESERVED 0x003 + u8 sense_data_len; /* sense/response data length */ + u8 resid_cnt[4]; /* residual count */ + u8 sense_data_buff[32]; /* sense/response data buffer */ +}; + +/* + * Structure for I/O accelerator (mode 2 or m2) commands. + * Note that this structure must be 128-byte aligned in size. 
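+ *
+ * (Editor's aside, not part of the original comment: the encryption
+ * tweak in this IU is carried as two 32-bit halves, tweak_lower and
+ * tweak_upper. A minimal sketch of filling them, on the assumption
+ * that the tweak is derived from the request's starting LBA:
+ *
+ *	u64 first_block;	(starting LBA, assumed)
+ *
+ *	cp->tweak_lower = (u32)(first_block & 0xffffffffULL);
+ *	cp->tweak_upper = (u32)(first_block >> 32);
+ * )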
+ */ +#define IOACCEL2_COMMANDLIST_ALIGNMENT 128 +struct io_accel2_cmd { + u8 IU_type; /* IU Type */ + u8 direction; /* direction, memtype, and encryption */ +#define IOACCEL2_DIRECTION_MASK 0x03 /* bits 0,1: direction */ +#define IOACCEL2_DIRECTION_MEMTYPE_MASK 0x04 /* bit 2: memtype source/dest */ + /* 0b=PCIe, 1b=DDR */ +#define IOACCEL2_DIRECTION_ENCRYPT_MASK 0x08 /* bit 3: encryption flag */ + /* 0=off, 1=on */ + u8 reply_queue; /* Reply Queue ID */ + u8 reserved1; /* Reserved */ + u32 scsi_nexus; /* Device Handle */ + u32 Tag; /* cciss tag, lower 4 bytes only */ + u32 tweak_lower; /* Encryption tweak, lower 4 bytes */ + u8 cdb[16]; /* SCSI Command Descriptor Block */ + u8 cciss_lun[8]; /* 8 byte SCSI address */ + u32 data_len; /* Total bytes to transfer */ + u8 cmd_priority_task_attr; /* priority and task attrs */ +#define IOACCEL2_PRIORITY_MASK 0x78 +#define IOACCEL2_ATTR_MASK 0x07 + u8 sg_count; /* Number of sg elements */ + u16 dekindex; /* Data encryption key index */ + u64 err_ptr; /* Error Pointer */ + u32 err_len; /* Error Length*/ + u32 tweak_upper; /* Encryption tweak, upper 4 bytes */ + struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; + struct io_accel2_scsi_response error_data; +} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); + +/* + * defines for Mode 2 command struct + * FIXME: this can't be all I need mfm + */ +#define IOACCEL2_IU_TYPE 0x40 +#define IOACCEL2_IU_TMF_TYPE 0x41 +#define IOACCEL2_DIR_NO_DATA 0x00 +#define IOACCEL2_DIR_DATA_IN 0x01 +#define IOACCEL2_DIR_DATA_OUT 0x02 +/* + * SCSI Task Management Request format for Accelerator Mode 2 + */ +struct hpsa_tmf_struct { + u8 iu_type; /* Information Unit Type */ + u8 reply_queue; /* Reply Queue ID */ + u8 tmf; /* Task Management Function */ + u8 reserved1; /* byte 3 Reserved */ + u32 it_nexus; /* SCSI I-T Nexus */ + u8 lun_id[8]; /* LUN ID for TMF request */ + struct vals32 Tag; /* cciss tag associated w/ request */ + struct vals32 abort_tag;/* cciss tag of SCSI cmd or task to abort */ + u64 error_ptr; /* Error Pointer */ + u32 error_len; /* Error Length */ }; /* Configuration Table Structure */ struct HostWrite { u32 TransportRequest; - u32 Reserved; + u32 command_pool_addr_hi; u32 CoalIntDelay; u32 CoalIntCount; }; @@ -344,6 +572,9 @@ struct HostWrite { #define SIMPLE_MODE 0x02 #define PERFORMANT_MODE 0x04 #define MEMQ_MODE 0x08 +#define IOACCEL_MODE_1 0x80 + +#define DRIVER_SUPPORT_UA_ENABLE 0x00000001 struct CfgTable { u8 Signature[4]; @@ -356,7 +587,9 @@ struct CfgTable { u32 TransMethodOffset; u8 ServerName[16]; u32 HeartBeat; - u32 SCSI_Prefetch; + u32 driver_support; +#define ENABLE_SCSI_PREFETCH 0x100 +#define ENABLE_UNIT_ATTN 0x01 u32 MaxScatterGatherElements; u32 MaxLogicalUnits; u32 MaxPhysicalDevices; @@ -371,8 +604,18 @@ struct CfgTable { u32 misc_fw_support; /* offset 0x78 */ #define MISC_FW_DOORBELL_RESET (0x02) #define MISC_FW_DOORBELL_RESET2 (0x010) +#define MISC_FW_RAID_OFFLOAD_BASIC (0x020) +#define MISC_FW_EVENT_NOTIFY (0x080) u8 driver_version[32]; - + u32 max_cached_write_size; + u8 driver_scratchpad[16]; + u32 max_error_info_length; + u32 io_accel_max_embedded_sg_count; + u32 io_accel_request_size_offset; + u32 event_notify; +#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30) +#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31) + u32 clear_event_notify; }; #define NUM_BLOCKFETCH_ENTRIES 8 @@ -382,7 +625,7 @@ struct TransTable_struct { u32 RepQCount; u32 RepQCtrAddrLow32; u32 RepQCtrAddrHigh32; -#define MAX_REPLY_QUEUES 8 +#define MAX_REPLY_QUEUES 64 struct vals32 
RepQAddr[MAX_REPLY_QUEUES]; }; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index cc82d0f322b..8dd47689d58 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2179,7 +2179,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) return 0; } - if (vhost->state == IBMVFC_ACTIVE) { + if (vhost->logged_in) { evt = ibmvfc_get_event(vhost); ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); @@ -2190,7 +2190,12 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) tmf->common.length = sizeof(*tmf); tmf->scsi_id = rport->port_id; int_to_scsilun(sdev->lun, &tmf->lun); - tmf->flags = (type | IBMVFC_TMF_LUA_VALID); + if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS)) + type &= ~IBMVFC_TMF_SUPPRESS_ABTS; + if (vhost->state == IBMVFC_ACTIVE) + tmf->flags = (type | IBMVFC_TMF_LUA_VALID); + else + tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID); tmf->cancel_key = (unsigned long)sdev->hostdata; tmf->my_cancel_key = (unsigned long)starget->hostdata; @@ -2203,7 +2208,10 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) if (rsp_rc != 0) { sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc); - return -EIO; + /* If a failure is received, the host adapter is most likely going + through reset; return success so the caller will wait for the command + being cancelled to get returned */ + return 0; } sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); @@ -2216,7 +2224,15 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) if (status != IBMVFC_MAD_SUCCESS) { sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status); - return -EIO; + switch (status) { + case IBMVFC_MAD_DRIVER_FAILED: + case IBMVFC_MAD_CRQ_ERROR: + /* Host adapter is most likely going through reset; return success so + the caller will wait for the command being cancelled to get returned */ + return 0; + default: + return -EIO; + }; } sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n"); @@ -2327,7 +2343,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) timeout = wait_for_completion_timeout(&evt->comp, timeout); if (!timeout) { - rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); + rc = ibmvfc_cancel_all(sdev, 0); if (!rc) { rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key); if (rc == SUCCESS) @@ -2383,24 +2399,30 @@ out: * @cmd: scsi command to abort * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); - int cancel_rc, abort_rc; + int cancel_rc, block_rc; int rc = FAILED; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); - abort_rc = ibmvfc_abort_task_set(sdev); + if (block_rc != FAST_IO_FAIL) { + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); + ibmvfc_abort_task_set(sdev); + } else + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); - if (!cancel_rc && !abort_rc) + if (!cancel_rc) rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } @@ -2410,29 +2432,47 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) * @cmd: scsi command struct * *
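 * (Editor's note: the FAST_IO_FAIL handling added by this patch takes
 * the same shape in each error handler; a hedged sketch of the flow,
 * using only names from the surrounding hunks:
 *
 *	block_rc = fc_block_scsi_eh(cmd);
 *	if (block_rc != FAST_IO_FAIL) {
 *		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
 *		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
 *	} else
 *		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
 *	if (block_rc == FAST_IO_FAIL && rc != FAILED)
 *		rc = FAST_IO_FAIL;
 *
 * That is, once the rport has been blocked past fast_io_fail_tmo the
 * handler only cancels with ABTS suppressed and reports FAST_IO_FAIL
 * upward instead of issuing a new reset.)
 *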
Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); - int cancel_rc, reset_rc; + int cancel_rc, block_rc, reset_rc = 0; int rc = FAILED; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); - reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); + if (block_rc != FAST_IO_FAIL) { + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); + reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); + } else + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); if (!cancel_rc && !reset_rc) rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } /** + * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function + * @sdev: scsi device struct + * @data: return code + * + **/ +static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data) +{ + unsigned long *rc = data; + *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); +} + +/** * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function * @sdev: scsi device struct * @data: return code @@ -2449,26 +2489,33 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data) * @cmd: scsi command struct * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); struct scsi_target *starget = scsi_target(sdev); - int reset_rc; + int block_rc; + int reset_rc = 0; int rc = FAILED; unsigned long cancel_rc = 0; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); - reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); + if (block_rc != FAST_IO_FAIL) { + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); + reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); + } else + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset); if (!cancel_rc && !reset_rc) rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } @@ -2480,12 +2527,16 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) **/ static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd) { - int rc; + int rc, block_rc; struct ibmvfc_host *vhost = shost_priv(cmd->device->host); - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); dev_err(vhost->dev, "Resetting connection due to error recovery\n"); rc = ibmvfc_issue_fc_host_lip(vhost->host); + + if (block_rc == FAST_IO_FAIL) + return FAST_IO_FAIL; + return rc ? 
FAILED : SUCCESS; } @@ -2509,8 +2560,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport) dev_rport = starget_to_rport(scsi_target(sdev)); if (dev_rport != rport) continue; - ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); - ibmvfc_abort_task_set(sdev); + ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); } rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport); @@ -4465,7 +4515,7 @@ static int ibmvfc_work(void *data) struct ibmvfc_host *vhost = data; int rc; - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); while (1) { rc = wait_event_interruptible(vhost->work_wait_q, diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 3be8af624e6..017a5290e8c 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -29,8 +29,8 @@ #include "viosrp.h" #define IBMVFC_NAME "ibmvfc" -#define IBMVFC_DRIVER_VERSION "1.0.10" -#define IBMVFC_DRIVER_DATE "(August 24, 2012)" +#define IBMVFC_DRIVER_VERSION "1.0.11" +#define IBMVFC_DRIVER_DATE "(April 12, 2013)" #define IBMVFC_DEFAULT_TIMEOUT 60 #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 @@ -208,10 +208,10 @@ struct ibmvfc_npiv_login_resp { u16 error; u32 flags; #define IBMVFC_NATIVE_FC 0x01 -#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 u32 reserved; u64 capabilities; #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 +#define IBMVFC_CAN_SUPPRESS_ABTS 0x10 u32 max_cmds; u32 scsi_id_sz; u64 max_dma_len; @@ -351,6 +351,7 @@ struct ibmvfc_tmf { #define IBMVFC_TMF_LUN_RESET 0x10 #define IBMVFC_TMF_TGT_RESET 0x20 #define IBMVFC_TMF_LUA_VALID 0x40 +#define IBMVFC_TMF_SUPPRESS_ABTS 0x80 u32 cancel_key; u32 my_cancel_key; u32 pad; diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index a044f593e8b..7b23f21f22f 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue) if (crq->valid & 0x80) { if (++queue->cur == queue->size) queue->cur = 0; + + /* Ensure the read of the valid bit occurs before reading any + * other bits of the CRQ entry + */ + rmb(); } else crq = NULL; spin_unlock_irqrestore(&queue->lock, flags); @@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, { struct vio_dev *vdev = to_vio_dev(hostdata->dev); + /* + * Ensure the command buffer is flushed to memory before handing it + * over to the VIOS to prevent it from fetching any stale data. 
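+ *
+ * (Editor's note, not part of the original comment: this mb() is the
+ * producer half of a barrier pair; the consumer half is the rmb()
+ * added to crq_queue_next_crq() above, which keeps the read of
+ * crq->valid ordered before the reads of the rest of the entry.
+ * Schematically:
+ *
+ *	client:  fill command buffer;  mb();   H_SEND_CRQ hcall
+ *	server:  observe valid bit;    rmb();  read rest of the CRQ entry
+ * )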
+ */ + mb(); return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); } @@ -241,7 +251,7 @@ static void gather_partition_info(void) struct device_node *rootdn; const char *ppartition_name; - const unsigned int *p_number_ptr; + const __be32 *p_number_ptr; /* Retrieve information about this partition */ rootdn = of_find_node_by_path("/"); @@ -255,7 +265,7 @@ static void gather_partition_info(void) sizeof(partition_name)); p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL); if (p_number_ptr) - partition_number = *p_number_ptr; + partition_number = of_read_number(p_number_ptr, 1); of_node_put(rootdn); } @@ -270,10 +280,11 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata) strncpy(hostdata->madapter_info.partition_name, partition_name, sizeof(hostdata->madapter_info.partition_name)); - hostdata->madapter_info.partition_number = partition_number; + hostdata->madapter_info.partition_number = + cpu_to_be32(partition_number); - hostdata->madapter_info.mad_version = 1; - hostdata->madapter_info.os_type = 2; + hostdata->madapter_info.mad_version = cpu_to_be32(1); + hostdata->madapter_info.os_type = cpu_to_be32(2); } /** @@ -464,9 +475,9 @@ static int initialize_event_pool(struct event_pool *pool, memset(&evt->crq, 0x00, sizeof(evt->crq)); atomic_set(&evt->free, 1); evt->crq.valid = 0x80; - evt->crq.IU_length = sizeof(*evt->xfer_iu); - evt->crq.IU_data_ptr = pool->iu_token + - sizeof(*evt->xfer_iu) * i; + evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu)); + evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token + + sizeof(*evt->xfer_iu) * i); evt->xfer_iu = pool->iu_storage + i; evt->hostdata = hostdata; evt->ext_list = NULL; @@ -588,7 +599,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct, evt_struct->cmnd_done = NULL; evt_struct->sync_srp = NULL; evt_struct->crq.format = format; - evt_struct->crq.timeout = timeout; + evt_struct->crq.timeout = cpu_to_be16(timeout); evt_struct->done = done; } @@ -659,8 +670,8 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg, scsi_for_each_sg(cmd, sg, nseg, i) { struct srp_direct_buf *descr = md + i; - descr->va = sg_dma_address(sg); - descr->len = sg_dma_len(sg); + descr->va = cpu_to_be64(sg_dma_address(sg)); + descr->len = cpu_to_be32(sg_dma_len(sg)); descr->key = 0; total_length += sg_dma_len(sg); } @@ -703,13 +714,14 @@ static int map_sg_data(struct scsi_cmnd *cmd, } indirect->table_desc.va = 0; - indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf); + indirect->table_desc.len = cpu_to_be32(sg_mapped * + sizeof(struct srp_direct_buf)); indirect->table_desc.key = 0; if (sg_mapped <= MAX_INDIRECT_BUFS) { total_length = map_sg_list(cmd, sg_mapped, &indirect->desc_list[0]); - indirect->len = total_length; + indirect->len = cpu_to_be32(total_length); return 1; } @@ -731,9 +743,10 @@ static int map_sg_data(struct scsi_cmnd *cmd, total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list); - indirect->len = total_length; - indirect->table_desc.va = evt_struct->ext_list_token; - indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]); + indirect->len = cpu_to_be32(total_length); + indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token); + indirect->table_desc.len = cpu_to_be32(sg_mapped * + sizeof(indirect->desc_list[0])); memcpy(indirect->desc_list, evt_struct->ext_list, MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf)); return 1; @@ -794,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code) evt->hostdata->dev); 
if (evt->cmnd_done) evt->cmnd_done(evt->cmnd); - } else if (evt->done) + } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT && + evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ) evt->done(evt); free_event_struct(&evt->hostdata->pool, evt); spin_lock_irqsave(hostdata->host->host_lock, flags); @@ -849,7 +863,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, struct ibmvscsi_host_data *hostdata, unsigned long timeout) { - u64 *crq_as_u64 = (u64 *) &evt_struct->crq; + __be64 *crq_as_u64 = (__be64 *)&evt_struct->crq; int request_status = 0; int rc; int srp_req = 0; @@ -920,8 +934,9 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, add_timer(&evt_struct->timer); } - if ((rc = - ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) { + rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]), + be64_to_cpu(crq_as_u64[1])); + if (rc != 0) { list_del(&evt_struct->list); del_timer(&evt_struct->timer); @@ -987,15 +1002,16 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct) if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION) memcpy(cmnd->sense_buffer, rsp->data, - rsp->sense_data_len); + be32_to_cpu(rsp->sense_data_len)); unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, evt_struct->hostdata->dev); if (rsp->flags & SRP_RSP_FLAG_DOOVER) - scsi_set_resid(cmnd, rsp->data_out_res_cnt); + scsi_set_resid(cmnd, + be32_to_cpu(rsp->data_out_res_cnt)); else if (rsp->flags & SRP_RSP_FLAG_DIOVER) - scsi_set_resid(cmnd, rsp->data_in_res_cnt); + scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt)); } if (evt_struct->cmnd_done) @@ -1037,7 +1053,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd, memset(srp_cmd, 0x00, SRP_MAX_IU_LEN); srp_cmd->opcode = SRP_CMD; memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb)); - srp_cmd->lun = ((u64) lun) << 48; + srp_cmd->lun = cpu_to_be64(((u64)lun) << 48); if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) { if (!firmware_has_feature(FW_FEATURE_CMO)) @@ -1062,9 +1078,10 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd, if ((in_fmt == SRP_DATA_DESC_INDIRECT || out_fmt == SRP_DATA_DESC_INDIRECT) && indirect->table_desc.va == 0) { - indirect->table_desc.va = evt_struct->crq.IU_data_ptr + + indirect->table_desc.va = + cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) + offsetof(struct srp_cmd, add_data) + - offsetof(struct srp_indirect_buf, desc_list); + offsetof(struct srp_indirect_buf, desc_list)); } return ibmvscsi_send_srp_event(evt_struct, hostdata, 0); @@ -1158,7 +1175,7 @@ static void login_rsp(struct srp_event_struct *evt_struct) * request_limit could have been set to -1 by this client. 
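 *
 * (Editor's aside on this patch's endianness conversions, not part of
 * the original comment: wire-visible fields are declared with __be
 * types and swapped exactly once at the point of use, so sparse can
 * flag a missed conversion. A minimal sketch of the pattern, with a
 * hypothetical field name:
 *
 *	__be32 wire_val;			declared in wire byte order
 *	u32 val = be32_to_cpu(wire_val);	CPU-side read
 *	wire_val = cpu_to_be32(val + 1);	write back in wire order
 * )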
*/ atomic_set(&hostdata->request_limit, - evt_struct->xfer_iu->srp.login_rsp.req_lim_delta); + be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta)); /* If we had any pending I/Os, kick them */ scsi_unblock_requests(hostdata->host); @@ -1184,8 +1201,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) login = &evt_struct->iu.srp.login_req; memset(login, 0, sizeof(*login)); login->opcode = SRP_LOGIN_REQ; - login->req_it_iu_len = sizeof(union srp_iu); - login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; + login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu)); + login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | + SRP_BUF_FORMAT_INDIRECT); spin_lock_irqsave(hostdata->host->host_lock, flags); /* Start out with a request limit of 0, since this is negotiated in @@ -1214,12 +1232,13 @@ static void capabilities_rsp(struct srp_event_struct *evt_struct) dev_err(hostdata->dev, "error 0x%X getting capabilities info\n", evt_struct->xfer_iu->mad.capabilities.common.status); } else { - if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP) + if (hostdata->caps.migration.common.server_support != + cpu_to_be16(SERVER_SUPPORTS_CAP)) dev_info(hostdata->dev, "Partition migration not supported\n"); if (client_reserve) { if (hostdata->caps.reserve.common.server_support == - SERVER_SUPPORTS_CAP) + cpu_to_be16(SERVER_SUPPORTS_CAP)) dev_info(hostdata->dev, "Client reserve enabled\n"); else dev_info(hostdata->dev, "Client reserve not supported\n"); @@ -1251,9 +1270,9 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) req = &evt_struct->iu.mad.capabilities; memset(req, 0, sizeof(*req)); - hostdata->caps.flags = CAP_LIST_SUPPORTED; + hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED); if (hostdata->client_migrated) - hostdata->caps.flags |= CLIENT_MIGRATED; + hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED); strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), sizeof(hostdata->caps.name)); @@ -1264,22 +1283,31 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0'; - req->common.type = VIOSRP_CAPABILITIES_TYPE; - req->buffer = hostdata->caps_addr; + req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE); + req->buffer = cpu_to_be64(hostdata->caps_addr); - hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES; - hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration); - hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP; - hostdata->caps.migration.ecl = 1; + hostdata->caps.migration.common.cap_type = + cpu_to_be32(MIGRATION_CAPABILITIES); + hostdata->caps.migration.common.length = + cpu_to_be16(sizeof(hostdata->caps.migration)); + hostdata->caps.migration.common.server_support = + cpu_to_be16(SERVER_SUPPORTS_CAP); + hostdata->caps.migration.ecl = cpu_to_be32(1); if (client_reserve) { - hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES; - hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve); - hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP; - hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2; - req->common.length = sizeof(hostdata->caps); + hostdata->caps.reserve.common.cap_type = + cpu_to_be32(RESERVATION_CAPABILITIES); + hostdata->caps.reserve.common.length = + cpu_to_be16(sizeof(hostdata->caps.reserve)); + hostdata->caps.reserve.common.server_support = + 
cpu_to_be16(SERVER_SUPPORTS_CAP); + hostdata->caps.reserve.type = + cpu_to_be32(CLIENT_RESERVE_SCSI_2); + req->common.length = + cpu_to_be16(sizeof(hostdata->caps)); } else - req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve); + req->common.length = cpu_to_be16(sizeof(hostdata->caps) - + sizeof(hostdata->caps.reserve)); spin_lock_irqsave(hostdata->host->host_lock, flags); if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) @@ -1297,7 +1325,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) static void fast_fail_rsp(struct srp_event_struct *evt_struct) { struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; - u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status; + u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status); if (status == VIOSRP_MAD_NOT_SUPPORTED) dev_err(hostdata->dev, "fast_fail not supported in server\n"); @@ -1334,8 +1362,8 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata) fast_fail_mad = &evt_struct->iu.mad.fast_fail; memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); - fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL; - fast_fail_mad->common.length = sizeof(*fast_fail_mad); + fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL); + fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad)); spin_lock_irqsave(hostdata->host->host_lock, flags); rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); @@ -1362,15 +1390,15 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct) "host partition %s (%d), OS %d, max io %u\n", hostdata->madapter_info.srp_version, hostdata->madapter_info.partition_name, - hostdata->madapter_info.partition_number, - hostdata->madapter_info.os_type, - hostdata->madapter_info.port_max_txu[0]); + be32_to_cpu(hostdata->madapter_info.partition_number), + be32_to_cpu(hostdata->madapter_info.os_type), + be32_to_cpu(hostdata->madapter_info.port_max_txu[0])); if (hostdata->madapter_info.port_max_txu[0]) hostdata->host->max_sectors = - hostdata->madapter_info.port_max_txu[0] >> 9; + be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9; - if (hostdata->madapter_info.os_type == 3 && + if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 && strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { dev_err(hostdata->dev, "host (Ver. 
%s) doesn't support large transfers\n", hostdata->madapter_info.srp_version); @@ -1379,7 +1407,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct) hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; } - if (hostdata->madapter_info.os_type == 3) { + if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) { enable_fast_fail(hostdata); return; } @@ -1414,9 +1442,9 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) req = &evt_struct->iu.mad.adapter_info; memset(req, 0x00, sizeof(*req)); - req->common.type = VIOSRP_ADAPTER_INFO_TYPE; - req->common.length = sizeof(hostdata->madapter_info); - req->buffer = hostdata->adapter_info_addr; + req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE); + req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info)); + req->buffer = cpu_to_be64(hostdata->adapter_info_addr); spin_lock_irqsave(hostdata->host->host_lock, flags); if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) @@ -1501,7 +1529,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) /* Set up an abort SRP command */ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); tsk_mgmt->opcode = SRP_TSK_MGMT; - tsk_mgmt->lun = ((u64) lun) << 48; + tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48); tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK; tsk_mgmt->task_tag = (u64) found_evt; @@ -1624,7 +1652,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) /* Set up a lun reset SRP command */ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); tsk_mgmt->opcode = SRP_TSK_MGMT; - tsk_mgmt->lun = ((u64) lun) << 48; + tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48); tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET; evt->sync_srp = &srp_rsp; @@ -1735,8 +1763,9 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, { long rc; unsigned long flags; + /* The hypervisor copies our tag value here so no byteswapping */ struct srp_event_struct *evt_struct = - (struct srp_event_struct *)crq->IU_data_ptr; + (__force struct srp_event_struct *)crq->IU_data_ptr; switch (crq->valid) { case 0xC0: /* initialization */ switch (crq->format) { @@ -1792,18 +1821,18 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, */ if (!valid_event_struct(&hostdata->pool, evt_struct)) { dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n", - (void *)crq->IU_data_ptr); + evt_struct); return; } if (atomic_read(&evt_struct->free)) { dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n", - (void *)crq->IU_data_ptr); + evt_struct); return; } if (crq->format == VIOSRP_SRP_FORMAT) - atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta, + atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta), &hostdata->request_limit); del_timer(&evt_struct->timer); @@ -1856,13 +1885,11 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, /* Set up a lun reset SRP command */ memset(host_config, 0x00, sizeof(*host_config)); - host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; - host_config->common.length = length; - host_config->buffer = addr = dma_map_single(hostdata->dev, buffer, - length, - DMA_BIDIRECTIONAL); + host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE); + host_config->common.length = cpu_to_be16(length); + addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL); - if (dma_mapping_error(hostdata->dev, host_config->buffer)) { + if (dma_mapping_error(hostdata->dev, addr)) { if (!firmware_has_feature(FW_FEATURE_CMO)) dev_err(hostdata->dev, "dma_mapping error getting host config\n"); @@ -1870,6 
+1897,8 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, return -1; } + host_config->buffer = cpu_to_be64(addr); + init_completion(&evt_struct->comp); spin_lock_irqsave(hostdata->host->host_lock, flags); rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); @@ -1899,8 +1928,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) sdev->allow_restart = 1; blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); } - scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); spin_unlock_irqrestore(shost->host_lock, lock_flags); + scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); return 0; } @@ -2195,7 +2224,7 @@ static int ibmvscsi_work(void *data) struct ibmvscsi_host_data *hostdata = data; int rc; - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); while (1) { rc = wait_event_interruptible(hostdata->work_wait_q, diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index bf9eca84516..56f8a861ed7 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c @@ -589,7 +589,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target) } err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt, - IRQF_DISABLED, "ibmvstgt", target); + 0, "ibmvstgt", target); if (err) goto req_irq_failed; diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h index 2cd735d1d19..11624308762 100644 --- a/drivers/scsi/ibmvscsi/viosrp.h +++ b/drivers/scsi/ibmvscsi/viosrp.h @@ -75,9 +75,9 @@ struct viosrp_crq { u8 format; /* SCSI vs out-of-band */ u8 reserved; u8 status; /* non-scsi failure? (e.g. DMA failure) */ - u16 timeout; /* in seconds */ - u16 IU_length; /* in bytes */ - u64 IU_data_ptr; /* the TCE for transferring data */ + __be16 timeout; /* in seconds */ + __be16 IU_length; /* in bytes */ + __be64 IU_data_ptr; /* the TCE for transferring data */ }; /* MADs are Management requests above and beyond the IUs defined in the SRP @@ -124,10 +124,10 @@ enum viosrp_capability_flag { * Common MAD header */ struct mad_common { - u32 type; - u16 status; - u16 length; - u64 tag; + __be32 type; + __be16 status; + __be16 length; + __be64 tag; }; /* @@ -139,23 +139,23 @@ struct mad_common { */ struct viosrp_empty_iu { struct mad_common common; - u64 buffer; - u32 port; + __be64 buffer; + __be32 port; }; struct viosrp_error_log { struct mad_common common; - u64 buffer; + __be64 buffer; }; struct viosrp_adapter_info { struct mad_common common; - u64 buffer; + __be64 buffer; }; struct viosrp_host_config { struct mad_common common; - u64 buffer; + __be64 buffer; }; struct viosrp_fast_fail { @@ -164,27 +164,27 @@ struct viosrp_fast_fail { struct viosrp_capabilities { struct mad_common common; - u64 buffer; + __be64 buffer; }; struct mad_capability_common { - u32 cap_type; - u16 length; - u16 server_support; + __be32 cap_type; + __be16 length; + __be16 server_support; }; struct mad_reserve_cap { struct mad_capability_common common; - u32 type; + __be32 type; }; struct mad_migration_cap { struct mad_capability_common common; - u32 ecl; + __be32 ecl; }; struct capabilities{ - u32 flags; + __be32 flags; char name[SRP_MAX_LOC_LEN]; char loc[SRP_MAX_LOC_LEN]; struct mad_migration_cap migration; @@ -208,10 +208,10 @@ union viosrp_iu { struct mad_adapter_info_data { char srp_version[8]; char partition_name[96]; - u32 partition_number; - u32 mad_version; - u32 os_type; - u32 port_max_txu[8]; /* per-port maximum transfer */ + __be32 partition_number; + __be32 mad_version; + __be32 
os_type; + __be32 port_max_txu[8]; /* per-port maximum transfer */ }; #endif diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 26cd9d1d757..89a8266560d 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c @@ -121,45 +121,26 @@ static inline void imm_pb_release(imm_struct *dev) * testing... * Also gives a method to use a script to obtain optimum timings (TODO) */ -static inline int imm_proc_write(imm_struct *dev, char *buffer, int length) +static int imm_write_info(struct Scsi_Host *host, char *buffer, int length) { - unsigned long x; + imm_struct *dev = imm_dev(host); if ((length > 5) && (strncmp(buffer, "mode=", 5) == 0)) { - x = simple_strtoul(buffer + 5, NULL, 0); - dev->mode = x; + dev->mode = simple_strtoul(buffer + 5, NULL, 0); return length; } printk("imm /proc: invalid variable\n"); - return (-EINVAL); + return -EINVAL; } -static int imm_proc_info(struct Scsi_Host *host, char *buffer, char **start, - off_t offset, int length, int inout) +static int imm_show_info(struct seq_file *m, struct Scsi_Host *host) { imm_struct *dev = imm_dev(host); - int len = 0; - - if (inout) - return imm_proc_write(dev, buffer, length); - - len += sprintf(buffer + len, "Version : %s\n", IMM_VERSION); - len += - sprintf(buffer + len, "Parport : %s\n", - dev->dev->port->name); - len += - sprintf(buffer + len, "Mode : %s\n", - IMM_MODE_STRING[dev->mode]); - /* Request for beyond end of buffer */ - if (offset > len) - return 0; - - *start = buffer + offset; - len -= offset; - if (len > length) - len = length; - return len; + seq_printf(m, "Version : %s\n", IMM_VERSION); + seq_printf(m, "Parport : %s\n", dev->dev->port->name); + seq_printf(m, "Mode : %s\n", IMM_MODE_STRING[dev->mode]); + return 0; } #if IMM_DEBUG > 0 @@ -1118,7 +1099,8 @@ static int imm_adjust_queue(struct scsi_device *device) static struct scsi_host_template imm_template = { .module = THIS_MODULE, .proc_name = "imm", - .proc_info = imm_proc_info, + .show_info = imm_show_info, + .write_info = imm_write_info, .name = "Iomega VPI2 (imm) interface", .queuecommand = imm_queuecommand, .eh_abort_handler = imm_abort, diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c index deb5b6d8398..b1c4d831137 100644 --- a/drivers/scsi/in2000.c +++ b/drivers/scsi/in2000.c @@ -2015,7 +2015,7 @@ static int __init in2000_detect(struct scsi_host_template * tpnt) write1_io(0, IO_FIFO_READ); /* start fifo out in read mode */ write1_io(0, IO_INTR_MASK); /* allow all ints */ x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT]; - if (request_irq(x, in2000_intr, IRQF_DISABLED, "in2000", instance)) { + if (request_irq(x, in2000_intr, 0, "in2000", instance)) { printk("in2000_detect: Unable to allocate IRQ.\n"); detect_count--; continue; @@ -2166,152 +2166,117 @@ static int in2000_biosparam(struct scsi_device *sdev, struct block_device *bdev, } -static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off, int len, int in) +static int in2000_write_info(struct Scsi_Host *instance, char *buf, int len) { #ifdef PROC_INTERFACE char *bp; - char tbuf[128]; - unsigned long flags; struct IN2000_hostdata *hd; - Scsi_Cmnd *cmd; int x, i; - static int stop = 0; hd = (struct IN2000_hostdata *) instance->hostdata; -/* If 'in' is TRUE we need to _read_ the proc file. 
We accept the following - * keywords (same format as command-line, but only ONE per read): - * debug - * disconnect - * period - * resync - * proc - */ - - if (in) { - buf[len] = '\0'; - bp = buf; - if (!strncmp(bp, "debug:", 6)) { - bp += 6; - hd->args = simple_strtoul(bp, NULL, 0) & DB_MASK; - } else if (!strncmp(bp, "disconnect:", 11)) { - bp += 11; - x = simple_strtoul(bp, NULL, 0); - if (x < DIS_NEVER || x > DIS_ALWAYS) - x = DIS_ADAPTIVE; - hd->disconnect = x; - } else if (!strncmp(bp, "period:", 7)) { - bp += 7; - x = simple_strtoul(bp, NULL, 0); - hd->default_sx_per = sx_table[round_period((unsigned int) x)].period_ns; - } else if (!strncmp(bp, "resync:", 7)) { - bp += 7; - x = simple_strtoul(bp, NULL, 0); - for (i = 0; i < 7; i++) - if (x & (1 << i)) - hd->sync_stat[i] = SS_UNSET; - } else if (!strncmp(bp, "proc:", 5)) { - bp += 5; - hd->proc = simple_strtoul(bp, NULL, 0); - } else if (!strncmp(bp, "level2:", 7)) { - bp += 7; - hd->level2 = simple_strtoul(bp, NULL, 0); - } - return len; + buf[len] = '\0'; + bp = buf; + if (!strncmp(bp, "debug:", 6)) { + bp += 6; + hd->args = simple_strtoul(bp, NULL, 0) & DB_MASK; + } else if (!strncmp(bp, "disconnect:", 11)) { + bp += 11; + x = simple_strtoul(bp, NULL, 0); + if (x < DIS_NEVER || x > DIS_ALWAYS) + x = DIS_ADAPTIVE; + hd->disconnect = x; + } else if (!strncmp(bp, "period:", 7)) { + bp += 7; + x = simple_strtoul(bp, NULL, 0); + hd->default_sx_per = sx_table[round_period((unsigned int) x)].period_ns; + } else if (!strncmp(bp, "resync:", 7)) { + bp += 7; + x = simple_strtoul(bp, NULL, 0); + for (i = 0; i < 7; i++) + if (x & (1 << i)) + hd->sync_stat[i] = SS_UNSET; + } else if (!strncmp(bp, "proc:", 5)) { + bp += 5; + hd->proc = simple_strtoul(bp, NULL, 0); + } else if (!strncmp(bp, "level2:", 7)) { + bp += 7; + hd->level2 = simple_strtoul(bp, NULL, 0); } +#endif + return len; +} + +static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance) +{ + +#ifdef PROC_INTERFACE + unsigned long flags; + struct IN2000_hostdata *hd; + Scsi_Cmnd *cmd; + int x; + + hd = (struct IN2000_hostdata *) instance->hostdata; spin_lock_irqsave(instance->host_lock, flags); - bp = buf; - *bp = '\0'; - if (hd->proc & PR_VERSION) { - sprintf(tbuf, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE); - strcat(bp, tbuf); - } + if (hd->proc & PR_VERSION) + seq_printf(m, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE); + if (hd->proc & PR_INFO) { - sprintf(tbuf, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No"); - strcat(bp, tbuf); - strcat(bp, "\nsync_xfer[] = "); - for (x = 0; x < 7; x++) { - sprintf(tbuf, "\t%02x", hd->sync_xfer[x]); - strcat(bp, tbuf); - } - strcat(bp, "\nsync_stat[] = "); - for (x = 0; x < 7; x++) { - sprintf(tbuf, "\t%02x", hd->sync_stat[x]); - strcat(bp, tbuf); - } + seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? 
"Yes" : "No"); + seq_printf(m, "\nsync_xfer[] = "); + for (x = 0; x < 7; x++) + seq_printf(m, "\t%02x", hd->sync_xfer[x]); + seq_printf(m, "\nsync_stat[] = "); + for (x = 0; x < 7; x++) + seq_printf(m, "\t%02x", hd->sync_stat[x]); } #ifdef PROC_STATISTICS if (hd->proc & PR_STATISTICS) { - strcat(bp, "\ncommands issued: "); - for (x = 0; x < 7; x++) { - sprintf(tbuf, "\t%ld", hd->cmd_cnt[x]); - strcat(bp, tbuf); - } - strcat(bp, "\ndisconnects allowed:"); - for (x = 0; x < 7; x++) { - sprintf(tbuf, "\t%ld", hd->disc_allowed_cnt[x]); - strcat(bp, tbuf); - } - strcat(bp, "\ndisconnects done: "); - for (x = 0; x < 7; x++) { - sprintf(tbuf, "\t%ld", hd->disc_done_cnt[x]); - strcat(bp, tbuf); - } - sprintf(tbuf, "\ninterrupts: \t%ld", hd->int_cnt); - strcat(bp, tbuf); + seq_printf(m, "\ncommands issued: "); + for (x = 0; x < 7; x++) + seq_printf(m, "\t%ld", hd->cmd_cnt[x]); + seq_printf(m, "\ndisconnects allowed:"); + for (x = 0; x < 7; x++) + seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]); + seq_printf(m, "\ndisconnects done: "); + for (x = 0; x < 7; x++) + seq_printf(m, "\t%ld", hd->disc_done_cnt[x]); + seq_printf(m, "\ninterrupts: \t%ld", hd->int_cnt); } #endif if (hd->proc & PR_CONNECTED) { - strcat(bp, "\nconnected: "); + seq_printf(m, "\nconnected: "); if (hd->connected) { cmd = (Scsi_Cmnd *) hd->connected; - sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); - strcat(bp, tbuf); + seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); } } if (hd->proc & PR_INPUTQ) { - strcat(bp, "\ninput_Q: "); + seq_printf(m, "\ninput_Q: "); cmd = (Scsi_Cmnd *) hd->input_Q; while (cmd) { - sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); - strcat(bp, tbuf); + seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); cmd = (Scsi_Cmnd *) cmd->host_scribble; } } if (hd->proc & PR_DISCQ) { - strcat(bp, "\ndisconnected_Q:"); + seq_printf(m, "\ndisconnected_Q:"); cmd = (Scsi_Cmnd *) hd->disconnected_Q; while (cmd) { - sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); - strcat(bp, tbuf); + seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); cmd = (Scsi_Cmnd *) cmd->host_scribble; } } if (hd->proc & PR_TEST) { ; /* insert your own custom function here */ } - strcat(bp, "\n"); + seq_printf(m, "\n"); spin_unlock_irqrestore(instance->host_lock, flags); - *start = buf; - if (stop) { - stop = 0; - return 0; /* return 0 to signal end-of-file */ - } - if (off > 0x40000) /* ALWAYS stop after 256k bytes have been read */ - stop = 1; - if (hd->proc & PR_STOP) /* stop every other time */ - stop = 1; - return strlen(bp); - -#else /* PROC_INTERFACE */ - - return 0; - #endif /* PROC_INTERFACE */ - + return 0; } MODULE_LICENSE("GPL"); @@ -2319,7 +2284,8 @@ MODULE_LICENSE("GPL"); static struct scsi_host_template driver_template = { .proc_name = "in2000", - .proc_info = in2000_proc_info, + .write_info = in2000_write_info, + .show_info = in2000_show_info, .name = "Always IN2000", .detect = in2000_detect, .release = in2000_release, diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 280d5af113d..e5dae7b54d9 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c @@ -2931,7 +2931,7 @@ static int initio_probe_one(struct pci_dev *pdev, shost->base = host->addr; shost->sg_tablesize = TOTAL_SG_ENTRY; - error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost); + error = request_irq(pdev->irq, i91u_intr, IRQF_SHARED, "i91u", 
shost); if (error < 0) { printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq); goto out_free_scbs; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index f328089a106..924b0ba74df 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -220,7 +220,7 @@ module_param_named(max_devs, ipr_max_devs, int, 0); MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. " "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]"); module_param_named(number_of_msix, ipr_number_of_msix, int, 0); -MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5). (default:2)"); +MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)"); MODULE_LICENSE("GPL"); MODULE_VERSION(IPR_DRIVER_VERSION); @@ -281,12 +281,22 @@ struct ipr_error_table_t ipr_error_table[] = { "FFF6: Failure prediction threshold exceeded"}, {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL, "8009: Impending cache battery pack failure"}, + {0x02040100, 0, 0, + "Logical Unit in process of becoming ready"}, + {0x02040200, 0, 0, + "Initializing command required"}, {0x02040400, 0, 0, "34FF: Disk device format in progress"}, + {0x02040C00, 0, 0, + "Logical unit not accessible, target port in unavailable state"}, {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL, "9070: IOA requested reset"}, {0x023F0000, 0, 0, "Synchronization required"}, + {0x02408500, 0, 0, + "IOA microcode download required"}, + {0x02408600, 0, 0, + "Device bus connection is prohibited by host"}, {0x024E0000, 0, 0, "No ready, IOA shutdown"}, {0x025A0000, 0, 0, @@ -385,6 +395,8 @@ struct ipr_error_table_t ipr_error_table[] = { "4030: Incorrect multipath connection"}, {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL, "4110: Unsupported enclosure function"}, + {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL, + "4120: SAS cable VPD cannot be read"}, {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL, "FFF4: Command to logical unit failed"}, {0x05240000, 1, 0, @@ -407,10 +419,18 @@ struct ipr_error_table_t ipr_error_table[] = { "Illegal request, command sequence error"}, {0x052C8000, 1, 0, "Illegal request, dual adapter support not enabled"}, + {0x052C8100, 1, 0, + "Illegal request, another cable connector was physically disabled"}, + {0x054E8000, 1, 0, + "Illegal request, inconsistent group id/group count"}, {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL, "9031: Array protection temporarily suspended, protection resuming"}, {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL, "9040: Array protection temporarily suspended, protection resuming"}, + {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL, + "4080: IOA exceeded maximum operating temperature"}, + {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL, + "4085: Service required"}, {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL, "3140: Device bus not ready to ready transition"}, {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL, @@ -423,6 +443,8 @@ struct ipr_error_table_t ipr_error_table[] = { "FFFB: SCSI bus was reset by another initiator"}, {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL, "3029: A device replacement has occurred"}, + {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL, + "4102: Device bus fabric performance degradation"}, {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL, "9051: IOA cache data exists for a missing or failed device"}, {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL, @@ -445,6 +467,14 @@ struct ipr_error_table_t ipr_error_table[] = { "9076: Configuration error, missing remote IOA"}, {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL, "4050: Enclosure does not support a required multipath function"}, + {0x06679800, 0, 
IPR_DEFAULT_LOG_LEVEL, + "4121: Configuration error, required cable is missing"}, {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL, + "4122: Cable is not plugged into the correct location on remote IOA"}, {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL, + "4123: Configuration error, invalid cable vital product data"}, {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL, + "4124: Configuration error, both cable ends are plugged into the same IOA"}, {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL, "4070: Logically bad block written on device"}, {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL, @@ -507,10 +537,18 @@ struct ipr_error_table_t ipr_error_table[] = { "9062: One or more disks are missing from an array"}, {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL, "9063: Maximum number of functional arrays has been exceeded"}, + {0x07279A00, 0, 0, + "Data protect, other volume set problem"}, {0x0B260000, 0, 0, "Aborted command, invalid descriptor"}, + {0x0B3F9000, 0, 0, + "Target operating conditions have changed, dual adapter takeover"}, + {0x0B530200, 0, 0, + "Aborted command, medium removal prevented"}, {0x0B5A0000, 0, 0, - "Command terminated by host"} + "Command terminated by host"}, + {0x0B5B8000, 0, 0, + "Aborted command, command terminated by host"} }; static const struct ipr_ses_table_entry ipr_ses_table[] = { @@ -1105,6 +1143,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res, res->add_to_ml = 0; res->del_from_ml = 0; res->resetting_device = 0; + res->reset_occurred = 0; res->sdev = NULL; res->sata_port = NULL; @@ -2329,6 +2368,42 @@ static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg, } /** + * ipr_log_sis64_device_error - Log a SIS64 device error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_21_error *error; + char buffer[IPR_MAX_RES_PATH_LENGTH]; + + error = &hostrcb->hcam.u.error64.u.type_21_error; + + ipr_err("-----Failing Device Information-----\n"); + ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n", + be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]), + be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3])); + ipr_err("Device Resource Path: %s\n", + __ipr_format_res_path(error->res_path, + buffer, sizeof(buffer))); + error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0'; + error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0'; + ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc); + ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc); + ipr_err("SCSI Sense Data:\n"); + ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data)); + ipr_err("SCSI Command Descriptor Block: \n"); + ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb)); + + ipr_err("Additional IOA Data:\n"); + ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error)); +} + +/** * ipr_get_error - Find the specified IOASC in the ipr_error_table.
* @ioasc: IOASC * @@ -2429,6 +2504,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, case IPR_HOST_RCB_OVERLAY_ID_20: ipr_log_fabric_error(ioa_cfg, hostrcb); break; + case IPR_HOST_RCB_OVERLAY_ID_21: + ipr_log_sis64_device_error(ioa_cfg, hostrcb); + break; case IPR_HOST_RCB_OVERLAY_ID_23: ipr_log_sis64_config_error(ioa_cfg, hostrcb); break; @@ -3592,16 +3670,14 @@ static ssize_t ipr_store_iopoll_weight(struct device *dev, return strlen(buf); } - if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && - ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { for (i = 1; i < ioa_cfg->hrrq_num; i++) blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); } spin_lock_irqsave(shost->host_lock, lock_flags); ioa_cfg->iopoll_weight = user_iopoll_weight; - if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && - ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { for (i = 1; i < ioa_cfg->hrrq_num; i++) { blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, ioa_cfg->iopoll_weight, ipr_iopoll); @@ -4777,7 +4853,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd) ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - if (!ioa_cfg->in_reset_reload) { + if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); dev_err(&ioa_cfg->pdev->dev, "Adapter being reset as a result of error recovery.\n"); @@ -4977,6 +5053,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) } else rc = ipr_device_reset(ioa_cfg, res); res->resetting_device = 0; + res->reset_occurred = 1; LEAVE; return rc ? FAILED : SUCCESS; @@ -5148,7 +5225,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) ipr_trace; } - list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); if (!ipr_is_naca_model(res)) res->needs_sync_complete = 1; @@ -5446,8 +5523,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp) return IRQ_NONE; } - if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && - ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == hrrq->toggle_bit) { if (!blk_iopoll_sched_prep(&hrrq->iopoll)) @@ -6145,8 +6221,10 @@ static int ipr_queuecommand(struct Scsi_Host *shost, ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; - if (ipr_is_gscsi(res)) + if (ipr_is_gscsi(res) && res->reset_occurred) { + res->reset_occurred = 0; ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; + } ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd); } @@ -6267,7 +6345,8 @@ static struct scsi_host_template driver_template = { .use_clustering = ENABLE_CLUSTERING, .shost_attrs = ipr_ioa_attrs, .sdev_attrs = ipr_dev_attrs, - .proc_name = IPR_NAME + .proc_name = IPR_NAME, + .no_write_same = 1, }; /** @@ -6421,7 +6500,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, { u32 ioadl_flags = 0; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64; struct ipr_ioadl64_desc *last_ioadl64 = NULL; int len = qc->nbytes; struct scatterlist *sg; @@ -6441,7 +6520,7 @@ static void 
ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, ioarcb->ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); ioarcb->u.sis64_addr_data.data_ioadl_addr = - cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl)); + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64)); for_each_sg(qc->sg, sg, qc->n_elem, si) { ioadl64->flags = cpu_to_be32(ioadl_flags); @@ -6662,7 +6741,6 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc) tf->hob_lbal = g->hob_lbal; tf->hob_lbam = g->hob_lbam; tf->hob_lbah = g->hob_lbah; - tf->ctl = g->alt_status; return true; } @@ -6739,6 +6817,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int i; ENTER; if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { @@ -6750,6 +6829,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) ioa_cfg->in_reset_reload = 0; ioa_cfg->reset_retries = 0; + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].ioa_is_dead = 1; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + wmb(); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); wake_up_all(&ioa_cfg->reset_wait_q); LEAVE; @@ -8595,6 +8681,25 @@ static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd) } /** + * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled + * @pdev: PCI device struct + * + * Description: This routine is called to tell us that the MMIO + * access to the IOA has been restored + */ +static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev) +{ + unsigned long flags = 0; + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + if (!ioa_cfg->probe_done) + pci_save_state(pdev); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + return PCI_ERS_RESULT_NEED_RESET; +} + +/** * ipr_pci_frozen - Called when slot has experienced a PCI bus error. 
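For orientation on ipr_pci_mmio_enabled() above: it slots into the standard PCI error-recovery sequence, where the core calls error_detected() first (later in this patch it returns PCI_ERS_RESULT_CAN_RECOVER for a frozen channel), then mmio_enabled() once register access is restored, then slot_reset(). A skeletal handler set showing how the three callbacks chain; the bodies are placeholders, not ipr logic:

static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
					    enum pci_channel_state state)
{
	/* Quiesce I/O; request the CAN_RECOVER path so mmio_enabled runs. */
	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_CAN_RECOVER;
}

static pci_ers_result_t demo_mmio_enabled(struct pci_dev *pdev)
{
	/* MMIO works again; ask the core for a slot reset to finish. */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
{
	/* Restore config space and restart the device. */
	return PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers demo_err_handler = {
	.error_detected	= demo_error_detected,
	.mmio_enabled	= demo_mmio_enabled,
	.slot_reset	= demo_slot_reset,
};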
* @pdev: PCI device struct * @@ -8608,7 +8713,8 @@ static void ipr_pci_frozen(struct pci_dev *pdev) struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); spin_lock_irqsave(ioa_cfg->host->host_lock, flags); - _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); + if (ioa_cfg->probe_done) + _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); } @@ -8626,11 +8732,14 @@ static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev) struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); spin_lock_irqsave(ioa_cfg->host->host_lock, flags); - if (ioa_cfg->needs_warm_reset) - ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); - else - _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, - IPR_SHUTDOWN_NONE); + if (ioa_cfg->probe_done) { + if (ioa_cfg->needs_warm_reset) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + else + _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, + IPR_SHUTDOWN_NONE); + } else + wake_up_all(&ioa_cfg->eeh_wait_q); spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); return PCI_ERS_RESULT_RECOVERED; } @@ -8649,17 +8758,20 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev) int i; spin_lock_irqsave(ioa_cfg->host->host_lock, flags); - if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) - ioa_cfg->sdt_state = ABORT_DUMP; - ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; - ioa_cfg->in_ioa_bringdown = 1; - for (i = 0; i < ioa_cfg->hrrq_num; i++) { - spin_lock(&ioa_cfg->hrrq[i]._lock); - ioa_cfg->hrrq[i].allow_cmds = 0; - spin_unlock(&ioa_cfg->hrrq[i]._lock); - } - wmb(); - ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + if (ioa_cfg->probe_done) { + if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) + ioa_cfg->sdt_state = ABORT_DUMP; + ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; + ioa_cfg->in_ioa_bringdown = 1; + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].allow_cmds = 0; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + wmb(); + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + } else + wake_up_all(&ioa_cfg->eeh_wait_q); spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); } @@ -8679,7 +8791,7 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev, switch (state) { case pci_channel_io_frozen: ipr_pci_frozen(pdev); - return PCI_ERS_RESULT_NEED_RESET; + return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_perm_failure: ipr_pci_perm_failure(pdev); return PCI_ERS_RESULT_DISCONNECT; @@ -8709,6 +8821,7 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg) ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); + ioa_cfg->probe_done = 1; if (ioa_cfg->needs_hard_reset) { ioa_cfg->needs_hard_reset = 0; ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); @@ -8972,19 +9085,6 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) if (!ioa_cfg->res_entries) goto out; - if (ioa_cfg->sis64) { - ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) * - BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); - ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) * - BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); - ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) * - BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); - - if (!ioa_cfg->target_ids || !ioa_cfg->array_ids - || !ioa_cfg->vset_ids) - goto out_free_res_entries; - } - for (i = 0; i < ioa_cfg->max_devs_supported; i++) { 
list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; @@ -8997,16 +9097,6 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) if (!ioa_cfg->vpd_cbs) goto out_free_res_entries; - for (i = 0; i < ioa_cfg->hrrq_num; i++) { - INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); - INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); - spin_lock_init(&ioa_cfg->hrrq[i]._lock); - if (i == 0) - ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; - else - ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; - } - if (ipr_alloc_cmd_blks(ioa_cfg)) goto out_free_vpd_cbs; @@ -9081,9 +9171,6 @@ out_free_vpd_cbs: ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); out_free_res_entries: kfree(ioa_cfg->res_entries); - kfree(ioa_cfg->target_ids); - kfree(ioa_cfg->array_ids); - kfree(ioa_cfg->vset_ids); goto out; } @@ -9110,6 +9197,48 @@ static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg) } /** + * ipr_init_regs - Initialize IOA registers + * @ioa_cfg: ioa config struct + * + * Return value: + * none + **/ +static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg) +{ + const struct ipr_interrupt_offsets *p; + struct ipr_interrupts *t; + void __iomem *base; + + p = &ioa_cfg->chip_cfg->regs; + t = &ioa_cfg->regs; + base = ioa_cfg->hdw_dma_regs; + + t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; + t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; + t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; + t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; + t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; + t->clr_interrupt_reg = base + p->clr_interrupt_reg; + t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; + t->sense_interrupt_reg = base + p->sense_interrupt_reg; + t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; + t->ioarrin_reg = base + p->ioarrin_reg; + t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; + t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; + t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; + t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; + t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; + t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; + + if (ioa_cfg->sis64) { + t->init_feedback_reg = base + p->init_feedback_reg; + t->dump_addr_reg = base + p->dump_addr_reg; + t->dump_data_reg = base + p->dump_data_reg; + t->endian_swap_reg = base + p->endian_swap_reg; + } +} + +/** * ipr_init_ioa_cfg - Initialize IOA config struct * @ioa_cfg: ioa config struct * @host: scsi host struct @@ -9121,9 +9250,7 @@ static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg) static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, struct Scsi_Host *host, struct pci_dev *pdev) { - const struct ipr_interrupt_offsets *p; - struct ipr_interrupts *t; - void __iomem *base; + int i; ioa_cfg->host = host; ioa_cfg->pdev = pdev; @@ -9143,6 +9270,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); init_waitqueue_head(&ioa_cfg->reset_wait_q); init_waitqueue_head(&ioa_cfg->msi_wait_q); + init_waitqueue_head(&ioa_cfg->eeh_wait_q); ioa_cfg->sdt_state = INACTIVE; ipr_initialize_bus_attr(ioa_cfg); @@ -9153,44 +9281,33 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; if (ipr_max_devs > IPR_MAX_SIS64_DEVS) ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; + 
ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) + + ((sizeof(struct ipr_config_table_entry64) + * ioa_cfg->max_devs_supported))); } else { host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS) ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; + ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) + + ((sizeof(struct ipr_config_table_entry) + * ioa_cfg->max_devs_supported))); } + host->max_channel = IPR_MAX_BUS_TO_SCAN; host->unique_id = host->host_no; host->max_cmd_len = IPR_MAX_CDB_LEN; host->can_queue = ioa_cfg->max_cmds; pci_set_drvdata(pdev, ioa_cfg); - p = &ioa_cfg->chip_cfg->regs; - t = &ioa_cfg->regs; - base = ioa_cfg->hdw_dma_regs; - - t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; - t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; - t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; - t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; - t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; - t->clr_interrupt_reg = base + p->clr_interrupt_reg; - t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; - t->sense_interrupt_reg = base + p->sense_interrupt_reg; - t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; - t->ioarrin_reg = base + p->ioarrin_reg; - t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; - t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; - t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; - t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; - t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; - t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; - - if (ioa_cfg->sis64) { - t->init_feedback_reg = base + p->init_feedback_reg; - t->dump_addr_reg = base + p->dump_addr_reg; - t->dump_data_reg = base + p->dump_data_reg; - t->endian_swap_reg = base + p->endian_swap_reg; + for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { + INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); + INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); + spin_lock_init(&ioa_cfg->hrrq[i]._lock); + if (i == 0) + ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; + else + ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; } } @@ -9213,54 +9330,63 @@ ipr_get_chip_info(const struct pci_device_id *dev_id) return NULL; } +/** + * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete + * during probe time + * @ioa_cfg: ioa config struct + * + * Return value: + * None + **/ +static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg) +{ + struct pci_dev *pdev = ioa_cfg->pdev; + + if (pci_channel_offline(pdev)) { + wait_event_timeout(ioa_cfg->eeh_wait_q, + !pci_channel_offline(pdev), + IPR_PCI_ERROR_RECOVERY_TIMEOUT); + pci_restore_state(pdev); + } +} + static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg) { struct msix_entry entries[IPR_MAX_MSIX_VECTORS]; - int i, err, vectors; + int i, vectors; for (i = 0; i < ARRAY_SIZE(entries); ++i) entries[i].entry = i; - vectors = ipr_number_of_msix; - - while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0) - vectors = err; - - if (err < 0) { - pci_disable_msix(ioa_cfg->pdev); - return err; + vectors = pci_enable_msix_range(ioa_cfg->pdev, + entries, 1, ipr_number_of_msix); + if (vectors < 0) { + ipr_wait_for_pci_err_recovery(ioa_cfg); + return vectors; } - if (!err) { - for (i = 0; i < vectors; i++) - ioa_cfg->vectors_info[i].vec = 
entries[i].vector; - ioa_cfg->nvectors = vectors; - } + for (i = 0; i < vectors; i++) + ioa_cfg->vectors_info[i].vec = entries[i].vector; + ioa_cfg->nvectors = vectors; - return err; + return 0; } static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg) { - int i, err, vectors; - - vectors = ipr_number_of_msix; + int i, vectors; - while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0) - vectors = err; - - if (err < 0) { - pci_disable_msi(ioa_cfg->pdev); - return err; + vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix); + if (vectors < 0) { + ipr_wait_for_pci_err_recovery(ioa_cfg); + return vectors; } - if (!err) { - for (i = 0; i < vectors; i++) - ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i; - ioa_cfg->nvectors = vectors; - } + for (i = 0; i < vectors; i++) + ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i; + ioa_cfg->nvectors = vectors; - return err; + return 0; } static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg) @@ -9325,7 +9451,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp) * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support. * @pdev: PCI device struct * - * Description: The return value from pci_enable_msi() can not always be + * Description: The return value from pci_enable_msi_range() can not always be * trusted. This routine sets up and initiates a test interrupt to determine * if the interrupt is received via the ipr_test_intr() service routine. * If the tests fails, the driver will fall back to LSI. @@ -9349,7 +9475,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); + if (ioa_cfg->intr_flag == IPR_USE_MSIX) + rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg); + else + rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); if (rc) { dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); return rc; @@ -9371,7 +9500,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - free_irq(pdev->irq, ioa_cfg); + if (ioa_cfg->intr_flag == IPR_USE_MSIX) + free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg); + else + free_irq(pdev->irq, ioa_cfg); LEAVE; @@ -9394,23 +9526,17 @@ static int ipr_probe_ioa(struct pci_dev *pdev, void __iomem *ipr_regs; int rc = PCIBIOS_SUCCESSFUL; volatile u32 mask, uproc, interrupts; - unsigned long lock_flags; + unsigned long lock_flags, driver_lock_flags; ENTER; - if ((rc = pci_enable_device(pdev))) { - dev_err(&pdev->dev, "Cannot enable adapter\n"); - goto out; - } - dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq); - host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); if (!host) { dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); rc = -ENOMEM; - goto out_disable; + goto out; } ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; @@ -9440,6 +9566,8 @@ static int ipr_probe_ioa(struct pci_dev *pdev, ioa_cfg->revid = pdev->revision; + ipr_init_ioa_cfg(ioa_cfg, host, pdev); + ipr_regs_pci = pci_resource_start(pdev, 0); rc = pci_request_regions(pdev, IPR_NAME); @@ -9449,22 +9577,35 @@ static int ipr_probe_ioa(struct pci_dev *pdev, goto out_scsi_host_put; } + rc = pci_enable_device(pdev); + + if (rc || pci_channel_offline(pdev)) { + if (pci_channel_offline(pdev)) { + ipr_wait_for_pci_err_recovery(ioa_cfg); + rc = 
pci_enable_device(pdev); + } + + if (rc) { + dev_err(&pdev->dev, "Cannot enable adapter\n"); + ipr_wait_for_pci_err_recovery(ioa_cfg); + goto out_release_regions; + } + } + ipr_regs = pci_ioremap_bar(pdev, 0); if (!ipr_regs) { dev_err(&pdev->dev, "Couldn't map memory range of registers\n"); rc = -ENOMEM; - goto out_release_regions; + goto out_disable; } ioa_cfg->hdw_dma_regs = ipr_regs; ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; - ipr_init_ioa_cfg(ioa_cfg, host, pdev); - - pci_set_master(pdev); + ipr_init_regs(ioa_cfg); if (ioa_cfg->sis64) { rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); @@ -9472,7 +9613,6 @@ static int ipr_probe_ioa(struct pci_dev *pdev, dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n"); rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); } - } else rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); @@ -9486,10 +9626,15 @@ static int ipr_probe_ioa(struct pci_dev *pdev, if (rc != PCIBIOS_SUCCESSFUL) { dev_err(&pdev->dev, "Write of cache line size failed\n"); + ipr_wait_for_pci_err_recovery(ioa_cfg); rc = -EIO; goto cleanup_nomem; } + /* Issue MMIO read to ensure card is not in EEH */ + interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); + ipr_wait_for_pci_err_recovery(ioa_cfg); + if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) { dev_err(&pdev->dev, "The max number of MSIX is %d\n", IPR_MAX_MSIX_VECTORS); @@ -9508,10 +9653,22 @@ static int ipr_probe_ioa(struct pci_dev *pdev, dev_info(&pdev->dev, "Cannot enable MSI.\n"); } + pci_set_master(pdev); + + if (pci_channel_offline(pdev)) { + ipr_wait_for_pci_err_recovery(ioa_cfg); + pci_set_master(pdev); + if (pci_channel_offline(pdev)) { + rc = -EIO; + goto out_msi_disable; + } + } + if (ioa_cfg->intr_flag == IPR_USE_MSI || ioa_cfg->intr_flag == IPR_USE_MSIX) { rc = ipr_test_msi(ioa_cfg, pdev); if (rc == -EOPNOTSUPP) { + ipr_wait_for_pci_err_recovery(ioa_cfg); if (ioa_cfg->intr_flag == IPR_USE_MSI) { ioa_cfg->intr_flag &= ~IPR_USE_MSI; pci_disable_msi(pdev); @@ -9541,30 +9698,12 @@ static int ipr_probe_ioa(struct pci_dev *pdev, (unsigned int)num_online_cpus(), (unsigned int)IPR_MAX_HRRQ_NUM); - /* Save away PCI config space for use following IOA reset */ - rc = pci_save_state(pdev); - - if (rc != PCIBIOS_SUCCESSFUL) { - dev_err(&pdev->dev, "Failed to save PCI config space\n"); - rc = -EIO; - goto out_msi_disable; - } - if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) goto out_msi_disable; if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) goto out_msi_disable; - if (ioa_cfg->sis64) - ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) - + ((sizeof(struct ipr_config_table_entry64) - * ioa_cfg->max_devs_supported))); - else - ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) - + ((sizeof(struct ipr_config_table_entry) - * ioa_cfg->max_devs_supported))); - rc = ipr_alloc_mem(ioa_cfg); if (rc < 0) { dev_err(&pdev->dev, @@ -9572,6 +9711,15 @@ static int ipr_probe_ioa(struct pci_dev *pdev, goto out_msi_disable; } + /* Save away PCI config space for use following IOA reset */ + rc = pci_save_state(pdev); + + if (rc != PCIBIOS_SUCCESSFUL) { + dev_err(&pdev->dev, "Failed to save PCI config space\n"); + rc = -EIO; + goto cleanup_nolog; + } + /* * If HRRQ updated interrupt is not masked, or reset alert is set, * the card is in an unknown state and needs a hard reset @@ -9617,9 +9765,9 @@ static int ipr_probe_ioa(struct pci_dev *pdev, } else ioa_cfg->reset = ipr_reset_start_bist; - spin_lock(&ipr_driver_lock); + spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); 
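Stepping back to the interrupt setup converted above: pci_enable_msix_range() and pci_enable_msi_range() replace the old call-until-it-sticks loops around pci_enable_msix() and pci_enable_msi_block(). Each takes an acceptable [min, max] vector range and either returns a count within that range or a negative errno, so the caller no longer retries. Reduced to its essentials (demo wrapper; assumes only that the caller sized entries[] for `want` slots):

static int demo_enable_msix(struct pci_dev *pdev,
			    struct msix_entry *entries, int want)
{
	int i;

	for (i = 0; i < want; i++)
		entries[i].entry = i;

	/* Returns the number of vectors granted (>= 1) or -errno. */
	return pci_enable_msix_range(pdev, entries, 1, want);
}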
list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); - spin_unlock(&ipr_driver_lock); + spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); LEAVE; out: @@ -9628,18 +9776,19 @@ out: cleanup_nolog: ipr_free_mem(ioa_cfg); out_msi_disable: + ipr_wait_for_pci_err_recovery(ioa_cfg); if (ioa_cfg->intr_flag == IPR_USE_MSI) pci_disable_msi(pdev); else if (ioa_cfg->intr_flag == IPR_USE_MSIX) pci_disable_msix(pdev); cleanup_nomem: iounmap(ipr_regs); +out_disable: + pci_disable_device(pdev); out_release_regions: pci_release_regions(pdev); out_scsi_host_put: scsi_host_put(host); -out_disable: - pci_disable_device(pdev); goto out; } @@ -9702,6 +9851,7 @@ static void __ipr_remove(struct pci_dev *pdev) unsigned long host_lock_flags = 0; struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); int i; + unsigned long driver_lock_flags; ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); @@ -9722,11 +9872,12 @@ static void __ipr_remove(struct pci_dev *pdev) spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); flush_work(&ioa_cfg->work_q); + INIT_LIST_HEAD(&ioa_cfg->used_res_q); spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); - spin_lock(&ipr_driver_lock); + spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); list_del(&ioa_cfg->queue); - spin_unlock(&ipr_driver_lock); + spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); if (ioa_cfg->sdt_state == ABORT_DUMP) ioa_cfg->sdt_state = WAIT_FOR_DUMP; @@ -9821,8 +9972,7 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) ioa_cfg->host->max_channel = IPR_VSET_BUS; ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; - if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && - ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { for (i = 1; i < ioa_cfg->hrrq_num; i++) { blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, ioa_cfg->iopoll_weight, ipr_iopoll); @@ -9851,8 +10001,7 @@ static void ipr_shutdown(struct pci_dev *pdev) int i; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && - ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { ioa_cfg->iopoll_weight = 0; for (i = 1; i < ioa_cfg->hrrq_num; i++) blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); @@ -9953,12 +10102,35 @@ static struct pci_device_id ipr_pci_table[] = { PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, 
PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 }, { } }; MODULE_DEVICE_TABLE(pci, ipr_pci_table); static const struct pci_error_handlers ipr_err_handler = { .error_detected = ipr_pci_error_detected, + .mmio_enabled = ipr_pci_mmio_enabled, .slot_reset = ipr_pci_slot_reset, }; @@ -9992,12 +10164,12 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf) { struct ipr_cmnd *ipr_cmd; struct ipr_ioa_cfg *ioa_cfg; - unsigned long flags = 0; + unsigned long flags = 0, driver_lock_flags; if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) return NOTIFY_DONE; - spin_lock(&ipr_driver_lock); + spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags); list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) { spin_lock_irqsave(ioa_cfg->host->host_lock, flags); @@ -10015,7 +10187,7 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf) ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); } - spin_unlock(&ipr_driver_lock); + spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags); return NOTIFY_OK; } diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 21a6ff1ed5c..31ed126f714 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -100,6 +100,17 @@ #define IPR_SUBS_DEV_ID_57D6 0x03FC #define IPR_SUBS_DEV_ID_57D7 0x03FF #define IPR_SUBS_DEV_ID_57D8 0x03FE +#define IPR_SUBS_DEV_ID_57D9 0x046D +#define IPR_SUBS_DEV_ID_57DA 0x04CA +#define IPR_SUBS_DEV_ID_57EB 0x0474 +#define IPR_SUBS_DEV_ID_57EC 0x0475 +#define IPR_SUBS_DEV_ID_57ED 0x0499 +#define IPR_SUBS_DEV_ID_57EE 0x049A +#define IPR_SUBS_DEV_ID_57EF 0x049B +#define IPR_SUBS_DEV_ID_57F0 0x049C +#define IPR_SUBS_DEV_ID_2CCA 0x04C7 +#define IPR_SUBS_DEV_ID_2CD2 0x04C8 +#define IPR_SUBS_DEV_ID_2CCD 0x04C9 #define IPR_NAME "ipr" /* @@ -223,6 +234,7 @@ #define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ) #define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10) #define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) +#define IPR_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ) #define IPR_PCI_RESET_TIMEOUT (HZ / 2) #define IPR_SIS32_DUMP_TIMEOUT (15 * HZ) #define IPR_SIS64_DUMP_TIMEOUT (40 * HZ) @@ -294,7 +306,7 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR) * Dump literals */ #define IPR_FMT2_MAX_IOA_DUMP_SIZE (4 * 1024 * 1024) -#define IPR_FMT3_MAX_IOA_DUMP_SIZE (32 * 1024 * 1024) +#define IPR_FMT3_MAX_IOA_DUMP_SIZE (80 * 1024 * 1024) #define IPR_FMT2_NUM_SDT_ENTRIES 511 #define IPR_FMT3_NUM_SDT_ENTRIES 0xFFF #define IPR_FMT2_MAX_NUM_DUMP_PAGES ((IPR_FMT2_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1) @@ -304,7 +316,7 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR) * Misc literals */ #define IPR_NUM_IOADL_ENTRIES IPR_MAX_SGLIST -#define IPR_MAX_MSIX_VECTORS 0x5 +#define IPR_MAX_MSIX_VECTORS 0x10 #define IPR_MAX_HRRQ_NUM 0x10 #define IPR_INIT_HRRQ 0x0 @@ -552,7 +564,7 @@ struct ipr_ioarcb_ata_regs { /* 22 bytes */ u8 hob_lbam; u8 hob_lbah; u8 ctl; -}__attribute__ ((packed, aligned(4))); +}__attribute__ ((packed, aligned(2))); struct ipr_ioadl_desc { __be32 flags_and_data_len; @@ -890,6 +902,18 @@ struct ipr_hostrcb_type_01_error { __be32 ioa_data[236]; }__attribute__((packed, aligned (4))); +struct ipr_hostrcb_type_21_error { + __be32 wwn[4]; + u8 res_path[8]; + u8 
primary_problem_desc[32]; + u8 second_problem_desc[32]; + __be32 sense_data[8]; + __be32 cdb[4]; + __be32 residual_trans_length; + __be32 length_of_error; + __be32 ioa_data[236]; +}__attribute__((packed, aligned (4))); + struct ipr_hostrcb_type_02_error { struct ipr_vpd ioa_vpd; struct ipr_vpd cfc_vpd; @@ -1119,6 +1143,7 @@ struct ipr_hostrcb64_error { struct ipr_hostrcb_type_ff_error type_ff_error; struct ipr_hostrcb_type_12_error type_12_error; struct ipr_hostrcb_type_17_error type_17_error; + struct ipr_hostrcb_type_21_error type_21_error; struct ipr_hostrcb_type_23_error type_23_error; struct ipr_hostrcb_type_24_error type_24_error; struct ipr_hostrcb_type_30_error type_30_error; @@ -1162,6 +1187,7 @@ struct ipr_hcam { #define IPR_HOST_RCB_OVERLAY_ID_16 0x16 #define IPR_HOST_RCB_OVERLAY_ID_17 0x17 #define IPR_HOST_RCB_OVERLAY_ID_20 0x20 +#define IPR_HOST_RCB_OVERLAY_ID_21 0x21 #define IPR_HOST_RCB_OVERLAY_ID_23 0x23 #define IPR_HOST_RCB_OVERLAY_ID_24 0x24 #define IPR_HOST_RCB_OVERLAY_ID_26 0x26 @@ -1245,6 +1271,7 @@ struct ipr_resource_entry { u8 add_to_ml:1; u8 del_from_ml:1; u8 resetting_device:1; + u8 reset_occurred:1; u32 bus; /* AKA channel */ u32 target; /* AKA id */ @@ -1434,15 +1461,16 @@ struct ipr_ioa_cfg { u8 dump_timeout:1; u8 cfg_locked:1; u8 clear_isr:1; + u8 probe_done:1; u8 revid; /* * Bitmaps for SIS64 generated target values */ - unsigned long *target_ids; - unsigned long *array_ids; - unsigned long *vset_ids; + unsigned long target_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)]; + unsigned long array_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)]; + unsigned long vset_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)]; u16 type; /* CCIN of the card */ @@ -1512,6 +1540,7 @@ struct ipr_ioa_cfg { wait_queue_head_t reset_wait_q; wait_queue_head_t msi_wait_q; + wait_queue_head_t eeh_wait_q; struct ipr_dump *dump; enum ipr_sdt_state sdt_state; diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 9aa86a315a0..52a216f21ae 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -326,10 +326,9 @@ static void ips_scmd_buf_write(struct scsi_cmnd * scmd, void *data, static void ips_scmd_buf_read(struct scsi_cmnd * scmd, void *data, unsigned int count); -static int ips_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int); -static int ips_host_info(ips_ha_t *, char *, off_t, int); -static void copy_mem_info(IPS_INFOSTR *, char *, int); -static int copy_info(IPS_INFOSTR *, char *, ...); +static int ips_write_info(struct Scsi_Host *, char *, int); +static int ips_show_info(struct seq_file *, struct Scsi_Host *); +static int ips_host_info(ips_ha_t *, struct seq_file *); static int ips_abort_init(ips_ha_t * ha, int index); static int ips_init_phase2(int index); @@ -367,13 +366,15 @@ static struct scsi_host_template ips_driver_template = { .eh_abort_handler = ips_eh_abort, .eh_host_reset_handler = ips_eh_reset, .proc_name = "ips", - .proc_info = ips_proc_info, + .show_info = ips_show_info, + .write_info = ips_write_info, .slave_configure = ips_slave_configure, .bios_param = ips_biosparam, .this_id = -1, .sg_tablesize = IPS_MAX_SG, .cmd_per_lun = 3, .use_clustering = ENABLE_CLUSTERING, + .no_write_same = 1, }; @@ -1433,25 +1434,12 @@ ips_info(struct Scsi_Host *SH) return (bp); } -/****************************************************************************/ -/* */ -/* Routine Name: ips_proc_info */ -/* */ -/* Routine Description: */ -/* */ -/* The passthru interface for the driver */ -/* */ -/****************************************************************************/ static int 
-ips_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, - int length, int func) +ips_write_info(struct Scsi_Host *host, char *buffer, int length) { int i; - int ret; ips_ha_t *ha = NULL; - METHOD_TRACE("ips_proc_info", 1); - /* Find our host structure */ for (i = 0; i < ips_next_controller; i++) { if (ips_sh[i]) { @@ -1465,18 +1453,29 @@ ips_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, if (!ha) return (-EINVAL); - if (func) { - /* write */ - return (0); - } else { - /* read */ - if (start) - *start = buffer; + return 0; +} - ret = ips_host_info(ha, buffer, offset, length); +static int +ips_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + int i; + ips_ha_t *ha = NULL; - return (ret); + /* Find our host structure */ + for (i = 0; i < ips_next_controller; i++) { + if (ips_sh[i]) { + if (ips_sh[i] == host) { + ha = (ips_ha_t *) ips_sh[i]->hostdata; + break; + } + } } + + if (!ha) + return (-EINVAL); + + return ips_host_info(ha, m); } /*--------------------------------------------------------------------------*/ @@ -2035,183 +2034,113 @@ ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb) /* */ /****************************************************************************/ static int -ips_host_info(ips_ha_t * ha, char *ptr, off_t offset, int len) +ips_host_info(ips_ha_t *ha, struct seq_file *m) { - IPS_INFOSTR info; - METHOD_TRACE("ips_host_info", 1); - info.buffer = ptr; - info.length = len; - info.offset = offset; - info.pos = 0; - info.localpos = 0; - - copy_info(&info, "\nIBM ServeRAID General Information:\n\n"); + seq_printf(m, "\nIBM ServeRAID General Information:\n\n"); if ((le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) && (le16_to_cpu(ha->nvram->adapter_type) != 0)) - copy_info(&info, "\tController Type : %s\n", + seq_printf(m, "\tController Type : %s\n", ips_adapter_name[ha->ad_type - 1]); else - copy_info(&info, + seq_printf(m, "\tController Type : Unknown\n"); if (ha->io_addr) - copy_info(&info, - "\tIO region : 0x%lx (%d bytes)\n", + seq_printf(m, + "\tIO region : 0x%x (%d bytes)\n", ha->io_addr, ha->io_len); if (ha->mem_addr) { - copy_info(&info, - "\tMemory region : 0x%lx (%d bytes)\n", + seq_printf(m, + "\tMemory region : 0x%x (%d bytes)\n", ha->mem_addr, ha->mem_len); - copy_info(&info, + seq_printf(m, "\tShared memory address : 0x%lx\n", - ha->mem_ptr); + (unsigned long)ha->mem_ptr); } - copy_info(&info, "\tIRQ number : %d\n", ha->pcidev->irq); + seq_printf(m, "\tIRQ number : %d\n", ha->pcidev->irq); /* For the Next 3 lines Check for Binary 0 at the end and don't include it if it's there. */ /* That keeps everything happy for "text" operations on the proc file. 
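A note on the proc interface conversion in progress here: with .show_info/.write_info, the seq_file core owns buffer sizing and file offsets, which is exactly the bookkeeping the IPS_INFOSTR/copy_info()/copy_mem_info() helpers (deleted further below) used to do by hand. Under the new contract a read handler simply emits text and returns 0, roughly like this (demo fields, not the ips output):

static int demo_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	/* No offset/length juggling: seq_printf() appends and the
	 * seq_file core handles pagination of the result. */
	seq_printf(m, "host_no   : %d\n", host->host_no);
	seq_printf(m, "can_queue : %d\n", host->can_queue);
	return 0;
}

The handler is wired up through the scsi_host_template, as ips_driver_template does above with .show_info = ips_show_info.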
*/ if (le32_to_cpu(ha->nvram->signature) == IPS_NVRAM_P5_SIG) { if (ha->nvram->bios_low[3] == 0) { - copy_info(&info, - "\tBIOS Version : %c%c%c%c%c%c%c\n", - ha->nvram->bios_high[0], ha->nvram->bios_high[1], - ha->nvram->bios_high[2], ha->nvram->bios_high[3], - ha->nvram->bios_low[0], ha->nvram->bios_low[1], - ha->nvram->bios_low[2]); + seq_printf(m, + "\tBIOS Version : %c%c%c%c%c%c%c\n", + ha->nvram->bios_high[0], ha->nvram->bios_high[1], + ha->nvram->bios_high[2], ha->nvram->bios_high[3], + ha->nvram->bios_low[0], ha->nvram->bios_low[1], + ha->nvram->bios_low[2]); } else { - copy_info(&info, - "\tBIOS Version : %c%c%c%c%c%c%c%c\n", - ha->nvram->bios_high[0], ha->nvram->bios_high[1], - ha->nvram->bios_high[2], ha->nvram->bios_high[3], - ha->nvram->bios_low[0], ha->nvram->bios_low[1], - ha->nvram->bios_low[2], ha->nvram->bios_low[3]); + seq_printf(m, + "\tBIOS Version : %c%c%c%c%c%c%c%c\n", + ha->nvram->bios_high[0], ha->nvram->bios_high[1], + ha->nvram->bios_high[2], ha->nvram->bios_high[3], + ha->nvram->bios_low[0], ha->nvram->bios_low[1], + ha->nvram->bios_low[2], ha->nvram->bios_low[3]); } } if (ha->enq->CodeBlkVersion[7] == 0) { - copy_info(&info, - "\tFirmware Version : %c%c%c%c%c%c%c\n", - ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1], - ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3], - ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5], - ha->enq->CodeBlkVersion[6]); + seq_printf(m, + "\tFirmware Version : %c%c%c%c%c%c%c\n", + ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1], + ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3], + ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5], + ha->enq->CodeBlkVersion[6]); } else { - copy_info(&info, - "\tFirmware Version : %c%c%c%c%c%c%c%c\n", - ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1], - ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3], - ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5], - ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]); + seq_printf(m, + "\tFirmware Version : %c%c%c%c%c%c%c%c\n", + ha->enq->CodeBlkVersion[0], ha->enq->CodeBlkVersion[1], + ha->enq->CodeBlkVersion[2], ha->enq->CodeBlkVersion[3], + ha->enq->CodeBlkVersion[4], ha->enq->CodeBlkVersion[5], + ha->enq->CodeBlkVersion[6], ha->enq->CodeBlkVersion[7]); } if (ha->enq->BootBlkVersion[7] == 0) { - copy_info(&info, - "\tBoot Block Version : %c%c%c%c%c%c%c\n", - ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1], - ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3], - ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5], - ha->enq->BootBlkVersion[6]); + seq_printf(m, + "\tBoot Block Version : %c%c%c%c%c%c%c\n", + ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1], + ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3], + ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5], + ha->enq->BootBlkVersion[6]); } else { - copy_info(&info, - "\tBoot Block Version : %c%c%c%c%c%c%c%c\n", - ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1], - ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3], - ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5], - ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]); + seq_printf(m, + "\tBoot Block Version : %c%c%c%c%c%c%c%c\n", + ha->enq->BootBlkVersion[0], ha->enq->BootBlkVersion[1], + ha->enq->BootBlkVersion[2], ha->enq->BootBlkVersion[3], + ha->enq->BootBlkVersion[4], ha->enq->BootBlkVersion[5], + ha->enq->BootBlkVersion[6], ha->enq->BootBlkVersion[7]); } - copy_info(&info, "\tDriver Version : %s%s\n", + seq_printf(m, "\tDriver Version : 
%s%s\n", IPS_VERSION_HIGH, IPS_VERSION_LOW); - copy_info(&info, "\tDriver Build : %d\n", + seq_printf(m, "\tDriver Build : %d\n", IPS_BUILD_IDENT); - copy_info(&info, "\tMax Physical Devices : %d\n", + seq_printf(m, "\tMax Physical Devices : %d\n", ha->enq->ucMaxPhysicalDevices); - copy_info(&info, "\tMax Active Commands : %d\n", + seq_printf(m, "\tMax Active Commands : %d\n", ha->max_cmds); - copy_info(&info, "\tCurrent Queued Commands : %d\n", + seq_printf(m, "\tCurrent Queued Commands : %d\n", ha->scb_waitlist.count); - copy_info(&info, "\tCurrent Active Commands : %d\n", + seq_printf(m, "\tCurrent Active Commands : %d\n", ha->scb_activelist.count - ha->num_ioctl); - copy_info(&info, "\tCurrent Queued PT Commands : %d\n", + seq_printf(m, "\tCurrent Queued PT Commands : %d\n", ha->copp_waitlist.count); - copy_info(&info, "\tCurrent Active PT Commands : %d\n", + seq_printf(m, "\tCurrent Active PT Commands : %d\n", ha->num_ioctl); - copy_info(&info, "\n"); - - return (info.localpos); -} - -/****************************************************************************/ -/* */ -/* Routine Name: copy_mem_info */ -/* */ -/* Routine Description: */ -/* */ -/* Copy data into an IPS_INFOSTR structure */ -/* */ -/****************************************************************************/ -static void -copy_mem_info(IPS_INFOSTR * info, char *data, int len) -{ - METHOD_TRACE("copy_mem_info", 1); - - if (info->pos + len < info->offset) { - info->pos += len; - return; - } - - if (info->pos < info->offset) { - data += (info->offset - info->pos); - len -= (info->offset - info->pos); - info->pos += (info->offset - info->pos); - } - - if (info->localpos + len > info->length) - len = info->length - info->localpos; + seq_printf(m, "\n"); - if (len > 0) { - memcpy(info->buffer + info->localpos, data, len); - info->pos += len; - info->localpos += len; - } -} - -/****************************************************************************/ -/* */ -/* Routine Name: copy_info */ -/* */ -/* Routine Description: */ -/* */ -/* printf style wrapper for an info structure */ -/* */ -/****************************************************************************/ -static int -copy_info(IPS_INFOSTR * info, char *fmt, ...) 
-{ - va_list args; - char buf[128]; - int len; - - METHOD_TRACE("copy_info", 1); - - va_start(args, fmt); - len = vsprintf(buf, fmt, args); - va_end(args); - - copy_mem_info(info, buf, len); - - return (len); + return 0; } /****************************************************************************/ diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h index f2df0593332..45b9566b928 100644 --- a/drivers/scsi/ips.h +++ b/drivers/scsi/ips.h @@ -416,7 +416,6 @@ /* * Scsi_Host Template */ - static int ips_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int); static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]); static int ips_slave_configure(struct scsi_device *SDptr); @@ -959,14 +958,6 @@ typedef union { IPS_ENH_SG_LIST *enh_list; } IPS_SG_LIST; -typedef struct _IPS_INFOSTR { - char *buffer; - int length; - int offset; - int pos; - int localpos; -} IPS_INFOSTR; - typedef struct { char *option_name; int *option_flag; diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 4911310a38f..22a9bb1abae 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost) } #define for_each_isci_host(id, ihost, pdev) \ - for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ - id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ - ihost = to_pci_info(pdev)->hosts[++id]) + for (id = 0; id < SCI_MAX_CONTROLLERS && \ + (ihost = to_pci_info(pdev)->hosts[id]); id++) static inline void wait_for_start(struct isci_host *ihost) { diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 2839baa82a5..695b34e9154 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -66,7 +66,7 @@ #include "probe_roms.h" #define MAJ 1 -#define MIN 1 +#define MIN 2 #define BUILD 0 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ __stringify(BUILD) @@ -721,7 +721,7 @@ static void isci_pci_remove(struct pci_dev *pdev) } } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int isci_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -770,18 +770,16 @@ static int isci_resume(struct device *dev) return 0; } +#endif static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume); -#endif static struct pci_driver isci_pci_driver = { .name = DRV_NAME, .id_table = isci_id_table, .probe = isci_pci_probe, .remove = isci_pci_remove, -#ifdef CONFIG_PM .driver.pm = &isci_pm_ops, -#endif }; static __init int isci_init(void) diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index cd962da4a57..ac879745ef8 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -311,9 +311,9 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, &ihost->phys[phy_index]); assigned_phy_mask |= (1 << phy_index); + phy_index++; } - phy_index++; } return sci_port_configuration_agent_validate_ports(ihost, port_agent); @@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost, SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); } else { /* the phy is already the part of the port */ - u32 port_state = iport->sm.current_state_id; - - /* if the PORT'S state is resetting then the link up is from - * port hard reset in this case, we need to tell the port - * that link up is recieved - */ - BUG_ON(port_state != SCI_PORT_RESETTING); port_agent->phy_ready_mask |= 1 << phy_index; sci_port_link_up(iport, iphy); } diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index c3aa6c5457b..cc51f38b116 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -1085,7 +1085,7 @@ static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *s struct isci_host *ihost = idev->owning_port->owning_controller; struct domain_device *dev = idev->domain_dev; - if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { + if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); } else if (dev_is_expander(dev)) { sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); @@ -1098,7 +1098,7 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct domain_device *dev = idev->domain_dev; - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { struct isci_host *ihost = idev->owning_port->owning_controller; isci_remote_device_not_ready(ihost, idev, @@ -1541,7 +1541,7 @@ void isci_remote_device_release(struct kref *kref) clear_bit(IDEV_STOP_PENDING, &idev->flags); clear_bit(IDEV_IO_READY, &idev->flags); clear_bit(IDEV_GONE, &idev->flags); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(IDEV_ALLOCATED, &idev->flags); wake_up(&ihost->eventq); } diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 7674caae1d8..47a013fffae 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -297,7 +297,7 @@ static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_conte static inline bool dev_is_expander(struct domain_device *dev) { - return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV; + return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE; } static inline void 
sci_remote_device_decrement_request_count(struct isci_remote_device *idev) diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 9594ab62702..56e38096f0c 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -184,8 +184,8 @@ static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) cmd_iu->task_attr = task->ssp_task.task_attr; cmd_iu->_r_c = 0; - sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb, - sizeof(task->ssp_task.cdb) / sizeof(u32)); + sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd, + (task->ssp_task.cmd->cmd_len+3) / sizeof(u32)); } static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) @@ -2723,13 +2723,9 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_ memcpy(resp->ending_fis, fis, sizeof(*fis)); ts->buf_valid_size = sizeof(*resp); - /* If the device fault bit is set in the status register, then - * set the sense data and return. - */ - if (fis->status & ATA_DF) + /* If an error is flagged let libata decode the fis */ + if (ac_err_mask(fis->status)) ts->stat = SAS_PROTO_RESPONSE; - else if (fis->status & ATA_ERR) - ts->stat = SAM_STAT_CHECK_CONDITION; else ts->stat = SAM_STAT_GOOD; @@ -2978,7 +2974,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm) /* all unaccelerated request types (non ssp or ncq) handled with * substates */ - if (!task && dev->dev_type == SAS_END_DEV) { + if (!task && dev->dev_type == SAS_END_DEVICE) { state = SCI_REQ_TASK_WAIT_TC_COMP; } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { state = SCI_REQ_SMP_WAIT_RESP; @@ -3101,7 +3097,7 @@ sci_io_request_construct(struct isci_host *ihost, if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_REMOTE_DEVICE; - if (dev->dev_type == SAS_END_DEV) + if (dev->dev_type == SAS_END_DEVICE) /* pass */; else if (dev_is_sata(dev)) memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); @@ -3125,7 +3121,7 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost, /* Build the common part of the request */ sci_general_request_construct(ihost, idev, ireq); - if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) { + if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) { set_bit(IREQ_TMF, &ireq->flags); memset(ireq->tc, 0, sizeof(struct scu_task_context)); diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index b6f19a1db78..5d6fda72d65 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -250,7 +250,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, } /* XXX convert to get this from task->tproto like other drivers */ - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { isci_tmf->proto = SAS_PROTOCOL_SSP; status = sci_task_request_construct_ssp(ireq); if (status != SCI_SUCCESS) @@ -491,6 +491,7 @@ int isci_task_abort_task(struct sas_task *task) struct isci_tmf tmf; int ret = TMF_RESP_FUNC_FAILED; unsigned long flags; + int target_done_already = 0; /* Get the isci_request reference from the task. 
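The isci_task_abort_task() rework around this point illustrates a lifetime rule worth spelling out: a flag stored in the request must be sampled while the lock protecting that request is held, because once scic_lock is dropped the request may complete and be freed. Hence target_done_already is captured under the lock and tested later, instead of re-reading old_request->flags. The pattern in isolation (demo helper, not an isci API):

static bool demo_request_done_in_target(struct isci_host *ihost,
					struct isci_request *old_request)
{
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	if (old_request)
		done = test_bit(IREQ_COMPLETE_IN_TARGET,
				&old_request->flags);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return done;	/* safe to consult after the lock is dropped */
}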
Note that * this check does not depend on the pending request list @@ -505,9 +506,11 @@ int isci_task_abort_task(struct sas_task *task) /* If task is already done, the request isn't valid */ if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && (task->task_state_flags & SAS_TASK_AT_INITIATOR) && - old_request) + old_request) { idev = isci_get_device(task->dev->lldd_dev); - + target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET, + &old_request->flags); + } spin_unlock(&task->task_state_lock); spin_unlock_irqrestore(&ihost->scic_lock, flags); @@ -561,7 +564,7 @@ int isci_task_abort_task(struct sas_task *task) if (task->task_proto == SAS_PROTOCOL_SMP || sas_protocol_ata(task->task_proto) || - test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) || + target_done_already || test_bit(IDEV_GONE, &idev->flags)) { spin_unlock_irqrestore(&ihost->scic_lock, flags); @@ -798,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev) /* XXX: need to cleanup any ireqs targeting this * domain_device */ - ret = TMF_RESP_FUNC_COMPLETE; + ret = -ENODEV; goto out; } diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c index 14c1c8f6a95..680bf6f0ce7 100644 --- a/drivers/scsi/iscsi_boot_sysfs.c +++ b/drivers/scsi/iscsi_boot_sysfs.c @@ -490,5 +490,6 @@ void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset) iscsi_boot_remove_kobj(boot_kobj); kset_unregister(boot_kset->kset); + kfree(boot_kset); } EXPORT_SYMBOL_GPL(iscsi_boot_destroy_kset); diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 1b91ca0dc1e..a669f2d11c3 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -116,6 +116,7 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk) struct iscsi_conn *conn = sk->sk_user_data; if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) && + (conn->session->state != ISCSI_STATE_LOGGING_OUT) && !atomic_read(&sk->sk_rmem_alloc)) { ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n"); iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE); @@ -124,7 +125,7 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk) return 0; } -static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag) +static void iscsi_sw_tcp_data_ready(struct sock *sk) { struct iscsi_conn *conn; struct iscsi_tcp_conn *tcp_conn; @@ -243,7 +244,7 @@ iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn) sk->sk_data_ready = tcp_sw_conn->old_data_ready; sk->sk_state_change = tcp_sw_conn->old_state_change; sk->sk_write_space = tcp_sw_conn->old_write_space; - sk->sk_no_check = 0; + sk->sk_no_check_tx = 0; write_unlock_bh(&sk->sk_callback_lock); } @@ -370,17 +371,24 @@ static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn) static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; - int rc; + unsigned long pflags = current->flags; + int rc = 0; + + current->flags |= PF_MEMALLOC; while (iscsi_sw_tcp_xmit_qlen(conn)) { rc = iscsi_sw_tcp_xmit(conn); - if (rc == 0) - return -EAGAIN; + if (rc == 0) { + rc = -EAGAIN; + break; + } if (rc < 0) - return rc; + break; + rc = 0; } - return 0; + tsk_restore_flags(current, pflags, PF_MEMALLOC); + return rc; } /* @@ -585,9 +593,9 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn) iscsi_sw_tcp_conn_restore_callbacks(conn); sock_put(sock->sk); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); tcp_sw_conn->sock = NULL; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); sockfd_put(sock); } @@ -655,16 +663,17 @@ 
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session, if (err) goto free_socket; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); /* bind iSCSI connection and socket */ tcp_sw_conn->sock = sock; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); /* setup Socket parameters */ sk = sock->sk; sk->sk_reuse = SK_CAN_REUSE; sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ sk->sk_allocation = GFP_ATOMIC; + sk_set_memalloc(sk); iscsi_sw_tcp_conn_set_callbacks(conn); tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage; @@ -717,14 +726,14 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, switch(param) { case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_CONN_ADDRESS: - spin_lock_bh(&conn->session->lock); + spin_lock_bh(&conn->session->frwd_lock); if (!tcp_sw_conn || !tcp_sw_conn->sock) { - spin_unlock_bh(&conn->session->lock); + spin_unlock_bh(&conn->session->frwd_lock); return -ENOTCONN; } rc = kernel_getpeername(tcp_sw_conn->sock, (struct sockaddr *)&addr, &len); - spin_unlock_bh(&conn->session->lock); + spin_unlock_bh(&conn->session->frwd_lock); if (rc) return rc; @@ -750,23 +759,26 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, switch (param) { case ISCSI_HOST_PARAM_IPADDRESS: - spin_lock_bh(&session->lock); + if (!session) + return -ENOTCONN; + + spin_lock_bh(&session->frwd_lock); conn = session->leadconn; if (!conn) { - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); return -ENOTCONN; } tcp_conn = conn->dd_data; tcp_sw_conn = tcp_conn->dd_data; if (!tcp_sw_conn->sock) { - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); return -ENOTCONN; } rc = kernel_getsockname(tcp_sw_conn->sock, (struct sockaddr *)&addr, &len); - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); if (rc) return rc; diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 666fe09378f..f42ecb238af 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -40,7 +40,7 @@ struct iscsi_sw_tcp_conn { struct iscsi_sw_tcp_send out; /* old values for socket callbacks */ - void (*old_data_ready)(struct sock *, int); + void (*old_data_ready)(struct sock *); void (*old_state_change)(struct sock *); void (*old_write_space)(struct sock *); diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 8e561e6a557..880a9068ca1 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -712,12 +712,13 @@ static void fc_disc_stop_final(struct fc_lport *lport) } /** - * fc_disc_init() - Initialize the discovery layer for a local port - * @lport: The local port that needs the discovery layer to be initialized + * fc_disc_config() - Configure the discovery layer for a local port + * @lport: The local port that needs the discovery layer to be configured + * @priv: Private data structure for users of the discovery layer */ -int fc_disc_init(struct fc_lport *lport) +void fc_disc_config(struct fc_lport *lport, void *priv) { - struct fc_disc *disc; + struct fc_disc *disc = &lport->disc; if (!lport->tt.disc_start) lport->tt.disc_start = fc_disc_start; @@ -732,12 +733,21 @@ int fc_disc_init(struct fc_lport *lport) lport->tt.disc_recv_req = fc_disc_recv_req; disc = &lport->disc; + + disc->priv = priv; +} +EXPORT_SYMBOL(fc_disc_config); + +/** + * fc_disc_init() - Initialize the discovery layer for a local port + * @lport: The local port that needs the discovery layer to be initialized + */ +void fc_disc_init(struct
fc_lport *lport) +{ + struct fc_disc *disc = &lport->disc; + INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); mutex_init(&disc->disc_mutex); INIT_LIST_HEAD(&disc->rports); - - disc->priv = lport; - - return 0; } EXPORT_SYMBOL(fc_disc_init); diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index c772d8d2715..1b3a0947345 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -27,6 +27,7 @@ #include <linux/slab.h> #include <linux/err.h> #include <linux/export.h> +#include <linux/log2.h> #include <scsi/fc/fc_fc2.h> @@ -303,10 +304,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp, fr_eof(fp) = FC_EOF_N; } - /* - * Initialize remainig fh fields - * from fc_fill_fc_hdr - */ + /* Initialize remaining fh fields from fc_fill_fc_hdr */ fh->fh_ox_id = htons(ep->oxid); fh->fh_rx_id = htons(ep->rxid); fh->fh_seq_id = ep->seq.id; @@ -337,7 +335,7 @@ static void fc_exch_release(struct fc_exch *ep) * fc_exch_timer_cancel() - cancel exch timer * @ep: The exchange whose timer to be canceled */ -static inline void fc_exch_timer_cancel(struct fc_exch *ep) +static inline void fc_exch_timer_cancel(struct fc_exch *ep) { if (cancel_delayed_work(&ep->timeout_work)) { FC_EXCH_DBG(ep, "Exchange timer canceled\n"); @@ -362,9 +360,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep, FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec); - if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, - msecs_to_jiffies(timer_msec))) - fc_exch_hold(ep); /* hold for timer */ + fc_exch_hold(ep); /* hold for timer */ + if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, + msecs_to_jiffies(timer_msec))) + fc_exch_release(ep); } /** @@ -382,6 +381,8 @@ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec) /** * fc_exch_done_locked() - Complete an exchange with the exchange lock held * @ep: The exchange that is complete + * + * Note: May sleep if invoked from outside a response handler. */ static int fc_exch_done_locked(struct fc_exch *ep) { @@ -393,7 +394,6 @@ static int fc_exch_done_locked(struct fc_exch *ep) * ep, and in that case we only clear the resp and set it as * complete, so it can be reused by the timer to send the rrq. 
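 * With this patch the clearing no longer happens here: fc_exch_done()
 * now calls fc_seq_set_resp(sp, NULL, ep->arg) after dropping ex_lock,
 * and that helper waits on resp_wq for any handler still running in
 * fc_invoke_resp() before the pointer is actually changed.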
*/ - ep->resp = NULL; if (ep->state & FC_EX_DONE) return rc; ep->esb_stat |= ESB_ST_COMPLETE; @@ -463,23 +463,23 @@ static void fc_exch_delete(struct fc_exch *ep) fc_exch_release(ep); /* drop hold for exch in mp */ } -/** - * fc_seq_send() - Send a frame using existing sequence/exchange pair - * @lport: The local port that the exchange will be sent on - * @sp: The sequence to be sent - * @fp: The frame to be sent on the exchange - */ -static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, - struct fc_frame *fp) +static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp, + struct fc_frame *fp) { struct fc_exch *ep; struct fc_frame_header *fh = fc_frame_header_get(fp); - int error; + int error = -ENXIO; u32 f_ctl; u8 fh_type = fh->fh_type; ep = fc_seq_exch(sp); - WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT); + + if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) { + fc_frame_free(fp); + goto out; + } + + WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT)); f_ctl = ntoh24(fh->fh_f_ctl); fc_exch_setup_hdr(ep, fp, f_ctl); @@ -502,17 +502,37 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, error = lport->tt.frame_send(lport, fp); if (fh_type == FC_TYPE_BLS) - return error; + goto out; /* * Update the exchange and sequence flags, * assuming all frames for the sequence have been sent. * We can only be called to send once for each sequence. */ - spin_lock_bh(&ep->ex_lock); ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */ if (f_ctl & FC_FC_SEQ_INIT) ep->esb_stat &= ~ESB_ST_SEQ_INIT; +out: + return error; +} + +/** + * fc_seq_send() - Send a frame using existing sequence/exchange pair + * @lport: The local port that the exchange will be sent on + * @sp: The sequence to be sent + * @fp: The frame to be sent on the exchange + * + * Note: The frame will be freed either by a direct call to fc_frame_free(fp) + * or indirectly by calling libfc_function_template.frame_send(). + */ +static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, + struct fc_frame *fp) +{ + struct fc_exch *ep; + int error; + ep = fc_seq_exch(sp); + spin_lock_bh(&ep->ex_lock); + error = fc_seq_send_locked(lport, sp, fp); spin_unlock_bh(&ep->ex_lock); return error; } @@ -570,6 +590,8 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp) /* * Set the response handler for the exchange associated with a sequence. + * + * Note: May sleep if invoked from outside a response handler. */ static void fc_seq_set_resp(struct fc_seq *sp, void (*resp)(struct fc_seq *, struct fc_frame *, @@ -577,8 +599,18 @@ static void fc_seq_set_resp(struct fc_seq *sp, void *arg) { struct fc_exch *ep = fc_seq_exch(sp); + DEFINE_WAIT(wait); spin_lock_bh(&ep->ex_lock); + while (ep->resp_active && ep->resp_task != current) { + prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE); + spin_unlock_bh(&ep->ex_lock); + + schedule(); + + spin_lock_bh(&ep->ex_lock); + } + finish_wait(&ep->resp_wq, &wait); ep->resp = resp; ep->arg = arg; spin_unlock_bh(&ep->ex_lock); @@ -611,27 +643,31 @@ static int fc_exch_abort_locked(struct fc_exch *ep, if (!sp) return -ENOMEM; - ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL; if (timer_msec) fc_exch_timer_set_locked(ep, timer_msec); - /* - * If not logged into the fabric, don't send ABTS but leave - * sequence active until next timeout. - */ - if (!ep->sid) - return 0; - - /* - * Send an abort for the sequence that timed out. 
- */ - fp = fc_frame_alloc(ep->lp, 0); - if (fp) { - fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, - FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); - error = fc_seq_send(ep->lp, sp, fp); - } else - error = -ENOBUFS; + if (ep->sid) { + /* + * Send an abort for the sequence that timed out. + */ + fp = fc_frame_alloc(ep->lp, 0); + if (fp) { + ep->esb_stat |= ESB_ST_SEQ_INIT; + fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid, + FC_TYPE_BLS, FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + error = fc_seq_send_locked(ep->lp, sp, fp); + } else { + error = -ENOBUFS; + } + } else { + /* + * If not logged into the fabric, don't send ABTS but leave + * sequence active until next timeout. + */ + error = 0; + } + ep->esb_stat |= ESB_ST_ABNORMAL; return error; } @@ -658,6 +694,61 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp, } /** + * fc_invoke_resp() - invoke ep->resp() + * + * Notes: + * It is assumed that after initialization finished (this means the + * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are + * modified only via fc_seq_set_resp(). This guarantees that neither of these + * two variables changes if ep->resp_active > 0. + * + * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when + * this function is invoked, the first spin_lock_bh() call in this function + * will wait until fc_seq_set_resp() has finished modifying these variables. + * + * Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that + * ep->resp() won't be invoked after fc_exch_done() has returned. + * + * The response handler itself may invoke fc_exch_done(), which will clear the + * ep->resp pointer. + * + * Return value: + * Returns true if and only if ep->resp has been invoked. + */ +static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp, + struct fc_frame *fp) +{ + void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); + void *arg; + bool res = false; + + spin_lock_bh(&ep->ex_lock); + ep->resp_active++; + if (ep->resp_task != current) + ep->resp_task = !ep->resp_task ?
current : NULL; + resp = ep->resp; + arg = ep->arg; + spin_unlock_bh(&ep->ex_lock); + + if (resp) { + resp(sp, fp, arg); + res = true; + } else if (!IS_ERR(fp)) { + fc_frame_free(fp); + } + + spin_lock_bh(&ep->ex_lock); + if (--ep->resp_active == 0) + ep->resp_task = NULL; + spin_unlock_bh(&ep->ex_lock); + + if (ep->resp_active == 0) + wake_up(&ep->resp_wq); + + return res; +} + +/** * fc_exch_timeout() - Handle exchange timer expiration * @work: The work_struct identifying the exchange that timed out */ @@ -666,8 +757,6 @@ static void fc_exch_timeout(struct work_struct *work) struct fc_exch *ep = container_of(work, struct fc_exch, timeout_work.work); struct fc_seq *sp = &ep->seq; - void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); - void *arg; u32 e_stat; int rc = 1; @@ -685,16 +774,13 @@ static void fc_exch_timeout(struct work_struct *work) fc_exch_rrq(ep); goto done; } else { - resp = ep->resp; - arg = ep->arg; - ep->resp = NULL; if (e_stat & ESB_ST_ABNORMAL) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); if (!rc) fc_exch_delete(ep); - if (resp) - resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT)); + fc_seq_set_resp(sp, NULL, ep->arg); fc_seq_exch_abort(sp, 2 * ep->r_a_tov); goto done; } @@ -781,6 +867,8 @@ hit: ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */ ep->rxid = FC_XID_UNKNOWN; ep->class = mp->class; + ep->resp_active = 0; + init_waitqueue_head(&ep->resp_wq); INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout); out: return ep; @@ -827,8 +915,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); spin_lock_bh(&pool->lock); ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); - if (ep && ep->xid == xid) + if (ep) { + WARN_ON(ep->xid != xid); fc_exch_hold(ep); + } spin_unlock_bh(&pool->lock); } return ep; @@ -839,6 +929,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid) * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and * the memory allocated for the related objects may be freed. * @sp: The sequence that has completed + * + * Note: May sleep if invoked from outside a response handler. */ static void fc_exch_done(struct fc_seq *sp) { @@ -848,6 +940,8 @@ static void fc_exch_done(struct fc_seq *sp) spin_lock_bh(&ep->ex_lock); rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); + + fc_seq_set_resp(sp, NULL, ep->arg); if (!rc) fc_exch_delete(ep); } @@ -976,6 +1070,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, } } + spin_lock_bh(&ep->ex_lock); /* * At this point, we have the exchange held. * Find or create the sequence. @@ -1003,11 +1098,11 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, * sending RSP, hence write request on other * end never finishes. 
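 * (This hunk also widens the critical section: ex_lock is now taken
 * right after the exchange lookup above and held across the sequence
 * find/create and the ESB_ST_SEQ_INIT update below, so the
 * SSB_ST_RESP and sequence-id updates no longer lock and unlock it
 * separately.)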
*/ - spin_lock_bh(&ep->ex_lock); sp->ssb_stat |= SSB_ST_RESP; sp->id = fh->fh_seq_id; - spin_unlock_bh(&ep->ex_lock); } else { + spin_unlock_bh(&ep->ex_lock); + /* sequence/exch should exist */ reject = FC_RJT_SEQ_ID; goto rel; @@ -1018,6 +1113,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport, if (f_ctl & FC_FC_SEQ_INIT) ep->esb_stat |= ESB_ST_SEQ_INIT; + spin_unlock_bh(&ep->ex_lock); fr_seq(fp) = sp; out: @@ -1132,7 +1228,7 @@ static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp, f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT; f_ctl |= ep->f_ctl; fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0); - fc_seq_send(ep->lp, sp, fp); + fc_seq_send_locked(ep->lp, sp, fp); } /** @@ -1280,21 +1376,23 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) if (!ep) goto reject; + + fp = fc_frame_alloc(ep->lp, sizeof(*ap)); + if (!fp) + goto free; + spin_lock_bh(&ep->ex_lock); if (ep->esb_stat & ESB_ST_COMPLETE) { spin_unlock_bh(&ep->ex_lock); + + fc_frame_free(fp); goto reject; } - if (!(ep->esb_stat & ESB_ST_REC_QUAL)) + if (!(ep->esb_stat & ESB_ST_REC_QUAL)) { + ep->esb_stat |= ESB_ST_REC_QUAL; fc_exch_hold(ep); /* hold for REC_QUAL */ - ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL; - fc_exch_timer_set_locked(ep, ep->r_a_tov); - - fp = fc_frame_alloc(ep->lp, sizeof(*ap)); - if (!fp) { - spin_unlock_bh(&ep->ex_lock); - goto free; } + fc_exch_timer_set_locked(ep, ep->r_a_tov); fh = fc_frame_header_get(fp); ap = fc_frame_payload_get(fp, sizeof(*ap)); memset(ap, 0, sizeof(*ap)); @@ -1307,15 +1405,17 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp) ap->ba_low_seq_cnt = htons(sp->cnt); } sp = fc_seq_start_next_locked(sp); - spin_unlock_bh(&ep->ex_lock); fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS); + ep->esb_stat |= ESB_ST_ABNORMAL; + spin_unlock_bh(&ep->ex_lock); + +free: fc_frame_free(rx_fp); return; reject: fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID); -free: - fc_frame_free(rx_fp); + goto free; } /** @@ -1405,9 +1505,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp, * If new exch resp handler is valid then call that * first. */ - if (ep->resp) - ep->resp(sp, fp, ep->arg); - else + if (!fc_invoke_resp(ep, sp, fp)) lport->tt.lport_recv(lport, fp); fc_exch_release(ep); /* release from lookup */ } else { @@ -1431,8 +1529,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) struct fc_exch *ep; enum fc_sof sof; u32 f_ctl; - void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); - void *ex_resp_arg; int rc; ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); @@ -1467,19 +1563,19 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) f_ctl = ntoh24(fh->fh_f_ctl); fr_seq(fp) = sp; + + spin_lock_bh(&ep->ex_lock); if (f_ctl & FC_FC_SEQ_INIT) ep->esb_stat |= ESB_ST_SEQ_INIT; + spin_unlock_bh(&ep->ex_lock); if (fc_sof_needs_ack(sof)) fc_seq_send_ack(sp, fp); - resp = ep->resp; - ex_resp_arg = ep->arg; if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T && (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { spin_lock_bh(&ep->ex_lock); - resp = ep->resp; rc = fc_exch_done_locked(ep); WARN_ON(fc_seq_exch(sp) != ep); spin_unlock_bh(&ep->ex_lock); @@ -1500,10 +1596,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) * If new exch resp handler is valid then call that * first. 
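 *
 * fc_invoke_resp() returns true only when a response handler was
 * installed and called; with no handler set it frees the frame itself
 * (unless fp is an ERR_PTR) and returns false, which is what lets
 * this call site collapse the old if/else into a single call:
 *
 *	fc_invoke_resp(ep, sp, fp);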
*/ - if (resp) - resp(sp, fp, ex_resp_arg); - else - fc_frame_free(fp); + fc_invoke_resp(ep, sp, fp); + fc_exch_release(ep); return; rel: @@ -1542,8 +1636,6 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) */ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) { - void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); - void *ex_resp_arg; struct fc_frame_header *fh; struct fc_ba_acc *ap; struct fc_seq *sp; @@ -1556,7 +1648,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) fc_exch_rctl_name(fh->fh_r_ctl)); if (cancel_delayed_work_sync(&ep->timeout_work)) { - FC_EXCH_DBG(ep, "Exchange timer canceled\n"); + FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n"); fc_exch_release(ep); /* release from pending timer hold */ } @@ -1588,9 +1680,6 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) break; } - resp = ep->resp; - ex_resp_arg = ep->arg; - /* do we need to do some other checks here. Can we reuse more of * fc_exch_recv_seq_resp */ @@ -1602,17 +1691,14 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); + + fc_exch_hold(ep); if (!rc) fc_exch_delete(ep); - - if (resp) - resp(sp, fp, ex_resp_arg); - else - fc_frame_free(fp); - + fc_invoke_resp(ep, sp, fp); if (has_rec) fc_exch_timer_set(ep, ep->r_a_tov); - + fc_exch_release(ep); } /** @@ -1651,7 +1737,7 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp) break; default: if (ep) - FC_EXCH_DBG(ep, "BLS rctl %x - %s received", + FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n", fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl)); break; @@ -1734,32 +1820,33 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason, /** * fc_exch_reset() - Reset an exchange * @ep: The exchange to be reset + * + * Note: May sleep if invoked from outside a response handler. */ static void fc_exch_reset(struct fc_exch *ep) { struct fc_seq *sp; - void (*resp)(struct fc_seq *, struct fc_frame *, void *); - void *arg; int rc = 1; spin_lock_bh(&ep->ex_lock); fc_exch_abort_locked(ep, 0); ep->state |= FC_EX_RST_CLEANUP; fc_exch_timer_cancel(ep); - resp = ep->resp; - ep->resp = NULL; if (ep->esb_stat & ESB_ST_REC_QUAL) atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */ ep->esb_stat &= ~ESB_ST_REC_QUAL; - arg = ep->arg; sp = &ep->seq; rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); + + fc_exch_hold(ep); + if (!rc) fc_exch_delete(ep); - if (resp) - resp(sp, ERR_PTR(-FC_EX_CLOSED), arg); + fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); + fc_seq_set_resp(sp, NULL, ep->arg); + fc_exch_release(ep); } /** @@ -1945,13 +2032,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg) switch (op) { case ELS_LS_RJT: - FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ"); + FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n"); /* fall through */ case ELS_LS_ACC: goto cleanup; default: - FC_EXCH_DBG(aborted_ep, "unexpected response op %x " - "for RRQ", op); + FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n", + op); return; } @@ -2522,13 +2609,8 @@ int fc_setup_exch_mgr(void) * cpu on which exchange originated by simple bitwise * AND operation between fc_cpu_mask and exchange id. 
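 *
 * Worked example for the ilog2() form below: with nr_cpu_ids == 6,
 * roundup_pow_of_two(6) == 8, so fc_cpu_order == 3 and
 * fc_cpu_mask == 0x7 -- the same values the old shift-and-count loop
 * produced, computed without iterating.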
*/ - fc_cpu_mask = 1; - fc_cpu_order = 0; - while (fc_cpu_mask < nr_cpu_ids) { - fc_cpu_mask <<= 1; - fc_cpu_order++; - } - fc_cpu_mask--; + fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids)); + fc_cpu_mask = (1 << fc_cpu_order) - 1; fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue"); if (!fc_exch_workqueue) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 09c81b2f216..1d7e76e8b44 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -902,7 +902,8 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp) /* * Check for missing or extra data frames. */ - if (unlikely(fsp->xfer_len != expected_len)) { + if (unlikely(fsp->cdb_status == SAM_STAT_GOOD && + fsp->xfer_len != expected_len)) { if (fsp->xfer_len < expected_len) { /* * Some data may be queued locally, @@ -955,12 +956,11 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) * Test for transport underrun, independent of response * underrun status. */ - if (fsp->xfer_len < fsp->data_len && !fsp->io_status && + if (fsp->cdb_status == SAM_STAT_GOOD && + fsp->xfer_len < fsp->data_len && !fsp->io_status && (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) || - fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) { + fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) fsp->status_code = FC_DATA_UNDRUN; - fsp->io_status = 0; - } } seq = fsp->seq_ptr; @@ -2043,7 +2043,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd) spin_unlock_irqrestore(&si->scsi_queue_lock, flags); return SUCCESS; } - /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ + /* grab a ref so the fsp and sc_cmd cannot be released from under us */ fc_fcp_pkt_hold(fsp); spin_unlock_irqrestore(&si->scsi_queue_lock, flags); diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index f04d15c67df..e01a29863c3 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -516,7 +516,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport, * @lport: The local port receiving the LOGO * @fp: The LOGO request frame * - * Locking Note: The lport lock is exected to be held before calling + * Locking Note: The lport lock is expected to be held before calling * this function. */ static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) @@ -1088,7 +1088,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) { unsigned long delay = 0; FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n", - PTR_ERR(fp), fc_lport_state(lport), + IS_ERR(fp) ? 
-PTR_ERR(fp) : 0, fc_lport_state(lport), lport->retry_count); if (PTR_ERR(fp) == -FC_EX_CLOSED) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index d518d17e940..589ff9aedd3 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -926,6 +926,20 @@ err: kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); } +static bool +fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata) +{ + if (rdata->ids.roles == FC_PORT_ROLE_UNKNOWN) + return true; + if ((rdata->ids.roles & FC_PORT_ROLE_FCP_TARGET) && + (lport->service_params & FCP_SPPF_INIT_FCN)) + return true; + if ((rdata->ids.roles & FC_PORT_ROLE_FCP_INITIATOR) && + (lport->service_params & FCP_SPPF_TARG_FCN)) + return true; + return false; +} + /** * fc_rport_enter_plogi() - Send Port Login (PLOGI) request * @rdata: The remote port to send a PLOGI to @@ -938,6 +952,12 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) struct fc_lport *lport = rdata->local_port; struct fc_frame *fp; + if (!fc_rport_compatible_roles(lport, rdata)) { + FC_RPORT_DBG(rdata, "PLOGI suppressed for incompatible role\n"); + fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); + return; + } + FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n", fc_rport_state(rdata)); @@ -1646,6 +1666,13 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, rjt_data.explan = ELS_EXPL_NONE; goto reject; } + if (!fc_rport_compatible_roles(lport, rdata)) { + FC_RPORT_DBG(rdata, "Received PLOGI for incompatible role\n"); + mutex_unlock(&rdata->rp_mutex); + rjt_data.reason = ELS_RJT_LOGIC; + rjt_data.explan = ELS_EXPL_NONE; + goto reject; + } /* * Get session payload size from incoming PLOGI. @@ -1678,7 +1705,7 @@ reject: * @rdata: The remote port that sent the PRLI request * @rx_fp: The PRLI request frame * - * Locking Note: The rport lock is exected to be held before calling + * Locking Note: The rport lock is expected to be held before calling * this function. */ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, @@ -1797,7 +1824,7 @@ drop: * @rdata: The remote port that sent the PRLO request * @rx_fp: The PRLO request frame * - * Locking Note: The rport lock is exected to be held before calling + * Locking Note: The rport lock is expected to be held before calling * this function. */ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, @@ -1868,7 +1895,7 @@ drop: * @lport: The local port that received the LOGO request * @fp: The LOGO request frame * - * Locking Note: The rport lock is exected to be held before calling + * Locking Note: The rport lock is expected to be held before calling * this function. 
*/ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) @@ -1962,7 +1989,7 @@ static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len, rdata->flags |= FC_RP_FLAGS_RETRY; rdata->supported_classes = FC_COS_CLASS3; - if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR)) + if (!(lport->service_params & FCP_SPPF_INIT_FCN)) return 0; spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR; diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 82c3fd4bc93..3d1bc67bac9 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -110,16 +110,8 @@ static void __iscsi_update_cmdsn(struct iscsi_session *session, session->exp_cmdsn = exp_cmdsn; if (max_cmdsn != session->max_cmdsn && - !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) { + !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) session->max_cmdsn = max_cmdsn; - /* - * if the window closed with IO queued, then kick the - * xmit thread - */ - if (!list_empty(&session->leadconn->cmdqueue) || - !list_empty(&session->leadconn->mgmtqueue)) - iscsi_conn_queue_work(session->leadconn); - } } void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) @@ -346,7 +338,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; struct iscsi_scsi_req *hdr; - unsigned hdrlength, cmd_len; + unsigned hdrlength, cmd_len, transfer_length; itt_t itt; int rc; @@ -395,11 +387,15 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) if (rc) return rc; } + + if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) + task->protected = true; + + transfer_length = scsi_transfer_length(sc); + hdr->data_length = cpu_to_be32(transfer_length); if (sc->sc_data_direction == DMA_TO_DEVICE) { - unsigned out_len = scsi_out(sc)->length; struct iscsi_r2t_info *r2t = &task->unsol_r2t; - hdr->data_length = cpu_to_be32(out_len); hdr->flags |= ISCSI_FLAG_CMD_WRITE; /* * Write counters: @@ -418,18 +414,19 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) memset(r2t, 0, sizeof(*r2t)); if (session->imm_data_en) { - if (out_len >= session->first_burst) + if (transfer_length >= session->first_burst) task->imm_count = min(session->first_burst, conn->max_xmit_dlength); else - task->imm_count = min(out_len, - conn->max_xmit_dlength); + task->imm_count = min(transfer_length, + conn->max_xmit_dlength); hton24(hdr->dlength, task->imm_count); } else zero_data(hdr->dlength); if (!session->initial_r2t_en) { - r2t->data_length = min(session->first_burst, out_len) - + r2t->data_length = min(session->first_burst, + transfer_length) - task->imm_count; r2t->data_offset = task->imm_count; r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); @@ -442,7 +439,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) } else { hdr->flags |= ISCSI_FLAG_CMD_FINAL; zero_data(hdr->dlength); - hdr->data_length = cpu_to_be32(scsi_in(sc)->length); if (sc->sc_data_direction == DMA_FROM_DEVICE) hdr->flags |= ISCSI_FLAG_CMD_READ; @@ -470,7 +466,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) scsi_bidi_cmnd(sc) ? "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", conn->id, sc, sc->cmnd[0], - task->itt, scsi_bufflen(sc), + task->itt, transfer_length, scsi_bidi_cmnd(sc) ? 
scsi_in(sc)->length : 0, session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); @@ -481,7 +477,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) * iscsi_free_task - free a task * @task: iscsi cmd task * - * Must be called with session lock. + * Must be called with session back_lock. * This function returns the scsi command to scsi-ml or cleans * up mgmt tasks then returns the task to the pool. */ @@ -507,7 +503,6 @@ static void iscsi_free_task(struct iscsi_task *task) kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*)); if (sc) { - task->sc = NULL; /* SCSI eh reuses commands to verify us */ sc->SCp.ptr = NULL; /* @@ -536,9 +531,10 @@ void iscsi_put_task(struct iscsi_task *task) { struct iscsi_session *session = task->conn->session; - spin_lock_bh(&session->lock); + /* regular RX path uses back_lock */ + spin_lock_bh(&session->back_lock); __iscsi_put_task(task); - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->back_lock); } EXPORT_SYMBOL_GPL(iscsi_put_task); @@ -547,7 +543,7 @@ EXPORT_SYMBOL_GPL(iscsi_put_task); * @task: iscsi cmd task * @state: state to complete task with * - * Must be called with session lock. + * Must be called with session back_lock. */ static void iscsi_complete_task(struct iscsi_task *task, int state) { @@ -586,7 +582,7 @@ static void iscsi_complete_task(struct iscsi_task *task, int state) * This is used when drivers do not need or cannot perform * lower level pdu processing. * - * Called with session lock + * Called with session back_lock */ void iscsi_complete_scsi_task(struct iscsi_task *task, uint32_t exp_cmdsn, uint32_t max_cmdsn) @@ -603,7 +599,7 @@ EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task); /* - * session lock must be held and if not called for a task that is + * session back_lock must be held and if not called for a task that is * still pending or from the xmit thread, then xmit thread must * be suspended. */ @@ -643,7 +639,10 @@ static void fail_scsi_task(struct iscsi_task *task, int err) scsi_in(sc)->resid = scsi_in(sc)->length; } + /* regular RX path uses back_lock */ + spin_lock_bh(&conn->session->back_lock); iscsi_complete_task(task, state); + spin_unlock_bh(&conn->session->back_lock); } static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, @@ -781,7 +780,10 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, return task; free_task: + /* regular RX path uses back_lock */ + spin_lock_bh(&session->back_lock); __iscsi_put_task(task); + spin_unlock_bh(&session->back_lock); return NULL; } @@ -792,10 +794,10 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, struct iscsi_session *session = conn->session; int err = 0; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size)) err = -EPERM; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); return err; } EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); @@ -824,6 +826,33 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, sc->result = (DID_OK << 16) | rhdr->cmd_status; + if (task->protected) { + sector_t sector; + u8 ascq; + + /** + * Transports that didn't implement check_protection + * callback but still published T10-PI support to scsi-mid + * deserve this BUG_ON. 
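+ *
+ * On a non-zero ASCQ the code below hand-builds descriptor-format
+ * sense data: scsi_build_sense_buffer(1, ...) sets ILLEGAL_REQUEST
+ * with ASC 0x10 (the T10 DIF guard/application/reference tag check
+ * failures) and the transport-reported ASCQ, then an information
+ * descriptor (type 0, additional length 0xa, validity bit 0x80) is
+ * filled in so the failing sector lands in bytes 12..19 of the
+ * sense buffer.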
+ **/ + BUG_ON(!session->tt->check_protection); + + ascq = session->tt->check_protection(task, §or); + if (ascq) { + sc->result = DRIVER_SENSE << 24 | + SAM_STAT_CHECK_CONDITION; + scsi_build_sense_buffer(1, sc->sense_buffer, + ILLEGAL_REQUEST, 0x10, ascq); + sc->sense_buffer[7] = 0xc; /* Additional sense length */ + sc->sense_buffer[8] = 0; /* Information desc type */ + sc->sense_buffer[9] = 0xa; /* Additional desc length */ + sc->sense_buffer[10] = 0x80; /* Validity bit */ + + put_unaligned_be64(sector, &sc->sense_buffer[12]); + goto out; + } + } + if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) { sc->result = DID_ERROR << 16; goto out; @@ -1014,13 +1043,13 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, iscsi_conn_printk(KERN_ERR, conn, "pdu (op 0x%x itt 0x%x) rejected " "due to DataDigest error.\n", - rejected_pdu.itt, opcode); + opcode, rejected_pdu.itt); break; case ISCSI_REASON_IMM_CMD_REJECT: iscsi_conn_printk(KERN_ERR, conn, "pdu (op 0x%x itt 0x%x) rejected. Too many " "immediate commands.\n", - rejected_pdu.itt, opcode); + opcode, rejected_pdu.itt); /* * We only send one TMF at a time so if the target could not * handle it, then it should get fixed (RFC mandates that @@ -1032,14 +1061,19 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, if (opcode != ISCSI_OP_NOOP_OUT) return 0; - if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) + if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) { /* * nop-out in response to target's nop-out rejected. * Just resend. */ + /* In RX path we are under back lock */ + spin_unlock(&conn->session->back_lock); + spin_lock(&conn->session->frwd_lock); iscsi_send_nopout(conn, (struct iscsi_nopin*)&rejected_pdu); - else { + spin_unlock(&conn->session->frwd_lock); + spin_lock(&conn->session->back_lock); + } else { struct iscsi_task *task; /* * Our nop as ping got dropped. We know the target @@ -1060,8 +1094,8 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, default: iscsi_conn_printk(KERN_ERR, conn, "pdu (op 0x%x itt 0x%x) rejected. Reason " - "code 0x%x\n", rejected_pdu.itt, - rejected_pdu.opcode, reject->reason); + "code 0x%x\n", rejected_pdu.opcode, + rejected_pdu.itt, reject->reason); break; } return rc; @@ -1075,7 +1109,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, * This should be used for mgmt tasks like login and nops, or if * the LDD's itt space does not include the session age. * - * The session lock must be held. + * The session back_lock must be held. */ struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) { @@ -1104,7 +1138,7 @@ EXPORT_SYMBOL_GPL(iscsi_itt_to_task); * @datalen: len of data buffer * * Completes pdu processing by freeing any resources allocated at - * queuecommand or send generic. session lock must be held and verify + * queuecommand or send generic. session back_lock must be held and verify * itt must have been called. 
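 * (iscsi_complete_pdu() below is the locking wrapper; it takes
 * back_lock around this call for drivers that do not already hold it.)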
*/ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, @@ -1141,7 +1175,12 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) break; + /* In RX path we are under back lock */ + spin_unlock(&session->back_lock); + spin_lock(&session->frwd_lock); iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr); + spin_unlock(&session->frwd_lock); + spin_lock(&session->back_lock); break; case ISCSI_OP_REJECT: rc = iscsi_handle_reject(conn, hdr, data, datalen); @@ -1248,9 +1287,9 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, { int rc; - spin_lock(&conn->session->lock); + spin_lock(&conn->session->back_lock); rc = __iscsi_complete_pdu(conn, hdr, data, datalen); - spin_unlock(&conn->session->lock); + spin_unlock(&conn->session->back_lock); return rc; } EXPORT_SYMBOL_GPL(iscsi_complete_pdu); @@ -1294,7 +1333,7 @@ EXPORT_SYMBOL_GPL(iscsi_verify_itt); * * This should be used for cmd tasks. * - * The session lock must be held. + * The session back_lock must be held. */ struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt) { @@ -1324,15 +1363,15 @@ void iscsi_session_failure(struct iscsi_session *session, struct iscsi_conn *conn; struct device *dev; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); conn = session->leadconn; if (session->state == ISCSI_STATE_TERMINATE || !conn) { - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); return; } dev = get_device(&conn->cls_conn->dev); - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); if (!dev) return; /* @@ -1352,15 +1391,15 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) { struct iscsi_session *session = conn->session; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); if (session->state == ISCSI_STATE_FAILED) { - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); return; } if (conn->stop_stage == 0) session->state = ISCSI_STATE_FAILED; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); @@ -1394,15 +1433,18 @@ static int iscsi_xmit_task(struct iscsi_conn *conn) return -ENODATA; __iscsi_get_task(task); - spin_unlock_bh(&conn->session->lock); + spin_unlock_bh(&conn->session->frwd_lock); rc = conn->session->tt->xmit_task(task); - spin_lock_bh(&conn->session->lock); + spin_lock_bh(&conn->session->frwd_lock); if (!rc) { /* done with this task */ task->last_xfer = jiffies; conn->task = NULL; } + /* regular RX path uses back_lock */ + spin_lock(&conn->session->back_lock); __iscsi_put_task(task); + spin_unlock(&conn->session->back_lock); return rc; } @@ -1411,7 +1453,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn) * @task: task to requeue * * LLDs that need to run a task from the session workqueue should call - * this. The session lock must be held. This should only be called + * this. The session frwd_lock must be held. This should only be called * by software drivers. 
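 *
 * A minimal sketch of a conforming call site (hypothetical LLD code;
 * the in-tree example touched by this patch is the R2T path, where
 * iscsi_tcp_r2t_rsp() requeues the task while frwd_lock is held):
 *
 *	spin_lock(&conn->session->frwd_lock);
 *	iscsi_requeue_task(task);
 *	spin_unlock(&conn->session->frwd_lock);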
*/ void iscsi_requeue_task(struct iscsi_task *task) @@ -1442,10 +1484,10 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) struct iscsi_task *task; int rc = 0; - spin_lock_bh(&conn->session->lock); + spin_lock_bh(&conn->session->frwd_lock); if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n"); - spin_unlock_bh(&conn->session->lock); + spin_unlock_bh(&conn->session->frwd_lock); return -ENODATA; } @@ -1466,7 +1508,10 @@ check_mgmt: struct iscsi_task, running); list_del_init(&conn->task->running); if (iscsi_prep_mgmt_task(conn, conn->task)) { + /* regular RX path uses back_lock */ + spin_lock_bh(&conn->session->back_lock); __iscsi_put_task(conn->task); + spin_unlock_bh(&conn->session->back_lock); conn->task = NULL; continue; } @@ -1528,11 +1573,11 @@ check_mgmt: if (!list_empty(&conn->mgmtqueue)) goto check_mgmt; } - spin_unlock_bh(&conn->session->lock); + spin_unlock_bh(&conn->session->frwd_lock); return -ENODATA; done: - spin_unlock_bh(&conn->session->lock); + spin_unlock_bh(&conn->session->frwd_lock); return rc; } @@ -1568,6 +1613,7 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn, task->have_checked_conn = false; task->last_timeout = jiffies; task->last_xfer = jiffies; + task->protected = false; INIT_LIST_HEAD(&task->running); return task; } @@ -1601,7 +1647,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); reason = iscsi_session_chkready(cls_session); if (reason) { @@ -1687,13 +1733,13 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) } session->queued_cmdsn++; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); return 0; prepd_reject: iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); reject: - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason); return SCSI_MLQUEUE_TARGET_BUSY; @@ -1701,7 +1747,7 @@ reject: prepd_fault: iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); fault: - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason); if (!scsi_bidi_cmnd(sc)) @@ -1749,14 +1795,14 @@ static void iscsi_tmf_timedout(unsigned long data) struct iscsi_conn *conn = (struct iscsi_conn *)data; struct iscsi_session *session = conn->session; - spin_lock(&session->lock); + spin_lock(&session->frwd_lock); if (conn->tmf_state == TMF_QUEUED) { conn->tmf_state = TMF_TIMEDOUT; ISCSI_DBG_EH(session, "tmf timedout\n"); /* unblock eh_abort() */ wake_up(&conn->ehwait); } - spin_unlock(&session->lock); + spin_unlock(&session->frwd_lock); } static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, @@ -1769,10 +1815,10 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); if (!task) { - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n"); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); return -EPERM; } conn->tmfcmd_pdus_cnt++; @@ -1782,7 +1828,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, add_timer(&conn->tmf_timer); ISCSI_DBG_EH(session, "tmf set timeout\n"); 
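	/*
	 * Both locks are dropped below while the TMF is outstanding so
	 * that the response path (or iscsi_tmf_timedout() above) can
	 * change conn->tmf_state and wake ehwait; they are re-taken
	 * after the wait, before tmf_state is inspected.
	 */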
- spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); /* @@ -1801,7 +1847,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, del_timer_sync(&conn->tmf_timer); mutex_lock(&session->eh_mutex); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); /* if the session drops it will clean up the task */ if (age != session->age || session->state != ISCSI_STATE_LOGGED_IN) @@ -1838,7 +1884,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun, * iscsi_suspend_queue - suspend iscsi_queuecommand * @conn: iscsi conn to stop queueing IO on * - * This grabs the session lock to make sure no one is in + * This grabs the session frwd_lock to make sure no one is in * xmit_task/queuecommand, and then sets suspend to prevent * new commands from being queued. This only needs to be called * by offload drivers that need to sync a path like ep disconnect @@ -1847,9 +1893,9 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun, */ void iscsi_suspend_queue(struct iscsi_conn *conn) { - spin_lock_bh(&conn->session->lock); + spin_lock_bh(&conn->session->frwd_lock); set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); - spin_unlock_bh(&conn->session->lock); + spin_unlock_bh(&conn->session->frwd_lock); } EXPORT_SYMBOL_GPL(iscsi_suspend_queue); @@ -1908,7 +1954,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc); - spin_lock(&session->lock); + spin_lock(&session->frwd_lock); task = (struct iscsi_task *)sc->SCp.ptr; if (!task) { /* @@ -2022,7 +2068,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) done: if (task) task->last_timeout = jiffies; - spin_unlock(&session->lock); + spin_unlock(&session->frwd_lock); ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? "timer reset" : "nh"); return rc; @@ -2034,7 +2080,7 @@ static void iscsi_check_transport_timeouts(unsigned long data) struct iscsi_session *session = conn->session; unsigned long recv_timeout, next_timeout = 0, last_recv; - spin_lock(&session->lock); + spin_lock(&session->frwd_lock); if (session->state != ISCSI_STATE_LOGGED_IN) goto done; @@ -2051,7 +2097,7 @@ static void iscsi_check_transport_timeouts(unsigned long data) "last ping %lu, now %lu\n", conn->ping_timeout, conn->recv_timeout, last_recv, conn->last_ping, jiffies); - spin_unlock(&session->lock); + spin_unlock(&session->frwd_lock); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return; } @@ -2067,7 +2113,7 @@ static void iscsi_check_transport_timeouts(unsigned long data) ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout); mod_timer(&conn->transport_timer, next_timeout); done: - spin_unlock(&session->lock); + spin_unlock(&session->frwd_lock); } static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, @@ -2097,7 +2143,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) ISCSI_DBG_EH(session, "aborting sc %p\n", sc); mutex_lock(&session->eh_mutex); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); /* * if session was ISCSI_STATE_IN_RECOVERY then we may not have * got the command. 
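 * (In that case sc->SCp.ptr was never set and the check just below
 * returns SUCCESS straight away: the command never reached the iSCSI
 * layer, so there is nothing to abort.)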
@@ -2105,7 +2151,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) if (!sc->SCp.ptr) { ISCSI_DBG_EH(session, "sc never reached iscsi layer or " "it completed.\n"); - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); return SUCCESS; } @@ -2116,7 +2162,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) */ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN || sc->SCp.phase != session->age) { - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); ISCSI_DBG_EH(session, "failing abort due to dropped " "session.\n"); @@ -2157,7 +2203,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) switch (conn->tmf_state) { case TMF_SUCCESS: - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); /* * stop tx side incase the target had sent a abort rsp but * the initiator was still writing out data. @@ -2168,15 +2214,15 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) * good and have never sent us a successful tmf response * then sent more data for the cmd. */ - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); fail_scsi_task(task, DID_ABORT); conn->tmf_state = TMF_INITIAL; memset(hdr, 0, sizeof(*hdr)); - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); iscsi_start_tx(conn); goto success_unlocked; case TMF_TIMEDOUT: - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); goto failed_unlocked; case TMF_NOT_FOUND: @@ -2195,7 +2241,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) } success: - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); success_unlocked: ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n", sc, task->itt); @@ -2203,7 +2249,7 @@ success_unlocked: return SUCCESS; failed: - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); failed_unlocked: ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc, task ? task->itt : 0); @@ -2236,7 +2282,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun); mutex_lock(&session->eh_mutex); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); /* * Just check if we are not logged in. We cannot check for * the phase because the reset could come from a ioctl. @@ -2263,7 +2309,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) case TMF_SUCCESS: break; case TMF_TIMEDOUT: - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); goto done; default: @@ -2272,21 +2318,21 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) } rc = SUCCESS; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); iscsi_suspend_tx(conn); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); memset(hdr, 0, sizeof(*hdr)); fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); conn->tmf_state = TMF_INITIAL; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); iscsi_start_tx(conn); goto done; unlock: - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); done: ISCSI_DBG_EH(session, "dev reset result = %s\n", rc == SUCCESS ? 
"SUCCESS" : "FAILED"); @@ -2299,13 +2345,13 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); if (session->state != ISCSI_STATE_LOGGED_IN) { session->state = ISCSI_STATE_RECOVERY_FAILED; if (session->leadconn) wake_up(&session->leadconn->ehwait); } - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); } EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); @@ -2327,19 +2373,19 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc) conn = session->leadconn; mutex_lock(&session->eh_mutex); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); if (session->state == ISCSI_STATE_TERMINATE) { failed: ISCSI_DBG_EH(session, "failing session reset: Could not log back into " "%s, %s [age %d]\n", session->targetname, conn->persistent_address, session->age); - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); return FAILED; } - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); /* * we drop the lock here but the leadconn cannot be destoyed while @@ -2356,14 +2402,14 @@ failed: flush_signals(current); mutex_lock(&session->eh_mutex); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); if (session->state == ISCSI_STATE_LOGGED_IN) { ISCSI_DBG_EH(session, "session reset succeeded for %s,%s\n", session->targetname, conn->persistent_address); } else goto failed; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); return SUCCESS; } @@ -2399,7 +2445,7 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc) session->targetname); mutex_lock(&session->eh_mutex); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); /* * Just check if we are not logged in. We cannot check for * the phase because the reset could come from a ioctl. @@ -2426,7 +2472,7 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc) case TMF_SUCCESS: break; case TMF_TIMEDOUT: - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); goto done; default: @@ -2435,21 +2481,21 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc) } rc = SUCCESS; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); iscsi_suspend_tx(conn); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); memset(hdr, 0, sizeof(*hdr)); fail_scsi_tasks(conn, -1, DID_ERROR); conn->tmf_state = TMF_INITIAL; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); iscsi_start_tx(conn); goto done; unlock: - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); done: ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname, rc == SUCCESS ? 
"SUCCESS" : "FAILED"); @@ -2747,8 +2793,10 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, session->max_r2t = 1; session->tt = iscsit; session->dd_data = cls_session->dd_data + sizeof(*session); + mutex_init(&session->eh_mutex); - spin_lock_init(&session->lock); + spin_lock_init(&session->frwd_lock); + spin_lock_init(&session->back_lock); /* initialize SCSI PDU commands pool */ if (iscsi_pool_init(&session->cmdpool, session->cmds_max, @@ -2809,7 +2857,12 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) kfree(session->targetname); kfree(session->targetalias); kfree(session->initiatorname); + kfree(session->boot_root); + kfree(session->boot_nic); + kfree(session->boot_target); kfree(session->ifacename); + kfree(session->portal_type); + kfree(session->discovery_parent_type); iscsi_destroy_session(cls_session); iscsi_host_dec_session_cnt(shost); @@ -2857,14 +2910,14 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, INIT_WORK(&conn->xmitwork, iscsi_xmitworker); /* allocate login_task used for the login/text sequences */ - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); if (!kfifo_out(&session->cmdpool.queue, (void*)&conn->login_task, sizeof(void*))) { - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); goto login_task_alloc_fail; } - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); data = (char *) __get_free_pages(GFP_KERNEL, get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); @@ -2901,7 +2954,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) del_timer_sync(&conn->transport_timer); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; if (session->leadconn == conn) { /* @@ -2910,7 +2963,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) session->state = ISCSI_STATE_TERMINATE; wake_up(&conn->ehwait); } - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); /* * Block until all in-progress commands for this connection @@ -2937,15 +2990,19 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) /* flush queued up work because we free the connection below */ iscsi_suspend_tx(conn); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); free_pages((unsigned long) conn->data, get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); kfree(conn->persistent_address); + kfree(conn->local_ipaddr); + /* regular RX path uses back_lock */ + spin_lock_bh(&session->back_lock); kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, sizeof(void*)); + spin_unlock_bh(&session->back_lock); if (session->leadconn == conn) session->leadconn = NULL; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); iscsi_destroy_conn(cls_conn); } @@ -2982,7 +3039,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) conn->ping_timeout = 5; } - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); conn->c_stage = ISCSI_CONN_STARTED; session->state = ISCSI_STATE_LOGGED_IN; session->queued_cmdsn = session->cmdsn; @@ -3011,7 +3068,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) default: break; } - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); iscsi_unblock_session(session->cls_session); wake_up(&conn->ehwait); @@ -3050,9 +3107,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, int old_stop_stage; mutex_lock(&session->eh_mutex); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); if 
(conn->stop_stage == STOP_CONN_TERM) { - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); return; } @@ -3069,14 +3126,14 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, old_stop_stage = conn->stop_stage; conn->stop_stage = flag; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); del_timer_sync(&conn->transport_timer); iscsi_suspend_tx(conn); - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); conn->c_stage = ISCSI_CONN_STOPPED; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); /* * for connection level recovery we should not calculate @@ -3097,11 +3154,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, /* * flush queues. */ - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); fail_mgmt_tasks(session, conn); memset(&conn->tmhdr, 0, sizeof(conn->tmhdr)); - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); mutex_unlock(&session->eh_mutex); } @@ -3128,10 +3185,10 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_session *session = cls_session->dd_data; struct iscsi_conn *conn = cls_conn->dd_data; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); if (is_leading) session->leadconn = conn; - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); /* * Unblock xmitworker(), Login Phase will pass through. @@ -3142,7 +3199,7 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session, } EXPORT_SYMBOL_GPL(iscsi_conn_bind); -static int iscsi_switch_str_param(char **param, char *new_val_buf) +int iscsi_switch_str_param(char **param, char *new_val_buf) { char *new_val; @@ -3159,12 +3216,14 @@ static int iscsi_switch_str_param(char **param, char *new_val_buf) *param = new_val; return 0; } +EXPORT_SYMBOL_GPL(iscsi_switch_str_param); int iscsi_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf, int buflen) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; + int val; switch(param) { case ISCSI_PARAM_FAST_ABORT: @@ -3248,6 +3307,23 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, return iscsi_switch_str_param(&session->ifacename, buf); case ISCSI_PARAM_INITIATOR_NAME: return iscsi_switch_str_param(&session->initiatorname, buf); + case ISCSI_PARAM_BOOT_ROOT: + return iscsi_switch_str_param(&session->boot_root, buf); + case ISCSI_PARAM_BOOT_NIC: + return iscsi_switch_str_param(&session->boot_nic, buf); + case ISCSI_PARAM_BOOT_TARGET: + return iscsi_switch_str_param(&session->boot_target, buf); + case ISCSI_PARAM_PORTAL_TYPE: + return iscsi_switch_str_param(&session->portal_type, buf); + case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: + return iscsi_switch_str_param(&session->discovery_parent_type, + buf); + case ISCSI_PARAM_DISCOVERY_SESS: + sscanf(buf, "%d", &val); + session->discovery_sess = !!val; + break; + case ISCSI_PARAM_LOCAL_IPADDR: + return iscsi_switch_str_param(&conn->local_ipaddr, buf); default: return -ENOSYS; } @@ -3296,6 +3372,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, case ISCSI_PARAM_DATASEQ_INORDER_EN: len = sprintf(buf, "%d\n", session->dataseq_inorder_en); break; + case ISCSI_PARAM_DEF_TASKMGMT_TMO: + len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo); + break; case ISCSI_PARAM_ERL: len = sprintf(buf, "%d\n", session->erl); break; @@ -3326,6 +3405,61 @@ int 
iscsi_session_get_param(struct iscsi_cls_session *cls_session, case ISCSI_PARAM_INITIATOR_NAME: len = sprintf(buf, "%s\n", session->initiatorname); break; + case ISCSI_PARAM_BOOT_ROOT: + len = sprintf(buf, "%s\n", session->boot_root); + break; + case ISCSI_PARAM_BOOT_NIC: + len = sprintf(buf, "%s\n", session->boot_nic); + break; + case ISCSI_PARAM_BOOT_TARGET: + len = sprintf(buf, "%s\n", session->boot_target); + break; + case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: + len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable); + break; + case ISCSI_PARAM_DISCOVERY_SESS: + len = sprintf(buf, "%u\n", session->discovery_sess); + break; + case ISCSI_PARAM_PORTAL_TYPE: + len = sprintf(buf, "%s\n", session->portal_type); + break; + case ISCSI_PARAM_CHAP_AUTH_EN: + len = sprintf(buf, "%u\n", session->chap_auth_en); + break; + case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: + len = sprintf(buf, "%u\n", session->discovery_logout_en); + break; + case ISCSI_PARAM_BIDI_CHAP_EN: + len = sprintf(buf, "%u\n", session->bidi_chap_en); + break; + case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: + len = sprintf(buf, "%u\n", session->discovery_auth_optional); + break; + case ISCSI_PARAM_DEF_TIME2WAIT: + len = sprintf(buf, "%d\n", session->time2wait); + break; + case ISCSI_PARAM_DEF_TIME2RETAIN: + len = sprintf(buf, "%d\n", session->time2retain); + break; + case ISCSI_PARAM_TSID: + len = sprintf(buf, "%u\n", session->tsid); + break; + case ISCSI_PARAM_ISID: + len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", + session->isid[0], session->isid[1], + session->isid[2], session->isid[3], + session->isid[4], session->isid[5]); + break; + case ISCSI_PARAM_DISCOVERY_PARENT_IDX: + len = sprintf(buf, "%u\n", session->discovery_parent_idx); + break; + case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: + if (session->discovery_parent_type) + len = sprintf(buf, "%s\n", + session->discovery_parent_type); + else + len = sprintf(buf, "\n"); + break; default: return -ENOSYS; } @@ -3415,6 +3549,57 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, case ISCSI_PARAM_PERSISTENT_ADDRESS: len = sprintf(buf, "%s\n", conn->persistent_address); break; + case ISCSI_PARAM_STATSN: + len = sprintf(buf, "%u\n", conn->statsn); + break; + case ISCSI_PARAM_MAX_SEGMENT_SIZE: + len = sprintf(buf, "%u\n", conn->max_segment_size); + break; + case ISCSI_PARAM_KEEPALIVE_TMO: + len = sprintf(buf, "%u\n", conn->keepalive_tmo); + break; + case ISCSI_PARAM_LOCAL_PORT: + len = sprintf(buf, "%u\n", conn->local_port); + break; + case ISCSI_PARAM_TCP_TIMESTAMP_STAT: + len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat); + break; + case ISCSI_PARAM_TCP_NAGLE_DISABLE: + len = sprintf(buf, "%u\n", conn->tcp_nagle_disable); + break; + case ISCSI_PARAM_TCP_WSF_DISABLE: + len = sprintf(buf, "%u\n", conn->tcp_wsf_disable); + break; + case ISCSI_PARAM_TCP_TIMER_SCALE: + len = sprintf(buf, "%u\n", conn->tcp_timer_scale); + break; + case ISCSI_PARAM_TCP_TIMESTAMP_EN: + len = sprintf(buf, "%u\n", conn->tcp_timestamp_en); + break; + case ISCSI_PARAM_IP_FRAGMENT_DISABLE: + len = sprintf(buf, "%u\n", conn->fragment_disable); + break; + case ISCSI_PARAM_IPV4_TOS: + len = sprintf(buf, "%u\n", conn->ipv4_tos); + break; + case ISCSI_PARAM_IPV6_TC: + len = sprintf(buf, "%u\n", conn->ipv6_traffic_class); + break; + case ISCSI_PARAM_IPV6_FLOW_LABEL: + len = sprintf(buf, "%u\n", conn->ipv6_flow_label); + break; + case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: + len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6); + break; + case ISCSI_PARAM_TCP_XMIT_WSF: + len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf); + 
break; + case ISCSI_PARAM_TCP_RECV_WSF: + len = sprintf(buf, "%u\n", conn->tcp_recv_wsf); + break; + case ISCSI_PARAM_LOCAL_IPADDR: + len = sprintf(buf, "%s\n", conn->local_ipaddr); + break; default: return -ENOSYS; } diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 552e8a2b6f5..60cb6dc3c6f 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -446,7 +446,7 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn) * iscsi_tcp_cleanup_task - free tcp_task resources * @task: iscsi task * - * must be called with session lock + * must be called with session back_lock */ void iscsi_tcp_cleanup_task(struct iscsi_task *task) { @@ -457,6 +457,7 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task) if (!task->sc) return; + spin_lock_bh(&tcp_task->queue2pool); /* flush task's r2t queues */ while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) { kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, @@ -470,6 +471,7 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task) sizeof(void*)); tcp_task->r2t = NULL; } + spin_unlock_bh(&tcp_task->queue2pool); } EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task); @@ -529,6 +531,8 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr; struct iscsi_r2t_info *r2t; int r2tsn = be32_to_cpu(rhdr->r2tsn); + u32 data_length; + u32 data_offset; int rc; if (tcp_conn->in.datalen) { @@ -554,40 +558,41 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) return 0; } - rc = kfifo_out(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); - if (!rc) { - iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. " - "Target has sent more R2Ts than it " - "negotiated for or driver has has leaked.\n"); - return ISCSI_ERR_PROTO; - } - - r2t->exp_statsn = rhdr->statsn; - r2t->data_length = be32_to_cpu(rhdr->data_length); - if (r2t->data_length == 0) { + data_length = be32_to_cpu(rhdr->data_length); + if (data_length == 0) { iscsi_conn_printk(KERN_ERR, conn, "invalid R2T with zero data len\n"); - kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, - sizeof(void*)); return ISCSI_ERR_DATALEN; } - if (r2t->data_length > session->max_burst) + if (data_length > session->max_burst) ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max " "burst %u. Attempting to execute request.\n", - r2t->data_length, session->max_burst); + data_length, session->max_burst); - r2t->data_offset = be32_to_cpu(rhdr->data_offset); - if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) { + data_offset = be32_to_cpu(rhdr->data_offset); + if (data_offset + data_length > scsi_out(task->sc)->length) { iscsi_conn_printk(KERN_ERR, conn, "invalid R2T with data len %u at offset %u " - "and total length %d\n", r2t->data_length, - r2t->data_offset, scsi_out(task->sc)->length); - kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, - sizeof(void*)); + "and total length %d\n", data_length, + data_offset, scsi_out(task->sc)->length); return ISCSI_ERR_DATALEN; } + spin_lock(&tcp_task->pool2queue); + rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *)); + if (!rc) { + iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. 
" + "Target has sent more R2Ts than it " + "negotiated for or driver has leaked.\n"); + spin_unlock(&tcp_task->pool2queue); + return ISCSI_ERR_PROTO; + } + + r2t->exp_statsn = rhdr->statsn; + r2t->data_length = data_length; + r2t->data_offset = data_offset; + r2t->ttt = rhdr->ttt; /* no flip */ r2t->datasn = 0; r2t->sent = 0; @@ -595,6 +600,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) tcp_task->exp_datasn = r2tsn + 1; kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*)); conn->r2t_pdus_cnt++; + spin_unlock(&tcp_task->pool2queue); iscsi_requeue_task(task); return 0; @@ -667,14 +673,14 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) switch(opcode) { case ISCSI_OP_SCSI_DATA_IN: - spin_lock(&conn->session->lock); + spin_lock(&conn->session->back_lock); task = iscsi_itt_to_ctask(conn, hdr->itt); if (!task) rc = ISCSI_ERR_BAD_ITT; else rc = iscsi_tcp_data_in(conn, task); if (rc) { - spin_unlock(&conn->session->lock); + spin_unlock(&conn->session->back_lock); break; } @@ -707,11 +713,11 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) tcp_conn->in.datalen, iscsi_tcp_process_data_in, rx_hash); - spin_unlock(&conn->session->lock); + spin_unlock(&conn->session->back_lock); return rc; } rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); - spin_unlock(&conn->session->lock); + spin_unlock(&conn->session->back_lock); break; case ISCSI_OP_SCSI_CMD_RSP: if (tcp_conn->in.datalen) { @@ -721,18 +727,20 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) rc = iscsi_complete_pdu(conn, hdr, NULL, 0); break; case ISCSI_OP_R2T: - spin_lock(&conn->session->lock); + spin_lock(&conn->session->back_lock); task = iscsi_itt_to_ctask(conn, hdr->itt); + spin_unlock(&conn->session->back_lock); if (!task) rc = ISCSI_ERR_BAD_ITT; else if (ahslen) rc = ISCSI_ERR_AHSLEN; else if (task->sc->sc_data_direction == DMA_TO_DEVICE) { task->last_xfer = jiffies; + spin_lock(&conn->session->frwd_lock); rc = iscsi_tcp_r2t_rsp(conn, task); + spin_unlock(&conn->session->frwd_lock); } else rc = ISCSI_ERR_PROTO; - spin_unlock(&conn->session->lock); break; case ISCSI_OP_LOGIN_RSP: case ISCSI_OP_TEXT_RSP: @@ -906,7 +914,6 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb, ISCSI_DBG_TCP(conn, "no more data avail. Consumed %d\n", consumed); *status = ISCSI_TCP_SKB_DONE; - skb_abort_seq_read(&seq); goto skb_done; } BUG_ON(segment->copied >= segment->size); @@ -981,14 +988,13 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_task_init); static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) { - struct iscsi_session *session = task->conn->session; struct iscsi_tcp_task *tcp_task = task->dd_data; struct iscsi_r2t_info *r2t = NULL; if (iscsi_task_has_unsol_data(task)) r2t = &task->unsol_r2t; else { - spin_lock_bh(&session->lock); + spin_lock_bh(&tcp_task->queue2pool); if (tcp_task->r2t) { r2t = tcp_task->r2t; /* Continue with this R2T? 
*/ @@ -1010,7 +1016,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) else r2t = tcp_task->r2t; } - spin_unlock_bh(&session->lock); + spin_unlock_bh(&tcp_task->queue2pool); } return r2t; @@ -1140,6 +1146,8 @@ int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session) iscsi_pool_free(&tcp_task->r2tpool); goto r2t_alloc_fail; } + spin_lock_init(&tcp_task->pool2queue); + spin_lock_init(&tcp_task->queue2pool); } return 0; diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index bdb81cda840..766098af4eb 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -211,7 +211,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) qc->tf.nsect = 0; } - ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis); + ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis); task->uldd_task = qc; if (ata_is_atapi(qc->tf.protocol)) { memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); @@ -285,14 +285,14 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) if (phy->attached_tproto & SAS_PROTOCOL_STP) dev->tproto = phy->attached_tproto; if (phy->attached_sata_dev) - dev->tproto |= SATA_DEV; + dev->tproto |= SAS_SATA_DEV; - if (phy->attached_dev_type == SATA_PENDING) - dev->dev_type = SATA_PENDING; + if (phy->attached_dev_type == SAS_SATA_PENDING) + dev->dev_type = SAS_SATA_PENDING; else { int res; - dev->dev_type = SATA_DEV; + dev->dev_type = SAS_SATA_DEV; res = sas_get_report_phy_sata(dev->parent, phy->phy_id, &dev->sata_dev.rps_resp); if (res) { @@ -314,7 +314,7 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy) int res; /* we weren't pending, so successfully end the reset sequence now */ - if (dev->dev_type != SATA_PENDING) + if (dev->dev_type != SAS_SATA_PENDING) return 1; /* hmmm, if this succeeds do we need to repost the domain_device to the @@ -348,9 +348,9 @@ static int smp_ata_check_ready(struct ata_link *link) return 0; switch (ex_phy->attached_dev_type) { - case SATA_PENDING: + case SAS_SATA_PENDING: return 0; - case SAS_END_DEV: + case SAS_END_DEVICE: if (ex_phy->attached_sata_dev) return sas_ata_clear_pending(dev, ex_phy); default: @@ -631,7 +631,7 @@ static void sas_get_ata_command_set(struct domain_device *dev) struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; - if (dev->dev_type == SATA_PENDING) + if (dev->dev_type == SAS_SATA_PENDING) return; if ((fis->sector_count == 1 && /* ATA */ @@ -700,46 +700,26 @@ void sas_probe_sata(struct asd_sas_port *port) } -static bool sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func) +static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func) { struct domain_device *dev, *n; - bool retry = false; list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) { - int rc; - if (!dev_is_sata(dev)) continue; sas_ata_wait_eh(dev); - rc = dev->sata_dev.pm_result; - if (rc == -EAGAIN) - retry = true; - else if (rc) { - /* since we don't have a - * ->port_{suspend|resume} routine in our - * ata_port ops, and no entanglements with - * acpi, suspend should just be mechanical trip - * through eh, catch cases where these - * assumptions are invalidated - */ - WARN_ONCE(1, "failed %s %s error: %d\n", func, - dev_name(&dev->rphy->dev), rc); - } /* if libata failed to power manage the device, tear it down */ if (ata_dev_disabled(sas_to_ata_dev(dev))) sas_fail_probe(dev, func, -ENODEV); } - - return retry; } void sas_suspend_sata(struct asd_sas_port 
*port) { struct domain_device *dev; - retry: mutex_lock(&port->ha->disco_mutex); list_for_each_entry(dev, &port->dev_list, dev_list_node) { struct sata_device *sata; @@ -751,20 +731,17 @@ void sas_suspend_sata(struct asd_sas_port *port) if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND) continue; - sata->pm_result = -EIO; - ata_sas_port_async_suspend(sata->ap, &sata->pm_result); + ata_sas_port_suspend(sata->ap); } mutex_unlock(&port->ha->disco_mutex); - if (sas_ata_flush_pm_eh(port, __func__)) - goto retry; + sas_ata_flush_pm_eh(port, __func__); } void sas_resume_sata(struct asd_sas_port *port) { struct domain_device *dev; - retry: mutex_lock(&port->ha->disco_mutex); list_for_each_entry(dev, &port->dev_list, dev_list_node) { struct sata_device *sata; @@ -776,13 +753,11 @@ void sas_resume_sata(struct asd_sas_port *port) if (sata->ap->pm_mesg.event == PM_EVENT_ON) continue; - sata->pm_result = -EIO; - ata_sas_port_async_resume(sata->ap, &sata->pm_result); + ata_sas_port_resume(sata->ap); } mutex_unlock(&port->ha->disco_mutex); - if (sas_ata_flush_pm_eh(port, __func__)) - goto retry; + sas_ata_flush_pm_eh(port, __func__); } /** @@ -797,7 +772,7 @@ int sas_discover_sata(struct domain_device *dev) { int res; - if (dev->dev_type == SATA_PM) + if (dev->dev_type == SAS_SATA_PM) return -ENODEV; sas_get_ata_command_set(dev); diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index a0c3003e0c7..62b58d38ce2 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -39,11 +39,11 @@ void sas_init_dev(struct domain_device *dev) { switch (dev->dev_type) { - case SAS_END_DEV: + case SAS_END_DEVICE: INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node); break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: INIT_LIST_HEAD(&dev->ex_dev.children); mutex_init(&dev->ex_dev.cmd_mutex); break; @@ -93,9 +93,9 @@ static int sas_get_port_device(struct asd_sas_port *port) if (fis->interrupt_reason == 1 && fis->lbal == 1 && fis->byte_count_low==0x69 && fis->byte_count_high == 0x96 && (fis->device & ~0x10) == 0) - dev->dev_type = SATA_PM; + dev->dev_type = SAS_SATA_PM; else - dev->dev_type = SATA_DEV; + dev->dev_type = SAS_SATA_DEV; dev->tproto = SAS_PROTOCOL_SATA; } else { struct sas_identify_frame *id = @@ -109,21 +109,21 @@ static int sas_get_port_device(struct asd_sas_port *port) dev->port = port; switch (dev->dev_type) { - case SATA_DEV: + case SAS_SATA_DEV: rc = sas_ata_init(dev); if (rc) { rphy = NULL; break; } /* fall through */ - case SAS_END_DEV: + case SAS_END_DEVICE: rphy = sas_end_device_alloc(port->port); break; - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(port->port, SAS_EDGE_EXPANDER_DEVICE); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(port->port, SAS_FANOUT_EXPANDER_DEVICE); break; @@ -156,7 +156,7 @@ static int sas_get_port_device(struct asd_sas_port *port) dev->rphy = rphy; get_device(&dev->rphy->dev); - if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) + if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE) list_add_tail(&dev->disco_list_node, &port->disco_list); else { spin_lock_irq(&port->dev_list_lock); @@ -315,7 +315,7 @@ void sas_free_device(struct kref *kref) dev->phy = NULL; /* remove the phys and ports, everything else should be gone */ - if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) + if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) 
kfree(dev->ex_dev.ex_phy); if (dev_is_sata(dev) && dev->sata_dev.ap) { @@ -343,7 +343,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d spin_unlock_irq(&port->dev_list_lock); spin_lock_irq(&ha->lock); - if (dev->dev_type == SAS_END_DEV && + if (dev->dev_type == SAS_END_DEVICE && !list_empty(&dev->ssp_dev.eh_list_node)) { list_del_init(&dev->ssp_dev.eh_list_node); ha->eh_active--; @@ -457,15 +457,15 @@ static void sas_discover_domain(struct work_struct *work) task_pid_nr(current)); switch (dev->dev_type) { - case SAS_END_DEV: + case SAS_END_DEVICE: error = sas_discover_end_dev(dev); break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: error = sas_discover_root_expander(dev); break; - case SATA_DEV: - case SATA_PM: + case SAS_SATA_DEV: + case SAS_SATA_PM: #ifdef CONFIG_SCSI_SAS_ATA error = sas_discover_sata(dev); break; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index aec2e0da501..0cac7d8fd0f 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -183,21 +183,21 @@ static char sas_route_char(struct domain_device *dev, struct ex_phy *phy) } } -static enum sas_dev_type to_dev_type(struct discover_resp *dr) +static enum sas_device_type to_dev_type(struct discover_resp *dr) { /* This is detecting a failure to transmit initial dev to host * FIS as described in section J.5 of sas-2 r16 */ - if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev && + if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev && dr->linkrate >= SAS_LINK_RATE_1_5_GBPS) - return SATA_PENDING; + return SAS_SATA_PENDING; else return dr->attached_dev_type; } static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) { - enum sas_dev_type dev_type; + enum sas_device_type dev_type; enum sas_linkrate linkrate; u8 sas_addr[SAS_ADDR_SIZE]; struct smp_resp *resp = rsp; @@ -235,6 +235,17 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) linkrate = phy->linkrate; memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); + /* Handle vacant phy - rest of dr data is not valid so skip it */ + if (phy->phy_state == PHY_VACANT) { + memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + phy->attached_dev_type = SAS_PHY_UNUSED; + if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { + phy->phy_id = phy_id; + goto skip; + } else + goto out; + } + phy->attached_dev_type = to_dev_type(dr); if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) goto out; @@ -248,7 +259,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) /* help some expanders that fail to zero sas_address in the 'no * device' case */ - if (phy->attached_dev_type == NO_DEVICE || + if (phy->attached_dev_type == SAS_PHY_UNUSED || phy->linkrate < SAS_LINK_RATE_1_5_GBPS) memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); else @@ -272,6 +283,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; + skip: if (new_phy) if (sas_phy_add(phy->phy)) { sas_phy_free(phy->phy); @@ -280,13 +292,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) out: switch (phy->attached_dev_type) { - case SATA_PENDING: + case SAS_SATA_PENDING: type = "stp pending"; break; - case NO_DEVICE: + case SAS_PHY_UNUSED: type = "no device"; break; - case SAS_END_DEV: + case SAS_END_DEVICE: if 
(phy->attached_iproto) { if (phy->attached_tproto) type = "host+target"; @@ -299,8 +311,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) type = "ssp"; } break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: type = "smp"; break; default: @@ -388,7 +400,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single) if (!disc_req) return -ENOMEM; - disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE); + disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); if (!disc_resp) { kfree(disc_req); return -ENOMEM; @@ -821,7 +833,7 @@ static struct domain_device *sas_ex_discover_end_dev( } else #endif if (phy->attached_tproto & SAS_PROTOCOL_SSP) { - child->dev_type = SAS_END_DEV; + child->dev_type = SAS_END_DEVICE; rphy = sas_end_device_alloc(phy->port); /* FIXME: error handling */ if (unlikely(!rphy)) @@ -920,11 +932,11 @@ static struct domain_device *sas_ex_discover_expander( switch (phy->attached_dev_type) { - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_EDGE_EXPANDER_DEVICE); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_FANOUT_EXPANDER_DEVICE); break; @@ -1001,7 +1013,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) sas_ex_disable_port(dev, ex_phy->attached_sas_addr); - if (ex_phy->attached_dev_type == NO_DEVICE) { + if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) { if (ex_phy->routing_attr == DIRECT_ROUTING) { memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); sas_configure_routing(dev, ex_phy->attached_sas_addr); @@ -1010,10 +1022,10 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) return 0; - if (ex_phy->attached_dev_type != SAS_END_DEV && - ex_phy->attached_dev_type != FANOUT_DEV && - ex_phy->attached_dev_type != EDGE_DEV && - ex_phy->attached_dev_type != SATA_PENDING) { + if (ex_phy->attached_dev_type != SAS_END_DEVICE && + ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE && + ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE && + ex_phy->attached_dev_type != SAS_SATA_PENDING) { SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " "phy 0x%x\n", ex_phy->attached_dev_type, SAS_ADDR(dev->sas_addr), @@ -1037,11 +1049,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } switch (ex_phy->attached_dev_type) { - case SAS_END_DEV: - case SATA_PENDING: + case SAS_END_DEVICE: + case SAS_SATA_PENDING: child = sas_ex_discover_end_dev(dev, phy_id); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { SAS_DPRINTK("second fanout expander %016llx phy 0x%x " "attached to ex %016llx phy 0x%x\n", @@ -1055,7 +1067,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) memcpy(dev->port->disc.fanout_sas_addr, ex_phy->attached_sas_addr, SAS_ADDR_SIZE); /* fallthrough */ - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: child = sas_ex_discover_expander(dev, phy_id); break; default: @@ -1099,8 +1111,8 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) phy->phy_state == PHY_NOT_PRESENT) continue; - if ((phy->attached_dev_type == EDGE_DEV || - phy->attached_dev_type == FANOUT_DEV) && + if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE || + phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) && phy->routing_attr == 
SUBTRACTIVE_ROUTING) { memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); @@ -1118,8 +1130,8 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev) u8 sub_addr[8] = {0, }; list_for_each_entry(child, &ex->children, siblings) { - if (child->dev_type != EDGE_DEV && - child->dev_type != FANOUT_DEV) + if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE && + child->dev_type != SAS_FANOUT_EXPANDER_DEVICE) continue; if (sub_addr[0] == 0) { sas_find_sub_addr(child, sub_addr); @@ -1196,7 +1208,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev) int i; u8 *sub_sas_addr = NULL; - if (dev->dev_type != EDGE_DEV) + if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE) return 0; for (i = 0; i < ex->num_phys; i++) { @@ -1206,8 +1218,8 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev) phy->phy_state == PHY_NOT_PRESENT) continue; - if ((phy->attached_dev_type == FANOUT_DEV || - phy->attached_dev_type == EDGE_DEV) && + if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE || + phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { if (!sub_sas_addr) @@ -1233,8 +1245,8 @@ static void sas_print_parent_topology_bug(struct domain_device *child, struct ex_phy *child_phy) { static const char *ex_type[] = { - [EDGE_DEV] = "edge", - [FANOUT_DEV] = "fanout", + [SAS_EDGE_EXPANDER_DEVICE] = "edge", + [SAS_FANOUT_EXPANDER_DEVICE] = "fanout", }; struct domain_device *parent = child->parent; @@ -1309,8 +1321,8 @@ static int sas_check_parent_topology(struct domain_device *child) if (!child->parent) return 0; - if (child->parent->dev_type != EDGE_DEV && - child->parent->dev_type != FANOUT_DEV) + if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE && + child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE) return 0; parent_ex = &child->parent->ex_dev; @@ -1329,8 +1341,8 @@ static int sas_check_parent_topology(struct domain_device *child) child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; switch (child->parent->dev_type) { - case EDGE_DEV: - if (child->dev_type == FANOUT_DEV) { + case SAS_EDGE_EXPANDER_DEVICE: + if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); @@ -1354,7 +1366,7 @@ static int sas_check_parent_topology(struct domain_device *child) } } break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: if (parent_phy->routing_attr != TABLE_ROUTING || child_phy->routing_attr != SUBTRACTIVE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); @@ -1607,8 +1619,8 @@ static int sas_ex_level_discovery(struct asd_sas_port *port, const int level) struct domain_device *dev; list_for_each_entry(dev, &port->dev_list, dev_list_node) { - if (dev->dev_type == EDGE_DEV || - dev->dev_type == FANOUT_DEV) { + if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); @@ -1708,7 +1720,7 @@ static int sas_get_phy_change_count(struct domain_device *dev, } static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, - u8 *sas_addr, enum sas_dev_type *type) + u8 *sas_addr, enum sas_device_type *type) { int res; struct smp_resp *disc_resp; @@ -1837,7 +1849,7 @@ static int sas_find_bcast_dev(struct domain_device *dev, SAS_DPRINTK("Expander phys DID NOT change\n"); } list_for_each_entry(ch, &ex->children, siblings) { - 
if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { + if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { res = sas_find_bcast_dev(ch, src_dev); if (*src_dev) return res; @@ -1854,8 +1866,8 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi list_for_each_entry_safe(child, n, &ex->children, siblings) { set_bit(SAS_DEV_GONE, &child->state); - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(port, child); else sas_unregister_dev(port, child); @@ -1875,8 +1887,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, if (SAS_ADDR(child->sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { set_bit(SAS_DEV_GONE, &child->state); - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(parent->port, child); else sas_unregister_dev(parent->port, child); @@ -1904,8 +1916,8 @@ static int sas_discover_bfs_by_root_level(struct domain_device *root, int res = 0; list_for_each_entry(child, &ex_root->children, siblings) { - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) { + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(child->rphy); @@ -1958,8 +1970,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) res = sas_discover_bfs_by_root(child); break; } @@ -1967,16 +1979,16 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) return res; } -static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old) +static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old) { if (old == new) return true; /* treat device directed resets as flutter, if we went - * SAS_END_DEV to SATA_PENDING the link needs recovery + * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery */ - if ((old == SATA_PENDING && new == SAS_END_DEV) || - (old == SAS_END_DEV && new == SATA_PENDING)) + if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) || + (old == SAS_END_DEVICE && new == SAS_SATA_PENDING)) return true; return false; @@ -1986,7 +1998,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; - enum sas_dev_type type = NO_DEVICE; + enum sas_device_type type = SAS_PHY_UNUSED; u8 sas_addr[8]; int res; @@ -2020,7 +2032,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) sas_ex_phy_discover(dev, phy_id); - if (ata_dev && phy->attached_dev_type == SATA_PENDING) + if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING) action = ", needs recovery"; SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", SAS_ADDR(dev->sas_addr), phy_id, action); @@ -2151,10 +2163,10 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, } /* do we need to support multiple segments? 
*/ - if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { - printk("%s: multiple segments req %u %u, rsp %u %u\n", - __func__, req->bio->bi_vcnt, blk_rq_bytes(req), - rsp->bio->bi_vcnt, blk_rq_bytes(rsp)); + if (bio_multiple_segments(req->bio) || + bio_multiple_segments(rsp->bio)) { + printk("%s: multiple segments req %u, rsp %u\n", + __func__, blk_rq_bytes(req), blk_rq_bytes(rsp)); return -EINVAL; } diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 1de67964e5a..7e7ba83f0a2 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -131,16 +131,16 @@ static inline void sas_fill_in_rphy(struct domain_device *dev, rphy->identify.initiator_port_protocols = dev->iproto; rphy->identify.target_port_protocols = dev->tproto; switch (dev->dev_type) { - case SATA_DEV: + case SAS_SATA_DEV: /* FIXME: need sata device type */ - case SAS_END_DEV: - case SATA_PENDING: + case SAS_END_DEVICE: + case SAS_SATA_PENDING: rphy->identify.device_type = SAS_END_DEVICE; break; - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE; break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE; break; default: diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index 1398b714c01..d3c5297c6c8 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -69,7 +69,7 @@ static void sas_resume_port(struct asd_sas_phy *phy) continue; } - if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) { + if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { dev->ex_dev.ex_change_count = -1; for (i = 0; i < dev->ex_dev.num_phys; i++) { struct ex_phy *phy = &dev->ex_dev.ex_phy[i]; diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 6e795a174a1..25d0f127424 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -167,7 +167,7 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd, int_to_scsilun(cmd->device->lun, &lun); memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8); task->ssp_task.task_attr = TASK_ATTR_SIMPLE; - memcpy(task->ssp_task.cdb, cmd->cmnd, 16); + task->ssp_task.cmd = cmd; task->scatter = scsi_sglist(cmd); task->num_scatter = scsi_sg_count(cmd); @@ -862,7 +862,7 @@ out: enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) { - scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd); + scmd_dbg(cmd, "command %p timed out\n", cmd); return BLK_EH_NOT_HANDLED; } diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 7706c99ec8b..434e9037908 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2012 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -46,10 +46,15 @@ struct lpfc_sli2_slim; #define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi cmnd for menlo needs nearly twice as for firmware downloads using bsg */ -#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ + +#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */ +#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */ #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */ +#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */ + #define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ -#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ #define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ #define LPFC_VNAME_LEN 100 /* vport symbolic name length */ @@ -66,8 +71,8 @@ struct lpfc_sli2_slim; * queue depths when there are driver resource error or Firmware * resource error. */ -#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */ -#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */ +/* 1 Second */ +#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1)) /* Number of exchanges reserved for discovery to complete */ #define LPFC_DISC_IOCB_BUFF_COUNT 20 @@ -414,6 +419,7 @@ struct lpfc_vport { uint32_t cfg_enable_da_id; uint32_t cfg_max_scsicmpl_time; uint32_t cfg_tgt_queue_depth; + uint32_t cfg_first_burst_size; uint32_t dev_loss_tmo_changed; @@ -634,6 +640,7 @@ struct lpfc_hba { #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ #define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */ +#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; @@ -671,6 +678,7 @@ struct lpfc_hba { uint32_t lmt; uint32_t fc_topology; /* link topology, from LINK INIT */ + uint32_t fc_topology_changed; /* link topology, from LINK INIT */ struct lpfc_stats fc_stat; @@ -699,11 +707,12 @@ struct lpfc_hba { uint32_t cfg_multi_ring_type; uint32_t cfg_poll; uint32_t cfg_poll_tmo; + uint32_t cfg_task_mgmt_tmo; uint32_t cfg_use_msi; uint32_t cfg_fcp_imax; - uint32_t cfg_fcp_wq_count; - uint32_t cfg_fcp_eq_count; + uint32_t cfg_fcp_cpu_map; uint32_t cfg_fcp_io_channel; + uint32_t cfg_total_seg_cnt; uint32_t cfg_sg_seg_cnt; uint32_t cfg_prot_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; @@ -712,6 +721,20 @@ struct lpfc_hba { uint32_t cfg_hba_queue_depth; uint32_t cfg_enable_hba_reset; uint32_t cfg_enable_hba_heartbeat; + uint32_t cfg_fof; + uint32_t cfg_EnableXLane; + uint8_t cfg_oas_tgt_wwpn[8]; + uint8_t cfg_oas_vpt_wwpn[8]; + uint32_t cfg_oas_lun_state; +#define OAS_LUN_ENABLE 1 +#define OAS_LUN_DISABLE 0 + uint32_t cfg_oas_lun_status; +#define OAS_LUN_STATUS_EXISTS 0x01 + uint32_t cfg_oas_flags; +#define OAS_FIND_ANY_VPORT 0x01 +#define OAS_FIND_ANY_TARGET 0x02 +#define OAS_LUN_VALID 0x04 + uint32_t cfg_XLanePriority; uint32_t cfg_enable_bg; uint32_t cfg_hostmem_hgp; uint32_t cfg_log_verbose; @@ -720,6 +743,7 @@ struct lpfc_hba { uint32_t cfg_request_firmware_upgrade; uint32_t cfg_iocb_cnt; uint32_t cfg_suppress_link_up; + uint32_t 
cfg_rrq_xri_bitmap_sz; #define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ #define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ @@ -804,8 +828,10 @@ struct lpfc_hba { uint64_t bg_reftag_err_cnt; /* fastpath list. */ - spinlock_t scsi_buf_list_lock; - struct list_head lpfc_scsi_buf_list; + spinlock_t scsi_buf_list_get_lock; /* SCSI buf alloc list lock */ + spinlock_t scsi_buf_list_put_lock; /* SCSI buf free list lock */ + struct list_head lpfc_scsi_buf_list_get; + struct list_head lpfc_scsi_buf_list_put; uint32_t total_scsi_bufs; struct list_head lpfc_iocb_list; uint32_t total_iocbq_bufs; @@ -823,6 +849,7 @@ struct lpfc_hba { mempool_t *mbox_mem_pool; mempool_t *nlp_mem_pool; mempool_t *rrq_pool; + mempool_t *active_rrq_pool; struct fc_host_statistics link_stats; enum intr_type_t intr_type; @@ -857,7 +884,6 @@ struct lpfc_hba { atomic_t num_cmd_success; unsigned long last_rsrc_error_time; unsigned long last_ramp_down_time; - unsigned long last_ramp_up_time; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct dentry *hba_debugfs_root; atomic_t debugfs_vport_count; @@ -959,6 +985,9 @@ struct lpfc_hba { atomic_t sdev_cnt; uint8_t fips_spec_rev; uint8_t fips_level; + spinlock_t devicelock; /* lock for luns list */ + mempool_t *device_data_mem_pool; + struct list_head luns; }; static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index a364cae9e98..1d7a5c34ee8 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2012 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -529,6 +529,27 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr, } /** + * lpfc_oas_supported_show - Return whether or not Optimized Access Storage + * (OAS) is supported. + * @dev: class unused variable. + * @attr: device attribute, not used. + * @buf: on return contains the module description text. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return snprintf(buf, PAGE_SIZE, "%d\n", + phba->sli4_hba.pc_sli4_params.oas_supported); +} + +/** * lpfc_link_state_store - Transition the link_state on an HBA port * @dev: class device that is converted into a Scsi_host. * @attr: device attribute, not used. 
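A detail worth calling out in the lpfc.h hunk just above: the single lpfc_scsi_buf_list and its scsi_buf_list_lock become a get list and a put list with one lock apiece, so command setup and command completion no longer contend on the same spinlock. The refill policy lives elsewhere in the driver, so the sketch below only shows the usual shape of such a scheme, with pthread mutexes standing in for the kernel spinlocks; every name in it is invented for the example.

```c
/* Sketch of a get/put free-list split: allocation drains the "get"
 * list, completion pushes onto the "put" list, and the two lists only
 * meet when "get" runs dry.  Illustrative names, not lpfc code. */
#include <pthread.h>
#include <stddef.h>

struct buf {
	struct buf *next;
};

static struct buf *get_list;
static struct buf *put_list;
static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER;

static struct buf *buf_get(void)
{
	struct buf *b;

	pthread_mutex_lock(&get_lock);
	if (!get_list) {
		/* get list empty: steal whatever the free path queued */
		pthread_mutex_lock(&put_lock);
		get_list = put_list;
		put_list = NULL;
		pthread_mutex_unlock(&put_lock);
	}
	b = get_list;
	if (b)
		get_list = b->next;
	pthread_mutex_unlock(&get_lock);
	return b;
}

static void buf_put(struct buf *b)
{
	/* the free path takes only put_lock, so it never contends with
	 * a buf_get() that found the get list non-empty */
	pthread_mutex_lock(&put_lock);
	b->next = put_list;
	put_list = b;
	pthread_mutex_unlock(&put_lock);
}

int main(void)
{
	static struct buf pool[4];
	int i;

	for (i = 0; i < 4; i++)
		buf_put(&pool[i]);	/* seed the free list */
	while (buf_get())
		;			/* drains via the one-shot swap */
	return 0;
}
```

Under a steady mix of submissions and completions the swap is rare, so each path mostly takes a single uncontended lock.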
@@ -692,7 +713,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) */ for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; - while (pring->txcmplq_cnt) { + while (!list_empty(&pring->txcmplq)) { msleep(10); if (cnt++ > 500) { /* 5 secs */ lpfc_printf_log(phba, @@ -744,10 +765,12 @@ lpfc_selective_reset(struct lpfc_hba *phba) if (!phba->cfg_enable_hba_reset) return -EACCES; - status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); + if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) { + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); - if (status != 0) - return status; + if (status != 0) + return status; + } init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, @@ -814,7 +837,7 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr, * the readyness after performing a firmware reset. * * Returns: - * zero for success, -EPERM when port does not have privilage to perform the + * zero for success, -EPERM when port does not have privilege to perform the * reset, -EIO when port timeout from recovering from the reset. * * Note: @@ -831,7 +854,7 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba) lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, &portstat_reg.word0); - /* verify if privilaged for the request operation */ + /* verify if privileged for the request operation */ if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) && !bf_get(lpfc_sliport_status_err, &portstat_reg)) return -EPERM; @@ -895,10 +918,16 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) pci_disable_sriov(pdev); phba->cfg_sriov_nr_virtfn = 0; } + + if (opcode == LPFC_FW_DUMP) + phba->hba_flag |= HBA_FW_DUMP_OP; + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); - if (status != 0) + if (status != 0) { + phba->hba_flag &= ~HBA_FW_DUMP_OP; return status; + } /* wait for the device to be quiesced before firmware reset */ msleep(100); @@ -922,9 +951,9 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) rc = lpfc_sli4_pdev_status_reg_wait(phba); if (rc == -EPERM) { - /* no privilage for reset */ + /* no privilege for reset */ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "3150 No privilage to perform the requested " + "3150 No privilege to perform the requested " "access: x%x\n", reg_val); } else if (rc == -EIO) { /* reset failed, there is nothing more we can do */ @@ -1862,8 +1891,10 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ { \ if (val >= minval && val <= maxval) {\ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ - "3053 lpfc_" #attr " changed from %d to %d\n", \ - vport->cfg_##attr, val); \ + "3053 lpfc_" #attr \ + " changed from %d (x%x) to %d (x%x)\n", \ + vport->cfg_##attr, vport->cfg_##attr, \ + val, val); \ vport->cfg_##attr = val;\ return 0;\ }\ @@ -2036,9 +2067,53 @@ static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL); static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO, lpfc_sriov_hw_max_virtfn_show, NULL); static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); +static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, + NULL); static char *lpfc_soft_wwn_key = "C99G71SL8032A"; +#define WWN_SZ 8 +/** + * lpfc_wwn_set - Convert string to the 8 byte WWN value. + * @buf: WWN string. + * @cnt: Length of string. + * @wwn: Array to receive converted wwn value. 
+ *
+ * Returns:
+ * -EINVAL if the buffer does not contain a valid wwn
+ * 0 success
+ **/
+static size_t
+lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
+{
+	unsigned int i, j;
+
+	/* Count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
+	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+		return -EINVAL;
+
+	memset(wwn, 0, WWN_SZ);
+
+	/* Validate and store the new name */
+	for (i = 0, j = 0; i < 16; i++) {
+		if ((*buf >= 'a') && (*buf <= 'f'))
+			j = ((j << 4) | ((*buf++ - 'a') + 10));
+		else if ((*buf >= 'A') && (*buf <= 'F'))
+			j = ((j << 4) | ((*buf++ - 'A') + 10));
+		else if ((*buf >= '0') && (*buf <= '9'))
+			j = ((j << 4) | (*buf++ - '0'));
+		else
+			return -EINVAL;
+		if (i % 2) {
+			wwn[i/2] = j & 0xff;
+			j = 0;
+		}
+	}
+	return 0;
+}
 
 /**
  * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
  * @dev: class device that is converted into a Scsi_host.
@@ -2127,9 +2202,9 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	struct completion online_compl;
-	int stat1=0, stat2=0;
-	unsigned int i, j, cnt=count;
-	u8 wwpn[8];
+	int stat1 = 0, stat2 = 0;
+	unsigned int cnt = count;
+	u8 wwpn[WWN_SZ];
 	int rc;
 
 	if (!phba->cfg_enable_hba_reset)
@@ -2144,29 +2219,19 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
 	if (buf[cnt-1] == '\n')
 		cnt--;
 
-	if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
-	    ((cnt == 17) && (*buf++ != 'x')) ||
-	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+	if (!phba->soft_wwn_enable)
 		return -EINVAL;
 
+	/* lock setting wwpn, wwnn down */
 	phba->soft_wwn_enable = 0;
 
-	memset(wwpn, 0, sizeof(wwpn));
-
-	/* Validate and store the new name */
-	for (i=0, j=0; i < 16; i++) {
-		int value;
-
-		value = hex_to_bin(*buf++);
-		if (value >= 0)
-			j = (j << 4) | value;
-		else
-			return -EINVAL;
-		if (i % 2) {
-			wwpn[i/2] = j & 0xff;
-			j = 0;
-		}
-	}
+	rc = lpfc_wwn_set(buf, cnt, wwpn);
+	if (rc) {
+		/* not able to set wwpn, unlock it */
+		phba->soft_wwn_enable = 1;
+		return rc;
+	}
+
 	phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
 	fc_host_port_name(shost) = phba->cfg_soft_wwpn;
 	if (phba->cfg_soft_wwnn)
@@ -2193,7 +2258,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
 		 "reinit adapter - %d\n", stat2);
 	return (stat1 || stat2) ? -EIO : count;
 }
-static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,
 		   lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
 
 /**
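lpfc_wwn_set() above replaces two hand-rolled copies of the same parser in the soft_wwpn/soft_wwnn store routines: strip an optional trailing newline, accept an "x" or "0x" prefix, require exactly sixteen hex digits, and pack two nibbles per byte. Below is a standalone restatement of that contract; wwn_parse() and its plain -1 error code are stand-ins, not lpfc names.

```c
/* Userspace restatement of the WWN parsing consolidated above:
 * optional "x"/"0x" prefix, sixteen hex digits, packed big-endian
 * into 8 bytes.  Hypothetical names, illustrative only. */
#include <ctype.h>
#include <stdint.h>
#include <string.h>

static int wwn_parse(const char *buf, size_t cnt, uint8_t wwn[8])
{
	unsigned int i, j = 0;

	if (cnt && buf[cnt - 1] == '\n')	/* sysfs keeps the LF */
		cnt--;

	if (cnt == 17 && *buf == 'x')
		buf++;				/* "x" prefix */
	else if (cnt == 18 && buf[0] == '0' && buf[1] == 'x')
		buf += 2;			/* "0x" prefix */
	else if (cnt != 16)
		return -1;

	memset(wwn, 0, 8);
	for (i = 0; i < 16; i++, buf++) {
		if (!isxdigit((unsigned char)*buf))
			return -1;
		j = (j << 4) | (unsigned int)(isdigit((unsigned char)*buf) ?
			*buf - '0' : tolower((unsigned char)*buf) - 'a' + 10);
		if (i % 2) {			/* two nibbles per byte */
			wwn[i / 2] = j & 0xff;
			j = 0;
		}
	}
	return 0;
}

int main(void)
{
	uint8_t wwn[8];

	/* 8 bytes out, i.e. 10:00:00:00:c9:ab:cd:ef */
	return wwn_parse("0x10000000c9abcdef\n", 19, wwn);
}
```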
@@ -2230,39 +2295,25 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
 {
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-	unsigned int i, j, cnt=count;
-	u8 wwnn[8];
+	unsigned int cnt = count;
+	u8 wwnn[WWN_SZ];
+	int rc;
 
 	/* count may include a LF at end of string */
 	if (buf[cnt-1] == '\n')
 		cnt--;
 
-	if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
-	    ((cnt == 17) && (*buf++ != 'x')) ||
-	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+	if (!phba->soft_wwn_enable)
 		return -EINVAL;
 
-	/*
-	 * Allow wwnn to be set many times, as long as the enable is set.
-	 * However, once the wwpn is set, everything locks.
-	 */
-
-	memset(wwnn, 0, sizeof(wwnn));
-
-	/* Validate and store the new name */
-	for (i=0, j=0; i < 16; i++) {
-		int value;
-
-		value = hex_to_bin(*buf++);
-		if (value >= 0)
-			j = (j << 4) | value;
-		else
-			return -EINVAL;
-		if (i % 2) {
-			wwnn[i/2] = j & 0xff;
-			j = 0;
-		}
-	}
+	rc = lpfc_wwn_set(buf, cnt, wwnn);
+	if (rc) {
+		/* Allow wwnn to be set many times, as long as the enable
+		 * is set. However, once the wwpn is set, everything locks.
+		 */
+		return rc;
+	}
+
 	phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
 
 	dev_printk(KERN_NOTICE, &phba->pcidev->dev,
@@ -2271,9 +2322,438 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
 	return count;
 }
-static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
+static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,
 		   lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
 
+/**
+ * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
+ *		       Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ *
+ * Returns:
+ * value of count
+ **/
+static ssize_t
+lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+			wwn_to_u64(phba->cfg_oas_tgt_wwpn));
+}
+
+/**
+ * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
+ *			Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * @count: Size of the data buffer.
+ *
+ * Returns:
+ * -EINVAL count is invalid, invalid wwpn byte invalid
+ * -EPERM oas is not supported by hba
+ * value of count on success
+ **/
+static ssize_t
+lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+	unsigned int cnt = count;
+	uint8_t wwpn[WWN_SZ];
+	int rc;
+
+	if (!phba->cfg_fof)
+		return -EPERM;
+
+	/* count may include a LF at end of string */
+	if (buf[cnt-1] == '\n')
+		cnt--;
+
+	rc = lpfc_wwn_set(buf, cnt, wwpn);
+	if (rc)
+		return rc;
+
+	memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
+	if (wwn_to_u64(wwpn) == 0)
+		phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
+	else
+		phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
+	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
+	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
+	return count;
+}
+static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
+		   lpfc_oas_tgt_show, lpfc_oas_tgt_store);
+
+/**
+ * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
+ *		       for Optimized Access Storage (OAS) operations.
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: buffer for passing information.
+ * + * Returns: + * value of count on success + **/ +static ssize_t +lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return snprintf(buf, PAGE_SIZE, "0x%llx\n", + wwn_to_u64(phba->cfg_oas_vpt_wwpn)); +} + +/** + * lpfc_oas_vpt_store - Store wwpn of vport whose targets maybe enabled + * for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. + * + * Returns: + * -EINVAL count is invalid, invalid wwpn byte invalid + * -EPERM oas is not supported by hba + * value of count on success + **/ +static ssize_t +lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + unsigned int cnt = count; + uint8_t wwpn[WWN_SZ]; + int rc; + + if (!phba->cfg_fof) + return -EPERM; + + /* count may include a LF at end of string */ + if (buf[cnt-1] == '\n') + cnt--; + + rc = lpfc_wwn_set(buf, cnt, wwpn); + if (rc) + return rc; + + memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); + memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); + if (wwn_to_u64(wwpn) == 0) + phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT; + else + phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT; + phba->cfg_oas_flags &= ~OAS_LUN_VALID; + phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; + return count; +} +static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR, + lpfc_oas_vpt_show, lpfc_oas_vpt_store); + +/** + * lpfc_oas_lun_state_show - Return the current state (enabled or disabled) + * of whether luns will be enabled or disabled + * for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * size of formatted string. + **/ +static ssize_t +lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state); +} + +/** + * lpfc_oas_lun_state_store - Store the state (enabled or disabled) + * of whether luns will be enabled or disabled + * for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. 
+ * + * Returns: + * -EINVAL count is invalid, invalid wwpn byte invalid + * -EPERM oas is not supported by hba + * value of count on success + **/ +static ssize_t +lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + int val = 0; + + if (!phba->cfg_fof) + return -EPERM; + + if (!isdigit(buf[0])) + return -EINVAL; + + if (sscanf(buf, "%i", &val) != 1) + return -EINVAL; + + if ((val != 0) && (val != 1)) + return -EINVAL; + + phba->cfg_oas_lun_state = val; + + return strlen(buf); +} +static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR, + lpfc_oas_lun_state_show, lpfc_oas_lun_state_store); + +/** + * lpfc_oas_lun_status_show - Return the status of the Optimized Access + * Storage (OAS) lun returned by the + * lpfc_oas_lun_show function. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * size of formatted string. + **/ +static ssize_t +lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + if (!(phba->cfg_oas_flags & OAS_LUN_VALID)) + return -EFAULT; + + return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status); +} +static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO, + lpfc_oas_lun_status_show, NULL); + + +/** + * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage + * (OAS) operations. + * @phba: lpfc_hba pointer. + * @ndlp: pointer to fcp target node. + * @lun: the fc lun for setting oas state. + * @oas_state: the oas state to be set to the lun. + * + * Returns: + * SUCCESS : 0 + * -EPERM OAS is not enabled or not supported by this port. + * + */ +static size_t +lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], + uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state) +{ + + int rc = 0; + + if (!phba->cfg_fof) + return -EPERM; + + if (oas_state) { + if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, + (struct lpfc_name *)tgt_wwpn, lun)) + rc = -ENOMEM; + } else { + lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, + (struct lpfc_name *)tgt_wwpn, lun); + } + return rc; + +} + +/** + * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized + * Access Storage (OAS) operations. + * @phba: lpfc_hba pointer. + * @vpt_wwpn: wwpn of the vport associated with the returned lun + * @tgt_wwpn: wwpn of the target associated with the returned lun + * @lun_status: status of the lun returned lun + * + * Returns the first or next lun enabled for OAS operations for the vport/target + * specified. If a lun is found, its vport wwpn, target wwpn and status is + * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned. + * + * Return: + * lun that is OAS enabled for the vport/target + * NOT_OAS_ENABLED_LUN when no oas enabled lun found. 
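lpfc_oas_lun_state_store() above is deliberately strict about its input: a non-digit lead byte, a string sscanf() cannot parse as an integer, or any value other than 0 or 1 is rejected before the state changes. The same three-step validation, restated on its own (parse_bool_state() is a hypothetical name):

```c
/* Strict 0/1 parse in the style of the lun_state store above:
 * digit lead byte, parseable integer, boolean range. */
#include <ctype.h>
#include <stdio.h>

static int parse_bool_state(const char *buf, int *state)
{
	int val;

	if (!isdigit((unsigned char)buf[0]))
		return -1;		/* rejects "on", " 1", "-1", ... */
	if (sscanf(buf, "%i", &val) != 1)
		return -1;
	if (val != 0 && val != 1)
		return -1;		/* only a boolean state is legal */
	*state = val;
	return 0;
}

int main(void)
{
	int state;

	return parse_bool_state("1\n", &state);	/* 0 on success */
}
```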
+ */ +static uint64_t +lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[], + uint8_t tgt_wwpn[], uint32_t *lun_status) +{ + uint64_t found_lun; + + if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn) + return NOT_OAS_ENABLED_LUN; + if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *) + phba->sli4_hba.oas_next_vpt_wwpn, + (struct lpfc_name *) + phba->sli4_hba.oas_next_tgt_wwpn, + &phba->sli4_hba.oas_next_lun, + (struct lpfc_name *)vpt_wwpn, + (struct lpfc_name *)tgt_wwpn, + &found_lun, lun_status)) + return found_lun; + else + return NOT_OAS_ENABLED_LUN; +} + +/** + * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations + * @phba: lpfc_hba pointer. + * @vpt_wwpn: vport wwpn by reference. + * @tgt_wwpn: target wwpn by reference. + * @lun: the fc lun for setting oas state. + * @oas_state: the oas state to be set to the oas_lun. + * + * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE) + * a lun for OAS operations. + * + * Return: + * SUCCESS: 0 + * -ENOMEM: failed to enable an lun for OAS operations + * -EPERM: OAS is not enabled + */ +static ssize_t +lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[], + uint8_t tgt_wwpn[], uint64_t lun, + uint32_t oas_state) +{ + + int rc; + + rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun, + oas_state); + return rc; +} + +/** + * lpfc_oas_lun_show - Return oas enabled luns from a chosen target + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * This routine returns a lun enabled for OAS each time the function + * is called. + * + * Returns: + * SUCCESS: size of formatted string. + * -EFAULT: target or vport wwpn was not set properly. + * -EPERM: oas is not enabled. + **/ +static ssize_t +lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + uint64_t oas_lun; + int len = 0; + + if (!phba->cfg_fof) + return -EPERM; + + if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) + if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)) + return -EFAULT; + + if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) + if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)) + return -EFAULT; + + oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn, + phba->cfg_oas_tgt_wwpn, + &phba->cfg_oas_lun_status); + if (oas_lun != NOT_OAS_ENABLED_LUN) + phba->cfg_oas_flags |= OAS_LUN_VALID; + + len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun); + + return len; +} + +/** + * lpfc_oas_lun_store - Sets the OAS state for lun + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * This function sets the OAS state for lun. Before this function is called, + * the vport wwpn, target wwpn, and oas state need to be set. + * + * Returns: + * SUCCESS: size of formatted string. + * -EFAULT: target or vport wwpn was not set properly. + * -EPERM: oas is not enabled. + * size of formatted string. 
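lpfc_oas_lun_get_next() and lpfc_oas_lun_show() above form a read-side cursor: the adapter remembers where the previous read stopped (the oas_next_* fields), each read of the attribute returns the next OAS-enabled LUN, and the sentinel NOT_OAS_ENABLED_LUN signals the end of the walk. The sketch below shows that iterator shape over a plain array; every name in it is made up for the example.

```c
/* Cursor-style iteration: the cursor persists between "reads" and a
 * sentinel ends the walk.  Illustrative names only. */
#include <stdint.h>
#include <stdio.h>

#define NOT_ENABLED_LUN ((uint64_t)-1)

struct lun_entry {
	uint64_t lun;
	int enabled;
};

struct lun_cursor {
	const struct lun_entry *tbl;
	unsigned int nr;
	unsigned int next;	/* persists between reads */
};

static uint64_t lun_get_next(struct lun_cursor *c)
{
	while (c->next < c->nr) {
		const struct lun_entry *e = &c->tbl[c->next++];

		if (e->enabled)
			return e->lun;
	}
	c->next = 0;		/* wrap: the next read restarts the walk */
	return NOT_ENABLED_LUN;
}

int main(void)
{
	static const struct lun_entry tbl[] = {
		{ 0, 0 }, { 2, 1 }, { 5, 1 },
	};
	struct lun_cursor c = { tbl, 3, 0 };
	uint64_t lun;

	while ((lun = lun_get_next(&c)) != NOT_ENABLED_LUN)
		printf("enabled lun 0x%llx\n", (unsigned long long)lun);
	return 0;
}
```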
+ **/ +static ssize_t +lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + uint64_t scsi_lun; + ssize_t rc; + + if (!phba->cfg_fof) + return -EPERM; + + if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) + return -EFAULT; + + if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) + return -EFAULT; + + if (!isdigit(buf[0])) + return -EINVAL; + + if (sscanf(buf, "0x%llx", &scsi_lun) != 1) + return -EINVAL; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3372 Try to set vport 0x%llx target 0x%llx lun:%lld " + "with oas set to %d\n", + wwn_to_u64(phba->cfg_oas_vpt_wwpn), + wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun, + phba->cfg_oas_lun_state); + + rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn, + phba->cfg_oas_tgt_wwpn, scsi_lun, + phba->cfg_oas_lun_state); + + if (rc) + return rc; + + return count; +} +static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR, + lpfc_oas_lun_show, lpfc_oas_lun_store); static int lpfc_poll = 0; module_param(lpfc_poll, int, S_IRUGO); @@ -2302,11 +2782,17 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, "FCF Fast failover=1 Priority failover=2"); -int lpfc_enable_rrq; +int lpfc_enable_rrq = 2; module_param(lpfc_enable_rrq, int, S_IRUGO); MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); lpfc_param_show(enable_rrq); -lpfc_param_init(enable_rrq, 0, 0, 1); +/* +# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures +# 0x0 = disabled, XRI/OXID use not tracked. +# 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent. +# 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent. +*/ +lpfc_param_init(enable_rrq, 2, 0, 2); static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL); /* @@ -2580,9 +3066,12 @@ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1, /* # lun_queue_depth: This parameter is used to limit the number of outstanding -# commands per FCP LUN. Value range is [1,128]. Default value is 30. +# commands per FCP LUN. Value range is [1,512]. Default value is 30. +# If this parameter value is greater than 1/8th the maximum number of exchanges +# supported by the HBA port, then the lun queue depth will be reduced to +# 1/8th the maximum number of exchanges. */ -LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128, +LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512, "Max number of FCP commands we can queue to a specific LUN"); /* @@ -2590,7 +3079,7 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128, # commands per target port. Value range is [10,65535]. Default value is 65535. 
 */
 LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
-	"Max number of FCP commands we can queue to a specific target port");
+		  "Max number of FCP commands we can queue to a specific target port");
 
 /*
 # hba_queue_depth: This parameter is used to limit the number of outstanding
@@ -2795,6 +3284,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 			"3054 lpfc_topology changed from %d to %d\n",
 			prev_val, val);
+		if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
+			phba->fc_topology_changed = 1;
 		err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
 		if (err) {
 			phba->cfg_topology = prev_val;
@@ -3786,6 +4277,157 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
 static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
 		   lpfc_fcp_imax_show, lpfc_fcp_imax_store);
 
+/**
+ * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the driver CPU mapping.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_vector_map_info *cpup;
+	int len = 0;
+
+	if ((phba->sli_rev != LPFC_SLI_REV4) ||
+	    (phba->intr_type != MSIX))
+		return len;
+
+	switch (phba->cfg_fcp_cpu_map) {
+	case 0:
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"fcp_cpu_map: No mapping (%d)\n",
+				phba->cfg_fcp_cpu_map);
+		return len;
+	case 1:
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"fcp_cpu_map: HBA centric mapping (%d): "
+				"%d online CPUs\n",
+				phba->cfg_fcp_cpu_map,
+				phba->sli4_hba.num_online_cpu);
+		break;
+	case 2:
+		len += snprintf(buf + len, PAGE_SIZE-len,
+				"fcp_cpu_map: Driver centric mapping (%d): "
+				"%d online CPUs\n",
+				phba->cfg_fcp_cpu_map,
+				phba->sli4_hba.num_online_cpu);
+		break;
+	}
+
+	while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
+		cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
+
+		/* margin should fit in this and the truncated message */
+		if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
+			len += snprintf(buf + len, PAGE_SIZE-len,
+					"CPU %02d io_chan %02d "
+					"physid %d coreid %d\n",
+					phba->sli4_hba.curr_disp_cpu,
+					cpup->channel_id, cpup->phys_id,
+					cpup->core_id);
+		else
+			len += snprintf(buf + len, PAGE_SIZE-len,
+					"CPU %02d io_chan %02d "
+					"physid %d coreid %d IRQ %d\n",
+					phba->sli4_hba.curr_disp_cpu,
+					cpup->channel_id, cpup->phys_id,
+					cpup->core_id, cpup->irq);
+
+		phba->sli4_hba.curr_disp_cpu++;
+
+		/* display max number of CPUs keeping some margin */
+		if (phba->sli4_hba.curr_disp_cpu <
+		    phba->sli4_hba.num_present_cpu &&
+		    (len >= (PAGE_SIZE - 64))) {
+			len += snprintf(buf + len, PAGE_SIZE-len, "more...\n");
+			break;
+		}
+	}
+
+	if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
+		phba->sli4_hba.curr_disp_cpu = 0;
+
+	return len;
+}
+
+/**
+ * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: new CPU map value (currently ignored).
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL - Not implemented yet.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
+		       const char *buf, size_t count)
+{
+	int status = -EINVAL;
+	return status;
+}
+
+/*
+# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
+# for the HBA.
+#
+# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
+#	0 - Do not affinitize IRQ vectors
+#	1 - Affinitize HBA vectors with respect to each HBA
+#	    (start with CPU0 for each HBA)
+#	2 - Affinitize HBA vectors with respect to the entire driver
+#	    (round robin thru all CPUs across all HBAs)
+*/
+static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_cpu_map,
+		 "Defines how to map CPUs to IRQ vectors per HBA");
+
+/**
+ * lpfc_fcp_cpu_map_init - Set the initial CPU affinity mapping
+ * @phba: lpfc_hba pointer.
+ * @val: cpu map mode value.
+ *
+ * Description:
+ * If val is in a valid range [0-2], then affinitize the adapter's
+ * MSIX vectors.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
+{
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		phba->cfg_fcp_cpu_map = 0;
+		return 0;
+	}
+
+	if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
+		phba->cfg_fcp_cpu_map = val;
+		return 0;
+	}
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"3326 fcp_cpu_map: %d out of range, using default\n",
+			val);
+	phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+
+	return 0;
+}
+
+static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
+		   lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
+
 /*
 # lpfc_fcp_class: Determines FC class to use for the FCP protocol.
 # Value range is [2,3]. Default value is 3.
@@ -3801,6 +4443,14 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
 		   "Use ADISC on rediscovery to authenticate FCP devices");
 
 /*
+# lpfc_first_burst_size: First burst size to use on the NPorts
+# that support first burst.
+# Value range is [0,65536]. Default value is 0.
+*/
+LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
+		   "First burst size for Targets that support first burst");
+
+/*
 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
 # depth. Default value is 0. When the value of this parameter is zero the
 # SCSI command completion time is not used for controlling I/O queue depth. When
@@ -3854,8 +4504,11 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
 # For [0], FCP commands are issued to Work Queues ina round robin fashion.
 # For [1], FCP commands are issued to a Work Queue associated with the
 # current CPU.
+# The driver will set this to 1 if it is able to set up CPU affinity for
+# FCP I/Os through the Work Queue associated with the current CPU. Otherwise,
+# round-robin scheduling of FCP I/Os through WQs will be used.
 */
-LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for "
+LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
 		"issuing commands [0] - Round Robin, [1] - Current CPU");
 
 /*
@@ -3922,11 +4575,28 @@ LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
 		"during discovery");
 
 /*
-# lpfc_max_luns: maximum allowed LUN.
+# lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
+# will be scanned by the SCSI midlayer when sequential scanning is
+# used; and is also the highest LUN ID allowed when the SCSI midlayer
+# parses REPORT_LUN responses. The lpfc driver has no LUN count or
+# LUN ID limit, but the SCSI midlayer requires this field for the uses
+# above. The lpfc driver limits the default value to 255 for two reasons.
+# As it bounds the sequential scan loop, scanning for thousands of LUNs
+# on a target can take minutes of wall clock time. Additionally,
+# there are FC targets, such as JBODs, that only recognize 8 bits of
+# LUN ID. When they receive a value greater than 8 bits, they chop off
+# the high order bits. In other words, they see LUN IDs 0, 256, 512,
+# and so on all as LUN ID 0. This causes the Linux kernel, which sees
+# valid responses at each of the LUN IDs, to believe there are multiple
+# devices present, when in fact there is only one.
+# A customer who is aware of their target's behavior, and of the results
+# indicated above, is welcome to increase the lpfc_max_luns value.
+# As mentioned, this value is not used by the lpfc driver, only the
+# SCSI midlayer.
 # Value range is [0,65535]. Default value is 255.
 # NOTE: The SCSI layer might probe all allowed LUN on some old targets.
 */
-LPFC_VPORT_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN");
+LPFC_VPORT_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
 
 /*
 # lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring.
@@ -3936,6 +4606,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
 	     "Milliseconds driver will wait between polling FCP ring");
 
 /*
+# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
+# to complete in seconds. Value range is [5,180], default value is 60.
+*/
+LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
+	     "Maximum time to wait for task management commands to complete");
+/*
 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 # support this feature
 # 0 = MSI disabled
@@ -3947,25 +4623,6 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
 	    "MSI-X (2), if possible");
 
 /*
-# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
-# This parameter is ignored and will eventually be depricated
-#
-# Value range is [1,7]. Default value is 4.
-*/
-LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
-	    LPFC_FCP_IO_CHAN_MAX,
-	    "Set the number of fast-path FCP work queues, if possible");
-
-/*
-# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
-#
-# Value range is [1,7]. Default value is 4.
-*/
-LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
-	    LPFC_FCP_IO_CHAN_MAX,
-	    "Set the number of fast-path FCP event queues, if possible");
-
-/*
 # lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
 #
 # Value range is [1,7]. Default value is 4.
@@ -3991,6 +4648,21 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
 
 /*
+# lpfc_EnableXLane: Enable Express Lane Feature
+#      0x0   Express Lane Feature disabled
+#      0x1   Express Lane Feature enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
+
+/*
+# lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature
+#       0x0 - 0x7f  = CS_CTL field in FC header (high 7 bits)
+# Value range is [0x0,0x7f]. Default value is 0
+*/
+LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
+
+/*
 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
 # 0 = BlockGuard disabled (default)
 # 1 = BlockGuard enabled
 # Value range is [0,1]. Default value is 0.
@@ -4003,12 +4675,11 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 # 0 = disabled (default)
 # 1 = enabled
 # Value range is [0,1]. Default value is 0.
+#
+# This feature is under investigation and may be supported in the future.
 */
 unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
-module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
-MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
-
 /*
 # lpfc_prot_mask: i
 # - Bit mask of host protection capabilities used to register with the
@@ -4065,16 +4736,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery,
 
 /*
  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 256. The default value is
+ * This value can be set to values between 64 and 4096. The default value is
  * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
  * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * Because of the additional overhead involved in setting up T10-DIF,
+ * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
+ * and will be limited to 512 if BlockGuard is enabled under SLI3.
 */
 LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
 	    LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
 
-LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
-	    LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
-	    "Max Protection Scatter Gather Segment Count");
+/*
+ * This parameter will be deprecated; the driver cannot limit the
+ * protection data s/g list.
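The 8-bit LUN ID truncation described for lpfc_max_luns above is easy to see numerically: a target that honors only the low-order byte effectively computes lun & 0xFF, so several distinct IDs alias to LUN 0. A self-contained illustration:

    #include <stdio.h>

    /* A JBOD-style target keeping 8 bits of LUN ID aliases 0, 256 and 512. */
    int main(void)
    {
            unsigned int luns[] = { 0, 255, 256, 512 };
            unsigned int i;

            for (i = 0; i < sizeof(luns) / sizeof(luns[0]); i++)
                    printf("LUN %4u is seen by the target as %u\n",
                           luns[i], luns[i] & 0xFF);
            return 0;
    }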
+ */ +LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, + LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, + "Max Protection Scatter Gather Segment Count"); struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_bg_info, @@ -4105,6 +4783,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_devloss_tmo, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, + &dev_attr_lpfc_first_burst_size, &dev_attr_lpfc_ack0, &dev_attr_lpfc_topology, &dev_attr_lpfc_scan_down, @@ -4133,10 +4812,10 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_issue_reset, &dev_attr_lpfc_poll, &dev_attr_lpfc_poll_tmo, + &dev_attr_lpfc_task_mgmt_tmo, &dev_attr_lpfc_use_msi, &dev_attr_lpfc_fcp_imax, - &dev_attr_lpfc_fcp_wq_count, - &dev_attr_lpfc_fcp_eq_count, + &dev_attr_lpfc_fcp_cpu_map, &dev_attr_lpfc_fcp_io_channel, &dev_attr_lpfc_enable_bg, &dev_attr_lpfc_soft_wwnn, @@ -4144,6 +4823,13 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_soft_wwn_enable, &dev_attr_lpfc_enable_hba_reset, &dev_attr_lpfc_enable_hba_heartbeat, + &dev_attr_lpfc_EnableXLane, + &dev_attr_lpfc_XLanePriority, + &dev_attr_lpfc_xlane_lun, + &dev_attr_lpfc_xlane_tgt, + &dev_attr_lpfc_xlane_vpt, + &dev_attr_lpfc_xlane_lun_state, + &dev_attr_lpfc_xlane_lun_status, &dev_attr_lpfc_sg_seg_cnt, &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, @@ -4162,6 +4848,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_dss, &dev_attr_lpfc_sriov_hw_max_virtfn, &dev_attr_protocol, + &dev_attr_lpfc_xlane_supported, NULL, }; @@ -4180,6 +4867,7 @@ struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_lpfc_restrict_login, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, + &dev_attr_lpfc_first_burst_size, &dev_attr_lpfc_fdmi_on, &dev_attr_lpfc_max_luns, &dev_attr_nport_evt_cnt, @@ -5112,21 +5800,30 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_topology_init(phba, lpfc_topology); lpfc_link_speed_init(phba, lpfc_link_speed); lpfc_poll_tmo_init(phba, lpfc_poll_tmo); + lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo); lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); lpfc_enable_rrq_init(phba, lpfc_enable_rrq); lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); - lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); - lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); + lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); + lpfc_EnableXLane_init(phba, lpfc_EnableXLane); + if (phba->sli_rev != LPFC_SLI_REV4) + phba->cfg_EnableXLane = 0; + lpfc_XLanePriority_init(phba, lpfc_XLanePriority); + memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t))); + memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t))); + phba->cfg_oas_lun_state = 0; + phba->cfg_oas_lun_status = 0; + phba->cfg_oas_flags = 0; lpfc_enable_bg_init(phba, lpfc_enable_bg); if (phba->sli_rev == LPFC_SLI_REV4) phba->cfg_poll = 0; else - phba->cfg_poll = lpfc_poll; + phba->cfg_poll = lpfc_poll; phba->cfg_soft_wwnn = 0L; phba->cfg_soft_wwpn = 0L; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); @@ -5158,6 +5855,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport) lpfc_restrict_login_init(vport, lpfc_restrict_login); lpfc_fcp_class_init(vport, lpfc_fcp_class); lpfc_use_adisc_init(vport, lpfc_use_adisc); + lpfc_first_burst_size_init(vport, lpfc_first_burst_size); lpfc_max_scsicmpl_time_init(vport, 
lpfc_max_scsicmpl_time); lpfc_fdmi_on_init(vport, lpfc_fdmi_on); lpfc_discovery_threads_init(vport, lpfc_discovery_threads); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 32d5683e618..5b5c825d957 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2012 Emulex. All rights reserved. * + * Copyright (C) 2009-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -64,18 +64,14 @@ struct lpfc_bsg_event { struct list_head events_to_get; struct list_head events_to_see; - /* job waiting for this event to finish */ - struct fc_bsg_job *set_job; + /* driver data associated with the job */ + void *dd_data; }; struct lpfc_bsg_iocb { struct lpfc_iocbq *cmdiocbq; - struct lpfc_iocbq *rspiocbq; - struct lpfc_dmabuf *bmp; + struct lpfc_dmabuf *rmp; struct lpfc_nodelist *ndlp; - - /* job waiting for this iocb to finish */ - struct fc_bsg_job *set_job; }; struct lpfc_bsg_mbox { @@ -86,20 +82,13 @@ struct lpfc_bsg_mbox { uint32_t mbOffset; /* from app */ uint32_t inExtWLen; /* from app */ uint32_t outExtWLen; /* from app */ - - /* job waiting for this mbox command to finish */ - struct fc_bsg_job *set_job; }; #define MENLO_DID 0x0000FC0E struct lpfc_bsg_menlo { struct lpfc_iocbq *cmdiocbq; - struct lpfc_iocbq *rspiocbq; - struct lpfc_dmabuf *bmp; - - /* job waiting for this iocb to finish */ - struct fc_bsg_job *set_job; + struct lpfc_dmabuf *rmp; }; #define TYPE_EVT 1 @@ -108,6 +97,7 @@ struct lpfc_bsg_menlo { #define TYPE_MENLO 4 struct bsg_job_data { uint32_t type; + struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */ union { struct lpfc_bsg_event *evt; struct lpfc_bsg_iocb iocb; @@ -141,6 +131,149 @@ struct lpfc_dmabufext { uint32_t flag; }; +static void +lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) +{ + struct lpfc_dmabuf *mlast, *next_mlast; + + if (mlist) { + list_for_each_entry_safe(mlast, next_mlast, &mlist->list, + list) { + lpfc_mbuf_free(phba, mlast->virt, mlast->phys); + list_del(&mlast->list); + kfree(mlast); + } + lpfc_mbuf_free(phba, mlist->virt, mlist->phys); + kfree(mlist); + } + return; +} + +static struct lpfc_dmabuf * +lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size, + int outbound_buffers, struct ulp_bde64 *bpl, + int *bpl_entries) +{ + struct lpfc_dmabuf *mlist = NULL; + struct lpfc_dmabuf *mp; + unsigned int bytes_left = size; + + /* Verify we can support the size specified */ + if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE))) + return NULL; + + /* Determine the number of dma buffers to allocate */ + *bpl_entries = (size % LPFC_BPL_SIZE ? 
size/LPFC_BPL_SIZE + 1 : + size/LPFC_BPL_SIZE); + + /* Allocate dma buffer and place in BPL passed */ + while (bytes_left) { + /* Allocate dma buffer */ + mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!mp) { + if (mlist) + lpfc_free_bsg_buffers(phba, mlist); + return NULL; + } + + INIT_LIST_HEAD(&mp->list); + mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); + + if (!mp->virt) { + kfree(mp); + if (mlist) + lpfc_free_bsg_buffers(phba, mlist); + return NULL; + } + + /* Queue it to a linked list */ + if (!mlist) + mlist = mp; + else + list_add_tail(&mp->list, &mlist->list); + + /* Add buffer to buffer pointer list */ + if (outbound_buffers) + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + else + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys)); + bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys)); + bpl->tus.f.bdeSize = (uint16_t) + (bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE : + bytes_left); + bytes_left -= bpl->tus.f.bdeSize; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + bpl++; + } + return mlist; +} + +static unsigned int +lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, + struct fc_bsg_buffer *bsg_buffers, + unsigned int bytes_to_transfer, int to_buffers) +{ + + struct lpfc_dmabuf *mp; + unsigned int transfer_bytes, bytes_copied = 0; + unsigned int sg_offset, dma_offset; + unsigned char *dma_address, *sg_address; + LIST_HEAD(temp_list); + struct sg_mapping_iter miter; + unsigned long flags; + unsigned int sg_flags = SG_MITER_ATOMIC; + bool sg_valid; + + list_splice_init(&dma_buffers->list, &temp_list); + list_add(&dma_buffers->list, &temp_list); + sg_offset = 0; + if (to_buffers) + sg_flags |= SG_MITER_FROM_SG; + else + sg_flags |= SG_MITER_TO_SG; + sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt, + sg_flags); + local_irq_save(flags); + sg_valid = sg_miter_next(&miter); + list_for_each_entry(mp, &temp_list, list) { + dma_offset = 0; + while (bytes_to_transfer && sg_valid && + (dma_offset < LPFC_BPL_SIZE)) { + dma_address = mp->virt + dma_offset; + if (sg_offset) { + /* Continue previous partial transfer of sg */ + sg_address = miter.addr + sg_offset; + transfer_bytes = miter.length - sg_offset; + } else { + sg_address = miter.addr; + transfer_bytes = miter.length; + } + if (bytes_to_transfer < transfer_bytes) + transfer_bytes = bytes_to_transfer; + if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset)) + transfer_bytes = LPFC_BPL_SIZE - dma_offset; + if (to_buffers) + memcpy(dma_address, sg_address, transfer_bytes); + else + memcpy(sg_address, dma_address, transfer_bytes); + dma_offset += transfer_bytes; + sg_offset += transfer_bytes; + bytes_to_transfer -= transfer_bytes; + bytes_copied += transfer_bytes; + if (sg_offset >= miter.length) { + sg_offset = 0; + sg_valid = sg_miter_next(&miter); + } + } + } + sg_miter_stop(&miter); + local_irq_restore(flags); + list_del_init(&dma_buffers->list); + list_splice(&temp_list, &dma_buffers->list); + return bytes_copied; +} + /** * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler * @phba: Pointer to HBA context object. 
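The entry-count computation in lpfc_alloc_bsg_buffers above is a plain ceiling division, and could equally be written with the kernel's DIV_ROUND_UP() macro (an equivalent sketch):

    #include <linux/kernel.h>

    /* Equivalent to: size % LPFC_BPL_SIZE ? size / LPFC_BPL_SIZE + 1
     *                                     : size / LPFC_BPL_SIZE      */
    static inline unsigned int bsg_dma_buffer_count(unsigned int size,
                                                    unsigned int buf_size)
    {
            return DIV_ROUND_UP(size, buf_size);
    }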
@@ -166,62 +299,77 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, struct bsg_job_data *dd_data; struct fc_bsg_job *job; IOCB_t *rsp; - struct lpfc_dmabuf *bmp; + struct lpfc_dmabuf *bmp, *cmp, *rmp; struct lpfc_nodelist *ndlp; struct lpfc_bsg_iocb *iocb; unsigned long flags; + unsigned int rsp_size; int rc = 0; + dd_data = cmdiocbq->context1; + + /* Determine if job has been aborted */ spin_lock_irqsave(&phba->ct_ev_lock, flags); - dd_data = cmdiocbq->context2; - if (!dd_data) { - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - lpfc_sli_release_iocbq(phba, cmdiocbq); - return; + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - iocb = &dd_data->context_un.iocb; - job = iocb->set_job; - job->dd_data = NULL; /* so timeout handler does not reply */ + /* Close the timeout handler abort window */ + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; + spin_unlock_irqrestore(&phba->hbalock, flags); - bmp = iocb->bmp; + iocb = &dd_data->context_un.iocb; + ndlp = iocb->ndlp; + rmp = iocb->rmp; + cmp = cmdiocbq->context2; + bmp = cmdiocbq->context3; rsp = &rspiocbq->iocb; - ndlp = cmdiocbq->context1; - pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + /* Copy the completed data or set the error status */ - if (rsp->ulpStatus) { - if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { - switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { - case IOERR_SEQUENCE_TIMEOUT: - rc = -ETIMEDOUT; - break; - case IOERR_INVALID_RPI: - rc = -EFAULT; - break; - default: + if (job) { + if (rsp->ulpStatus) { + if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { + switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { + case IOERR_SEQUENCE_TIMEOUT: + rc = -ETIMEDOUT; + break; + case IOERR_INVALID_RPI: + rc = -EFAULT; + break; + default: + rc = -EACCES; + break; + } + } else { rc = -EACCES; - break; } - } else - rc = -EACCES; - } else - job->reply->reply_payload_rcv_len = - rsp->un.genreq64.bdl.bdeSize; + } else { + rsp_size = rsp->un.genreq64.bdl.bdeSize; + job->reply->reply_payload_rcv_len = + lpfc_bsg_copy_data(rmp, &job->reply_payload, + rsp_size, 0); + } + } + lpfc_free_bsg_buffers(phba, cmp); + lpfc_free_bsg_buffers(phba, rmp); lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + kfree(bmp); lpfc_sli_release_iocbq(phba, cmdiocbq); lpfc_nlp_put(ndlp); - kfree(bmp); kfree(dd_data); - /* make error code available to userspace */ - job->reply->result = rc; - /* complete the job back to userspace */ - job->job_done(job); - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Complete the job if the job is still active */ + + if (job) { + job->reply->result = rc; + job->job_done(job); + } return; } @@ -240,13 +388,11 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) uint32_t timeout; struct lpfc_iocbq *cmdiocbq = NULL; IOCB_t *cmd; - struct lpfc_dmabuf *bmp = NULL; + struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL; int request_nseg; int reply_nseg; - struct scatterlist *sgel = NULL; - int numbde; - dma_addr_t busaddr; struct bsg_job_data *dd_data; + unsigned long flags; uint32_t creg_val; int rc = 0; int iocb_stat; @@ -268,54 +414,50 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) goto no_ndlp; } - bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); - if (!bmp) { - rc = -ENOMEM; - goto free_ndlp; - } - 
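The completion handler above follows the fence this patch applies to every bsg completion path: claim the waiting job under ct_ev_lock and clear job->dd_data so the timeout handler can no longer see it, then drop LPFC_IO_CMD_OUTSTANDING under hbalock to close the abort window. Distilled into one helper (a sketch of the pattern, not a function the driver defines):

    /* Sketch: claim the bsg job and close the timeout-abort window. */
    static struct fc_bsg_job *bsg_claim_job(struct lpfc_hba *phba,
                                            struct bsg_job_data *dd_data,
                                            struct lpfc_iocbq *cmdiocbq)
    {
            struct fc_bsg_job *job;
            unsigned long flags;

            spin_lock_irqsave(&phba->ct_ev_lock, flags);
            job = dd_data->set_job;
            if (job)
                    job->dd_data = NULL;    /* timeout handler backs off */
            spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

            spin_lock_irqsave(&phba->hbalock, flags);
            cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
            spin_unlock_irqrestore(&phba->hbalock, flags);

            return job;     /* NULL: the job was already aborted */
    }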
if (ndlp->nlp_flag & NLP_ELS_SND_MASK) { rc = -ENODEV; - goto free_bmp; + goto free_ndlp; } cmdiocbq = lpfc_sli_get_iocbq(phba); if (!cmdiocbq) { rc = -ENOMEM; - goto free_bmp; + goto free_ndlp; } cmd = &cmdiocbq->iocb; + + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!bmp) { + rc = -ENOMEM; + goto free_cmdiocbq; + } bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); if (!bmp->virt) { rc = -ENOMEM; - goto free_cmdiocbq; + goto free_bmp; } INIT_LIST_HEAD(&bmp->list); + bpl = (struct ulp_bde64 *) bmp->virt; - request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; + request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64); + cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, + 1, bpl, &request_nseg); + if (!cmp) { + rc = -ENOMEM; + goto free_bmp; } + lpfc_bsg_copy_data(cmp, &job->request_payload, + job->request_payload.payload_len, 1); - reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); - for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; + bpl += request_nseg; + reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg; + rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0, + bpl, &reply_nseg); + if (!rmp) { + rc = -ENOMEM; + goto free_cmp; } cmd->un.genreq64.bdl.ulpIoTag32 = 0; @@ -343,17 +485,21 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) cmd->ulpTimeout = timeout; cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; - cmdiocbq->context1 = ndlp; - cmdiocbq->context2 = dd_data; + cmdiocbq->context1 = dd_data; + cmdiocbq->context2 = cmp; + cmdiocbq->context3 = bmp; + cmdiocbq->context_un.ndlp = ndlp; dd_data->type = TYPE_IOCB; + dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = cmdiocbq; - dd_data->context_un.iocb.set_job = job; - dd_data->context_un.iocb.bmp = bmp; + dd_data->context_un.iocb.ndlp = ndlp; + dd_data->context_un.iocb.rmp = rmp; + job->dd_data = dd_data; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) { rc = -EIO ; - goto free_cmdiocbq; + goto free_rmp; } creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); writel(creg_val, phba->HCregaddr); @@ -361,26 +507,35 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) } iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); - if (iocb_stat == IOCB_SUCCESS) + + if (iocb_stat == IOCB_SUCCESS) { + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O had not been completed yet */ + if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) { + /* open up abort window to timeout handler */ + cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; + } + spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ - else if (iocb_stat == IOCB_BUSY) + } else if (iocb_stat == IOCB_BUSY) { rc = -EAGAIN; - else + } else { rc = -EIO; - + } /* iocb failed so cleanup */ - 
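The submit side opens that window only after the iocb is accepted, and only if the completion has not already run; the LPFC_IO_LIBDFC test under hbalock is what detects the "completed before we got here" race. In outline:

    /* Outline of the submit-side half of the abort-window handshake. */
    rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
    if (rc == IOCB_SUCCESS) {
            spin_lock_irqsave(&phba->hbalock, flags);
            /* still outstanding? then the timeout handler may abort it */
            if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)
                    cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
            spin_unlock_irqrestore(&phba->hbalock, flags);
            return 0;       /* the completion handler finishes the job */
    }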
pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); - - lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + job->dd_data = NULL; -free_cmdiocbq: - lpfc_sli_release_iocbq(phba, cmdiocbq); +free_rmp: + lpfc_free_bsg_buffers(phba, rmp); +free_cmp: + lpfc_free_bsg_buffers(phba, cmp); free_bmp: + if (bmp->virt) + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); +free_cmdiocbq: + lpfc_sli_release_iocbq(phba, cmdiocbq); free_ndlp: lpfc_nlp_put(ndlp); no_ndlp: @@ -418,67 +573,73 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, struct fc_bsg_job *job; IOCB_t *rsp; struct lpfc_nodelist *ndlp; - struct lpfc_dmabuf *pbuflist = NULL; + struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL; struct fc_bsg_ctels_reply *els_reply; uint8_t *rjt_data; unsigned long flags; + unsigned int rsp_size; int rc = 0; - spin_lock_irqsave(&phba->ct_ev_lock, flags); dd_data = cmdiocbq->context1; - /* normal completion and timeout crossed paths, already done */ - if (!dd_data) { - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - return; + ndlp = dd_data->context_un.iocb.ndlp; + cmdiocbq->context1 = ndlp; + + /* Determine if job has been aborted */ + spin_lock_irqsave(&phba->ct_ev_lock, flags); + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - cmdiocbq->iocb_flag |= LPFC_IO_WAKE; - if (cmdiocbq->context2 && rspiocbq) - memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, - &rspiocbq->iocb, sizeof(IOCB_t)); + /* Close the timeout handler abort window */ + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; + spin_unlock_irqrestore(&phba->hbalock, flags); - job = dd_data->context_un.iocb.set_job; - cmdiocbq = dd_data->context_un.iocb.cmdiocbq; - rspiocbq = dd_data->context_un.iocb.rspiocbq; rsp = &rspiocbq->iocb; - ndlp = dd_data->context_un.iocb.ndlp; + pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2; + prsp = (struct lpfc_dmabuf *)pcmd->list.next; - pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + /* Copy the completed job data or determine the job status if job is + * still active + */ - if (job->reply->result == -EAGAIN) - rc = -EAGAIN; - else if (rsp->ulpStatus == IOSTAT_SUCCESS) - job->reply->reply_payload_rcv_len = - rsp->un.elsreq64.bdl.bdeSize; - else if (rsp->ulpStatus == IOSTAT_LS_RJT) { - job->reply->reply_payload_rcv_len = - sizeof(struct fc_bsg_ctels_reply); - /* LS_RJT data returned in word 4 */ - rjt_data = (uint8_t *)&rsp->un.ulpWord[4]; - els_reply = &job->reply->reply_data.ctels_reply; - els_reply->status = FC_CTELS_STATUS_REJECT; - els_reply->rjt_data.action = rjt_data[3]; - els_reply->rjt_data.reason_code = rjt_data[2]; - els_reply->rjt_data.reason_explanation = rjt_data[1]; - els_reply->rjt_data.vendor_unique = rjt_data[0]; - } else - rc = -EIO; + if (job) { + if (rsp->ulpStatus == IOSTAT_SUCCESS) { + rsp_size = rsp->un.elsreq64.bdl.bdeSize; + job->reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + prsp->virt, + rsp_size); + } else if (rsp->ulpStatus == IOSTAT_LS_RJT) { + job->reply->reply_payload_rcv_len = + sizeof(struct fc_bsg_ctels_reply); + /* LS_RJT data 
returned in word 4 */ + rjt_data = (uint8_t *)&rsp->un.ulpWord[4]; + els_reply = &job->reply->reply_data.ctels_reply; + els_reply->status = FC_CTELS_STATUS_REJECT; + els_reply->rjt_data.action = rjt_data[3]; + els_reply->rjt_data.reason_code = rjt_data[2]; + els_reply->rjt_data.reason_explanation = rjt_data[1]; + els_reply->rjt_data.vendor_unique = rjt_data[0]; + } else { + rc = -EIO; + } + } - pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3; - lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys); - lpfc_sli_release_iocbq(phba, rspiocbq); - lpfc_sli_release_iocbq(phba, cmdiocbq); lpfc_nlp_put(ndlp); + lpfc_els_free_iocb(phba, cmdiocbq); kfree(dd_data); - /* make error code available to userspace */ - job->reply->result = rc; - job->dd_data = NULL; - /* complete the job back to userspace */ - job->job_done(job); - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Complete the job if the job is still active */ + + if (job) { + job->reply->result = rc; + job->job_done(job); + } return; } @@ -496,26 +657,25 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) uint32_t elscmd; uint32_t cmdsize; uint32_t rspsize; - struct lpfc_iocbq *rspiocbq; struct lpfc_iocbq *cmdiocbq; - IOCB_t *rsp; uint16_t rpi = 0; - struct lpfc_dmabuf *pcmd; - struct lpfc_dmabuf *prsp; - struct lpfc_dmabuf *pbuflist = NULL; - struct ulp_bde64 *bpl; - int request_nseg; - int reply_nseg; - struct scatterlist *sgel = NULL; - int numbde; - dma_addr_t busaddr; struct bsg_job_data *dd_data; + unsigned long flags; uint32_t creg_val; int rc = 0; /* in case no data is transferred */ job->reply->reply_payload_rcv_len = 0; + /* verify the els command is not greater than the + * maximum ELS transfer size. + */ + + if (job->request_payload.payload_len > FCELSSIZE) { + rc = -EINVAL; + goto no_dd_data; + } + /* allocate our bsg tracking structure */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); if (!dd_data) { @@ -525,88 +685,51 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) goto no_dd_data; } - if (!lpfc_nlp_get(ndlp)) { - rc = -ENODEV; - goto free_dd_data; - } - elscmd = job->request->rqst_data.r_els.els_code; cmdsize = job->request_payload.payload_len; rspsize = job->reply_payload.payload_len; - rspiocbq = lpfc_sli_get_iocbq(phba); - if (!rspiocbq) { - lpfc_nlp_put(ndlp); - rc = -ENOMEM; + + if (!lpfc_nlp_get(ndlp)) { + rc = -ENODEV; goto free_dd_data; } - rsp = &rspiocbq->iocb; - rpi = ndlp->nlp_rpi; + /* We will use the allocated dma buffers by prep els iocb for command + * and response to ensure if the job times out and the request is freed, + * we won't be dma into memory that is no longer allocated to for the + * request. 
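Because the request now lives in a driver-owned buffer for its whole lifetime, the payload is moved with the stock scatterlist copy helpers instead of pci_map_sg(). A minimal sketch of that copy-in/copy-out (the buffer pointers and lengths here are placeholders):

    #include <linux/scatterlist.h>

    /* Copy the bsg request in and the reply back out (sketch only). */
    static void bsg_copy_payloads(struct fc_bsg_job *job,
                                  void *cmd_buf, size_t cmd_len,
                                  void *rsp_buf, size_t rsp_len)
    {
            sg_copy_to_buffer(job->request_payload.sg_list,
                              job->request_payload.sg_cnt, cmd_buf, cmd_len);
            /* ... issue the command; hardware fills rsp_buf ... */
            sg_copy_from_buffer(job->reply_payload.sg_list,
                                job->reply_payload.sg_cnt, rsp_buf, rsp_len);
    }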
+ */ cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, elscmd); if (!cmdiocbq) { rc = -EIO; - goto free_rspiocbq; + goto release_ndlp; } - /* prep els iocb set context1 to the ndlp, context2 to the command - * dmabuf, context3 holds the data dmabuf - */ - pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2; - prsp = (struct lpfc_dmabuf *) pcmd->list.next; - lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); - kfree(pcmd); - lpfc_mbuf_free(phba, prsp->virt, prsp->phys); - kfree(prsp); - cmdiocbq->context2 = NULL; - - pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3; - bpl = (struct ulp_bde64 *) pbuflist->virt; - - request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; - } + rpi = ndlp->nlp_rpi; + + /* Transfer the request payload to allocated command dma buffer */ + + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt, + cmdsize); - reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); - for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; - } - cmdiocbq->iocb.un.elsreq64.bdl.bdeSize = - (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); if (phba->sli_rev == LPFC_SLI_REV4) cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi]; else cmdiocbq->iocb.ulpContext = rpi; cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; - cmdiocbq->context1 = NULL; - cmdiocbq->context2 = NULL; - - cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp; cmdiocbq->context1 = dd_data; cmdiocbq->context_un.ndlp = ndlp; - cmdiocbq->context2 = rspiocbq; + cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp; dd_data->type = TYPE_IOCB; + dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = cmdiocbq; - dd_data->context_un.iocb.rspiocbq = rspiocbq; - dd_data->context_un.iocb.set_job = job; - dd_data->context_un.iocb.bmp = NULL; dd_data->context_un.iocb.ndlp = ndlp; + dd_data->context_un.iocb.rmp = NULL; + job->dd_data = dd_data; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) { @@ -617,27 +740,33 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) writel(creg_val, phba->HCregaddr); readl(phba->HCregaddr); /* flush */ } + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); - lpfc_nlp_put(ndlp); - if (rc == IOCB_SUCCESS) + + if (rc == IOCB_SUCCESS) { + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O had not been completed/released */ + if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) { + /* open up abort window to timeout handler */ + cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; + } + spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ - else if (rc == IOCB_BUSY) + } else if (rc == IOCB_BUSY) { rc = -EAGAIN; - else + } else { rc = -EIO; + } -linkdown_err: - pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - 
job->request_payload.sg_cnt, DMA_TO_DEVICE); - pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); - - lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys); + /* iocb failed so cleanup */ + job->dd_data = NULL; - lpfc_sli_release_iocbq(phba, cmdiocbq); +linkdown_err: + cmdiocbq->context1 = ndlp; + lpfc_els_free_iocb(phba, cmdiocbq); -free_rspiocbq: - lpfc_sli_release_iocbq(phba, rspiocbq); +release_ndlp: + lpfc_nlp_put(ndlp); free_dd_data: kfree(dd_data); @@ -680,6 +809,7 @@ lpfc_bsg_event_free(struct kref *kref) kfree(ed); } + kfree(evt->dd_data); kfree(evt); } @@ -723,6 +853,7 @@ lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id) evt->req_id = ev_req_id; evt->reg_id = ev_reg_id; evt->wait_time_stamp = jiffies; + evt->dd_data = NULL; init_waitqueue_head(&evt->wq); kref_init(&evt->kref); return evt; @@ -790,6 +921,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_hbq_entry *hbqe; struct lpfc_sli_ct_request *ct_req; struct fc_bsg_job *job = NULL; + struct bsg_job_data *dd_data = NULL; unsigned long flags; int size = 0; @@ -986,10 +1118,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } list_move(evt->events_to_see.prev, &evt->events_to_get); - lpfc_bsg_event_unref(evt); - job = evt->set_job; - evt->set_job = NULL; + dd_data = (struct bsg_job_data *)evt->dd_data; + job = dd_data->set_job; + dd_data->set_job = NULL; + lpfc_bsg_event_unref(evt); if (job) { job->reply->reply_payload_rcv_len = size; /* make error code available to userspace */ @@ -1078,14 +1211,6 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job) goto job_error; } - dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); - if (dd_data == NULL) { - lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, - "2734 Failed allocation of dd_data\n"); - rc = -ENOMEM; - goto job_error; - } - event_req = (struct set_ct_event *) job->request->rqst_data.h_vendor.vendor_cmd; ev_mask = ((uint32_t)(unsigned long)event_req->type_mask & @@ -1095,6 +1220,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job) if (evt->reg_id == event_req->ev_reg_id) { lpfc_bsg_event_ref(evt); evt->wait_time_stamp = jiffies; + dd_data = (struct bsg_job_data *)evt->dd_data; break; } } @@ -1102,6 +1228,13 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job) if (&evt->node == &phba->ct_ev_waiters) { /* no event waiting struct yet - first call */ + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (dd_data == NULL) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2734 Failed allocation of dd_data\n"); + rc = -ENOMEM; + goto job_error; + } evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id, event_req->ev_req_id); if (!evt) { @@ -1111,7 +1244,10 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job) rc = -ENOMEM; goto job_error; } - + dd_data->type = TYPE_EVT; + dd_data->set_job = NULL; + dd_data->context_un.evt = evt; + evt->dd_data = (void *)dd_data; spin_lock_irqsave(&phba->ct_ev_lock, flags); list_add(&evt->node, &phba->ct_ev_waiters); lpfc_bsg_event_ref(evt); @@ -1121,9 +1257,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job) spin_lock_irqsave(&phba->ct_ev_lock, flags); evt->waiting = 1; - dd_data->type = TYPE_EVT; - dd_data->context_un.evt = evt; - evt->set_job = job; /* for unsolicited command */ + dd_data->set_job = job; /* for unsolicited command */ job->dd_data = dd_data; /* for fc transport timeout callback*/ spin_unlock_irqrestore(&phba->ct_ev_lock, flags); return 0; /* call job done later */ @@ -1147,7 
+1281,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job) struct lpfc_hba *phba = vport->phba; struct get_ct_event *event_req; struct get_ct_event_reply *event_reply; - struct lpfc_bsg_event *evt; + struct lpfc_bsg_event *evt, *evt_next; struct event_data *evt_dat = NULL; unsigned long flags; uint32_t rc = 0; @@ -1167,7 +1301,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job) event_reply = (struct get_ct_event_reply *) job->reply->reply_data.vendor_reply.vendor_rsp; spin_lock_irqsave(&phba->ct_ev_lock, flags); - list_for_each_entry(evt, &phba->ct_ev_waiters, node) { + list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) { if (evt->reg_id == event_req->ev_reg_id) { if (list_empty(&evt->events_to_get)) break; @@ -1252,57 +1386,69 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, struct bsg_job_data *dd_data; struct fc_bsg_job *job; IOCB_t *rsp; - struct lpfc_dmabuf *bmp; + struct lpfc_dmabuf *bmp, *cmp; struct lpfc_nodelist *ndlp; unsigned long flags; int rc = 0; + dd_data = cmdiocbq->context1; + + /* Determine if job has been aborted */ spin_lock_irqsave(&phba->ct_ev_lock, flags); - dd_data = cmdiocbq->context2; - /* normal completion and timeout crossed paths, already done */ - if (!dd_data) { - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - return; + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Close the timeout handler abort window */ + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; + spin_unlock_irqrestore(&phba->hbalock, flags); - job = dd_data->context_un.iocb.set_job; - bmp = dd_data->context_un.iocb.bmp; - rsp = &rspiocbq->iocb; ndlp = dd_data->context_un.iocb.ndlp; + cmp = cmdiocbq->context2; + bmp = cmdiocbq->context3; + rsp = &rspiocbq->iocb; - pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); + /* Copy the completed job data or set the error status */ - if (rsp->ulpStatus) { - if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { - switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { - case IOERR_SEQUENCE_TIMEOUT: - rc = -ETIMEDOUT; - break; - case IOERR_INVALID_RPI: - rc = -EFAULT; - break; - default: + if (job) { + if (rsp->ulpStatus) { + if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { + switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { + case IOERR_SEQUENCE_TIMEOUT: + rc = -ETIMEDOUT; + break; + case IOERR_INVALID_RPI: + rc = -EFAULT; + break; + default: + rc = -EACCES; + break; + } + } else { rc = -EACCES; - break; } - } else - rc = -EACCES; - } else - job->reply->reply_payload_rcv_len = - rsp->un.genreq64.bdl.bdeSize; + } else { + job->reply->reply_payload_rcv_len = 0; + } + } + lpfc_free_bsg_buffers(phba, cmp); lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + kfree(bmp); lpfc_sli_release_iocbq(phba, cmdiocbq); lpfc_nlp_put(ndlp); - kfree(bmp); kfree(dd_data); - /* make error code available to userspace */ - job->reply->result = rc; - job->dd_data = NULL; - /* complete the job back to userspace */ - job->job_done(job); - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Complete the job if the job is still active */ + + if (job) { + job->reply->result = rc; + job->job_done(job); + } return; } @@ -1316,13 +1462,15 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, **/ static int lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, - struct lpfc_dmabuf *bmp, int num_entry) + struct lpfc_dmabuf *cmp, struct lpfc_dmabuf 
*bmp, + int num_entry) { IOCB_t *icmd; struct lpfc_iocbq *ctiocb = NULL; int rc = 0; struct lpfc_nodelist *ndlp = NULL; struct bsg_job_data *dd_data; + unsigned long flags; uint32_t creg_val; /* allocate our bsg tracking structure */ @@ -1377,7 +1525,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, /* Check if the ndlp is active */ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - rc = -IOCB_ERROR; + rc = IOCB_ERROR; goto issue_ct_rsp_exit; } @@ -1385,7 +1533,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, * we respond */ if (!lpfc_nlp_get(ndlp)) { - rc = -IOCB_ERROR; + rc = IOCB_ERROR; goto issue_ct_rsp_exit; } @@ -1407,17 +1555,18 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, ctiocb->iocb_cmpl = NULL; ctiocb->iocb_flag |= LPFC_IO_LIBDFC; ctiocb->vport = phba->pport; + ctiocb->context1 = dd_data; + ctiocb->context2 = cmp; ctiocb->context3 = bmp; - + ctiocb->context_un.ndlp = ndlp; ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; - ctiocb->context2 = dd_data; - ctiocb->context1 = ndlp; + dd_data->type = TYPE_IOCB; + dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = ctiocb; - dd_data->context_un.iocb.rspiocbq = NULL; - dd_data->context_un.iocb.set_job = job; - dd_data->context_un.iocb.bmp = bmp; dd_data->context_un.iocb.ndlp = ndlp; + dd_data->context_un.iocb.rmp = NULL; + job->dd_data = dd_data; if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) { @@ -1431,8 +1580,19 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); - if (rc == IOCB_SUCCESS) + if (rc == IOCB_SUCCESS) { + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O had not been completed/released */ + if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) { + /* open up abort window to timeout handler */ + ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; + } + spin_unlock_irqrestore(&phba->hbalock, flags); return 0; /* done for now */ + } + + /* iocb failed so cleanup */ + job->dd_data = NULL; issue_ct_rsp_exit: lpfc_sli_release_iocbq(phba, ctiocb); @@ -1454,11 +1614,8 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job) struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *) job->request->rqst_data.h_vendor.vendor_cmd; struct ulp_bde64 *bpl; - struct lpfc_dmabuf *bmp = NULL; - struct scatterlist *sgel = NULL; - int request_nseg; - int numbde; - dma_addr_t busaddr; + struct lpfc_dmabuf *bmp = NULL, *cmp = NULL; + int bpl_entries; uint32_t tag = mgmt_resp->tag; unsigned long reqbfrcnt = (unsigned long)job->request_payload.payload_len; @@ -1486,30 +1643,28 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job) INIT_LIST_HEAD(&bmp->list); bpl = (struct ulp_bde64 *) bmp->virt; - request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; + bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64)); + cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, + 1, bpl, &bpl_entries); + if (!cmp) { + rc = -ENOMEM; + goto send_mgmt_rsp_free_bmp; } + lpfc_bsg_copy_data(cmp, &job->request_payload, + 
job->request_payload.payload_len, 1); - rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg); + rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries); if (rc == IOCB_SUCCESS) return 0; /* done for now */ - /* TBD need to handle a timeout */ - pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); rc = -EACCES; - lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + + lpfc_free_bsg_buffers(phba, cmp); send_mgmt_rsp_free_bmp: + if (bmp->virt) + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); send_mgmt_rsp_exit: /* make error code available to userspace */ @@ -1559,7 +1714,7 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba) scsi_block_requests(shost); } - while (pring->txcmplq_cnt) { + while (!list_empty(&pring->txcmplq)) { if (i++ > 500) /* wait up to 5 seconds */ break; msleep(10); @@ -2392,7 +2547,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, struct lpfc_sli_ct_request *ctreq = NULL; int ret_val = 0; int time_left; - int iocb_stat = 0; + int iocb_stat = IOCB_SUCCESS; unsigned long flags; *txxri = 0; @@ -2468,12 +2623,13 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; cmdiocbq->vport = phba->pport; + cmdiocbq->iocb_cmpl = NULL; iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); - if (iocb_stat) { + if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) { ret_val = -EIO; goto err_get_xri_exit; } @@ -2483,7 +2639,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, evt->wait_time_stamp = jiffies; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); if (list_empty(&evt->events_to_see)) ret_val = (time_left) ? -EINTR : -ETIMEDOUT; else { @@ -2520,7 +2677,7 @@ err_get_xri_exit: * @phba: Pointer to HBA context object * * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and. - * retruns the pointer to the buffer. + * returns the pointer to the buffer. 
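The diag-mode-enter change above also shows the drain loop now keyed off list_empty() instead of the removed txcmplq_cnt counter: poll every 10 ms and give up after 500 iterations, i.e. about five seconds. As a generic sketch:

    #include <linux/types.h>
    #include <linux/list.h>
    #include <linux/delay.h>

    /* Wait up to ~5 s for a queue to drain; true if it emptied. */
    static bool wait_for_drain(struct list_head *q)
    {
            int i = 0;

            while (!list_empty(q)) {
                    if (i++ > 500)          /* 500 * 10 ms ~= 5 s */
                            return false;
                    msleep(10);
            }
            return true;
    }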
**/ static struct lpfc_dmabuf * lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba) @@ -2856,7 +3013,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) uint8_t *ptr = NULL, *rx_databuf = NULL; int rc = 0; int time_left; - int iocb_stat; + int iocb_stat = IOCB_SUCCESS; unsigned long flags; void *dataout = NULL; uint32_t total_mem; @@ -3042,12 +3199,14 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) } cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; cmdiocbq->vport = phba->pport; + cmdiocbq->iocb_cmpl = NULL; iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq, (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT); - if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) && - (rsp->ulpStatus != IOCB_SUCCESS))) { + if ((iocb_stat != IOCB_SUCCESS) || + ((phba->sli_rev < LPFC_SLI_REV4) && + (rsp->ulpStatus != IOSTAT_SUCCESS))) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "3126 Failed loopback test issue iocb: " "iocb_stat:x%x\n", iocb_stat); @@ -3058,7 +3217,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) evt->waiting = 1; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); evt->waiting = 0; if (list_empty(&evt->events_to_see)) { rc = (time_left) ? -EINTR : -ETIMEDOUT; @@ -3101,7 +3261,7 @@ err_loopback_test_exit: lpfc_bsg_event_unref(evt); /* delete */ spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - if (cmdiocbq != NULL) + if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT)) lpfc_sli_release_iocbq(phba, cmdiocbq); if (rspiocbq != NULL) @@ -3193,13 +3353,7 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) unsigned long flags; uint8_t *pmb, *pmb_buf; - spin_lock_irqsave(&phba->ct_ev_lock, flags); dd_data = pmboxq->context1; - /* job already timed out? */ - if (!dd_data) { - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - return; - } /* * The outgoing buffer is readily referred from the dma buffer, @@ -3209,29 +3363,33 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; memcpy(pmb_buf, pmb, sizeof(MAILBOX_t)); - job = dd_data->context_un.mbox.set_job; + /* Determine if job has been aborted */ + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; + } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Copy the mailbox data to the job if it is still active */ + if (job) { size = job->reply_payload.payload_len; job->reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, pmb_buf, size); - /* need to hold the lock until we set job->dd_data to NULL - * to hold off the timeout handler returning to the mid-layer - * while we are still processing the job. 
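The loopback waits above now build their timeout with msecs_to_jiffies() rather than multiplying seconds by HZ inline; both yield jiffies, but the helper keeps the unit conversion explicit. The expression used by the patch, as a helper:

    #include <linux/jiffies.h>

    /* Seconds-to-jiffies, written the way the updated waits do it. */
    static inline unsigned long loopback_timeout(unsigned int fc_ratov,
                                                 unsigned int drvr_timeout)
    {
            return msecs_to_jiffies(1000 * ((fc_ratov * 2) + drvr_timeout));
    }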
- */ - job->dd_data = NULL; - dd_data->context_un.mbox.set_job = NULL; - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - } else { - dd_data->context_un.mbox.set_job = NULL; - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); } + dd_data->set_job = NULL; mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers); kfree(dd_data); + /* Complete the job if the job is still active */ + if (job) { job->reply->result = 0; job->job_done(job); @@ -3286,6 +3444,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, case MBX_DOWN_LOAD: case MBX_UPDATE_CFG: case MBX_KILL_BOARD: + case MBX_READ_TOPOLOGY: case MBX_LOAD_AREA: case MBX_LOAD_EXP_ROM: case MBX_BEACON: @@ -3316,7 +3475,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, } break; case MBX_READ_SPARM64: - case MBX_READ_TOPOLOGY: case MBX_REG_LOGIN: case MBX_REG_LOGIN64: case MBX_CONFIG_PORT: @@ -3377,19 +3535,22 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) struct lpfc_sli_config_mbox *sli_cfg_mbx; uint8_t *pmbx; - spin_lock_irqsave(&phba->ct_ev_lock, flags); dd_data = pmboxq->context1; - /* has the job already timed out? */ - if (!dd_data) { - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - job = NULL; - goto job_done_out; + + /* Determine if job has been aborted */ + spin_lock_irqsave(&phba->ct_ev_lock, flags); + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); /* * The outgoing buffer is readily referred from the dma buffer, * just need to get header part from mailboxq structure. */ + pmb = (uint8_t *)&pmboxq->u.mb; pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb; /* Copy the byte swapped response mailbox back to the user */ @@ -3406,21 +3567,18 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len); } - job = dd_data->context_un.mbox.set_job; + /* Complete the job if the job is still active */ + if (job) { size = job->reply_payload.payload_len; job->reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, pmb_buf, size); + /* result for successful */ job->reply->result = 0; - job->dd_data = NULL; - /* need to hold the lock util we set job->dd_data to NULL - * to hold off the timeout handler from midlayer to take - * any action. 
- */ - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2937 SLI_CONFIG ext-buffer maibox command " "(x%x/x%x) complete bsg job done, bsize:%d\n", @@ -3431,20 +3589,18 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) phba->mbox_ext_buf_ctx.mboxType, dma_ebuf, sta_pos_addr, phba->mbox_ext_buf_ctx.mbx_dmabuf, 0); - } else - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - -job_done_out: - if (!job) + } else { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2938 SLI_CONFIG ext-buffer maibox " "command (x%x/x%x) failure, rc:x%x\n", phba->mbox_ext_buf_ctx.nembType, phba->mbox_ext_buf_ctx.mboxType, rc); + } + + /* state change */ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE; kfree(dd_data); - return job; } @@ -3461,8 +3617,10 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct fc_bsg_job *job; + job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); + /* handle the BSG job with mailbox command */ - if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS) + if (!job) pmboxq->u.mb.mbxStatus = MBXERR_ERROR; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, @@ -3470,15 +3628,13 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) "complete, ctxState:x%x, mbxStatus:x%x\n", phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); - job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); - if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1) lpfc_bsg_mbox_ext_session_reset(phba); /* free base driver mailbox structure memory */ mempool_free(pmboxq, phba->mbox_mem_pool); - /* complete the bsg job if we have it */ + /* if the job is still active, call job done */ if (job) job->job_done(job); @@ -3498,8 +3654,10 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct fc_bsg_job *job; + job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); + /* handle the BSG job with the mailbox command */ - if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS) + if (!job) pmboxq->u.mb.mbxStatus = MBXERR_ERROR; lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, @@ -3507,13 +3665,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) "complete, ctxState:x%x, mbxStatus:x%x\n", phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus); - job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq); - /* free all memory, including dma buffers */ mempool_free(pmboxq, phba->mbox_mem_pool); lpfc_bsg_mbox_ext_session_reset(phba); - /* complete the bsg job if we have it */ + /* if the job is still active, call job done */ if (job) job->job_done(job); @@ -3759,9 +3915,9 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, /* context fields to callback function */ pmboxq->context1 = dd_data; dd_data->type = TYPE_MBOX; + dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; - dd_data->context_un.mbox.set_job = job; job->dd_data = dd_data; /* state change */ @@ -3928,14 +4084,14 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, /* context fields to callback function */ pmboxq->context1 = dd_data; dd_data->type = TYPE_MBOX; + dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx; - dd_data->context_un.mbox.set_job = job; job->dd_data = dd_data; /* state change */ - phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT; + phba->mbox_ext_buf_ctx.state = 
LPFC_BSG_MBOX_PORT; rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, @@ -3951,6 +4107,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job, } /* wait for additional external buffers */ + job->reply->result = 0; job->job_done(job); return SLI_CONFIG_HANDLED; @@ -3996,6 +4153,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, if (subsys == SLI_CONFIG_SUBSYS_FCOE) { switch (opcode) { case FCOE_OPCODE_READ_FCF: + case FCOE_OPCODE_GET_DPORT_RESULTS: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2957 Handled SLI_CONFIG " "subsys_fcoe, opcode:x%x\n", @@ -4004,6 +4162,8 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, nemb_mse, dmabuf); break; case FCOE_OPCODE_ADD_FCF: + case FCOE_OPCODE_SET_DPORT_MODE: + case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE: lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, "2958 Handled SLI_CONFIG " "subsys_fcoe, opcode:x%x\n", @@ -4268,9 +4428,9 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job, /* context fields to callback function */ pmboxq->context1 = dd_data; dd_data->type = TYPE_MBOX; + dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf; - dd_data->context_un.mbox.set_job = job; job->dd_data = dd_data; /* state change */ @@ -4455,7 +4615,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, uint8_t *from; uint32_t size; - /* in case no data is transferred */ job->reply->reply_payload_rcv_len = 0; @@ -4681,9 +4840,9 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, /* setup context field to pass wait_queue pointer to wake function */ pmboxq->context1 = dd_data; dd_data->type = TYPE_MBOX; + dd_data->set_job = job; dd_data->context_un.mbox.pmboxq = pmboxq; dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx; - dd_data->context_un.mbox.set_job = job; dd_data->context_un.mbox.ext = ext; dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen; @@ -4741,7 +4900,7 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job) if (job->request_len < sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) { lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, - "2737 Mix-and-match backward compability " + "2737 Mix-and-match backward compatibility " "between MBOX_REQ old size:%d and " "new request size:%d\n", (int)(job->request_len - @@ -4797,75 +4956,79 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, struct bsg_job_data *dd_data; struct fc_bsg_job *job; IOCB_t *rsp; - struct lpfc_dmabuf *bmp; + struct lpfc_dmabuf *bmp, *cmp, *rmp; struct lpfc_bsg_menlo *menlo; unsigned long flags; struct menlo_response *menlo_resp; + unsigned int rsp_size; int rc = 0; - spin_lock_irqsave(&phba->ct_ev_lock, flags); dd_data = cmdiocbq->context1; - if (!dd_data) { - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - return; - } - + cmp = cmdiocbq->context2; + bmp = cmdiocbq->context3; menlo = &dd_data->context_un.menlo; - job = menlo->set_job; - job->dd_data = NULL; /* so timeout handler does not reply */ - - spin_lock(&phba->hbalock); - cmdiocbq->iocb_flag |= LPFC_IO_WAKE; - if (cmdiocbq->context2 && rspiocbq) - memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, - &rspiocbq->iocb, sizeof(IOCB_t)); - spin_unlock(&phba->hbalock); - - bmp = menlo->bmp; - rspiocbq = menlo->rspiocbq; + rmp = menlo->rmp; rsp = &rspiocbq->iocb; - pci_unmap_sg(phba->pcidev, 
job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + /* Determine if job has been aborted */ + spin_lock_irqsave(&phba->ct_ev_lock, flags); + job = dd_data->set_job; + if (job) { + /* Prevent timeout handling from trying to abort job */ + job->dd_data = NULL; + } + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - /* always return the xri, this would be used in the case - * of a menlo download to allow the data to be sent as a continuation - * of the exchange. - */ - menlo_resp = (struct menlo_response *) - job->reply->reply_data.vendor_reply.vendor_rsp; - menlo_resp->xri = rsp->ulpContext; - if (rsp->ulpStatus) { - if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { - switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { - case IOERR_SEQUENCE_TIMEOUT: - rc = -ETIMEDOUT; - break; - case IOERR_INVALID_RPI: - rc = -EFAULT; - break; - default: + /* Copy the job data or set the failing status for the job */ + + if (job) { + /* always return the xri, this would be used in the case + * of a menlo download to allow the data to be sent as a + * continuation of the exchange. + */ + + menlo_resp = (struct menlo_response *) + job->reply->reply_data.vendor_reply.vendor_rsp; + menlo_resp->xri = rsp->ulpContext; + if (rsp->ulpStatus) { + if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { + switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) { + case IOERR_SEQUENCE_TIMEOUT: + rc = -ETIMEDOUT; + break; + case IOERR_INVALID_RPI: + rc = -EFAULT; + break; + default: + rc = -EACCES; + break; + } + } else { rc = -EACCES; - break; } - } else - rc = -EACCES; - } else - job->reply->reply_payload_rcv_len = - rsp->un.genreq64.bdl.bdeSize; + } else { + rsp_size = rsp->un.genreq64.bdl.bdeSize; + job->reply->reply_payload_rcv_len = + lpfc_bsg_copy_data(rmp, &job->reply_payload, + rsp_size, 0); + } + + } - lpfc_mbuf_free(phba, bmp->virt, bmp->phys); - lpfc_sli_release_iocbq(phba, rspiocbq); lpfc_sli_release_iocbq(phba, cmdiocbq); + lpfc_free_bsg_buffers(phba, cmp); + lpfc_free_bsg_buffers(phba, rmp); + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); kfree(dd_data); - /* make error code available to userspace */ - job->reply->result = rc; - /* complete the job back to userspace */ - job->job_done(job); - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + /* Complete the job if active */ + + if (job) { + job->reply->result = rc; + job->job_done(job); + } + return; } @@ -4883,17 +5046,14 @@ lpfc_menlo_cmd(struct fc_bsg_job *job) { struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; struct lpfc_hba *phba = vport->phba; - struct lpfc_iocbq *cmdiocbq, *rspiocbq; - IOCB_t *cmd, *rsp; + struct lpfc_iocbq *cmdiocbq; + IOCB_t *cmd; int rc = 0; struct menlo_command *menlo_cmd; struct menlo_response *menlo_resp; - struct lpfc_dmabuf *bmp = NULL; + struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL; int request_nseg; int reply_nseg; - struct scatterlist *sgel = NULL; - int numbde; - dma_addr_t busaddr; struct bsg_job_data *dd_data; struct ulp_bde64 *bpl = NULL; @@ -4948,50 +5108,38 @@ lpfc_menlo_cmd(struct fc_bsg_job *job) goto free_dd; } - cmdiocbq = lpfc_sli_get_iocbq(phba); - if (!cmdiocbq) { + bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); + if (!bmp->virt) { rc = -ENOMEM; goto free_bmp; } - rspiocbq = lpfc_sli_get_iocbq(phba); - if (!rspiocbq) { - rc = -ENOMEM; - goto free_cmdiocbq; - } - - rsp = &rspiocbq->iocb; + INIT_LIST_HEAD(&bmp->list); - bmp->virt = lpfc_mbuf_alloc(phba, 0, 
&bmp->phys); - if (!bmp->virt) { + bpl = (struct ulp_bde64 *)bmp->virt; + request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64); + cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len, + 1, bpl, &request_nseg); + if (!cmp) { rc = -ENOMEM; - goto free_rspiocbq; + goto free_bmp; } + lpfc_bsg_copy_data(cmp, &job->request_payload, + job->request_payload.payload_len, 1); - INIT_LIST_HEAD(&bmp->list); - bpl = (struct ulp_bde64 *) bmp->virt; - request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; + bpl += request_nseg; + reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg; + rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0, + bpl, &reply_nseg); + if (!rmp) { + rc = -ENOMEM; + goto free_cmp; } - reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); - for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { - busaddr = sg_dma_address(sgel); - bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; - bpl->tus.f.bdeSize = sg_dma_len(sgel); - bpl->tus.w = cpu_to_le32(bpl->tus.w); - bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); - bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); - bpl++; + cmdiocbq = lpfc_sli_get_iocbq(phba); + if (!cmdiocbq) { + rc = -ENOMEM; + goto free_rmp; } cmd = &cmdiocbq->iocb; @@ -5013,11 +5161,10 @@ lpfc_menlo_cmd(struct fc_bsg_job *job) cmdiocbq->vport = phba->pport; /* We want the firmware to timeout before we do */ cmd->ulpTimeout = MENLO_TIMEOUT - 5; - cmdiocbq->context3 = bmp; - cmdiocbq->context2 = rspiocbq; cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; cmdiocbq->context1 = dd_data; - cmdiocbq->context2 = rspiocbq; + cmdiocbq->context2 = cmp; + cmdiocbq->context3 = bmp; if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) { cmd->ulpCommand = CMD_GEN_REQUEST64_CR; cmd->ulpPU = MENLO_PU; /* 3 */ @@ -5031,29 +5178,25 @@ lpfc_menlo_cmd(struct fc_bsg_job *job) } dd_data->type = TYPE_MENLO; + dd_data->set_job = job; dd_data->context_un.menlo.cmdiocbq = cmdiocbq; - dd_data->context_un.menlo.rspiocbq = rspiocbq; - dd_data->context_un.menlo.set_job = job; - dd_data->context_un.menlo.bmp = bmp; + dd_data->context_un.menlo.rmp = rmp; + job->dd_data = dd_data; rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, MENLO_TIMEOUT - 5); if (rc == IOCB_SUCCESS) return 0; /* done for now */ - /* iocb failed so cleanup */ - pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, - job->request_payload.sg_cnt, DMA_TO_DEVICE); - pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, - job->reply_payload.sg_cnt, DMA_FROM_DEVICE); - - lpfc_mbuf_free(phba, bmp->virt, bmp->phys); - -free_rspiocbq: - lpfc_sli_release_iocbq(phba, rspiocbq); -free_cmdiocbq: lpfc_sli_release_iocbq(phba, cmdiocbq); + +free_rmp: + lpfc_free_bsg_buffers(phba, rmp); +free_cmp: + lpfc_free_bsg_buffers(phba, cmp); free_bmp: + if (bmp->virt) + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); free_dd: kfree(dd_data); @@ -5162,70 +5305,99 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq 
*cmdiocb; - struct lpfc_bsg_event *evt; - struct lpfc_bsg_iocb *iocb; - struct lpfc_bsg_mbox *mbox; - struct lpfc_bsg_menlo *menlo; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; struct bsg_job_data *dd_data; unsigned long flags; + int rc = 0; + LIST_HEAD(completions); + struct lpfc_iocbq *check_iocb, *next_iocb; + + /* if job's driver data is NULL, the command completed or is in the + * process of completing. In this case, return status to request + * so the timeout is retried. This avoids double completion issues + * and the request will be pulled off the timer queue when the + * command's completion handler executes. Otherwise, prevent the + * command's completion handler from executing the job done callback + * and continue processing to abort the outstanding command. + */ spin_lock_irqsave(&phba->ct_ev_lock, flags); dd_data = (struct bsg_job_data *)job->dd_data; - /* timeout and completion crossed paths if no dd_data */ - if (!dd_data) { + if (dd_data) { + dd_data->set_job = NULL; + job->dd_data = NULL; + } else { spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - return 0; + return -EAGAIN; } switch (dd_data->type) { case TYPE_IOCB: - iocb = &dd_data->context_un.iocb; - cmdiocb = iocb->cmdiocbq; - /* hint to completion handler that the job timed out */ - job->reply->result = -EAGAIN; + /* Check to see if IOCB was issued to the port or not. If not, + * remove it from the txq queue and call cancel iocbs. + * Otherwise, call abort iotag + */ + cmdiocb = dd_data->context_un.iocb.cmdiocbq; spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - /* this will call our completion handler */ - spin_lock_irq(&phba->hbalock); - lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); - spin_unlock_irq(&phba->hbalock); + + spin_lock_irqsave(&phba->hbalock, flags); + /* make sure the I/O abort window is still open */ + if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return -EAGAIN; + } + list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, + list) { + if (check_iocb == cmdiocb) { + list_move_tail(&check_iocb->list, &completions); + break; + } + } + if (list_empty(&completions)) + lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (!list_empty(&completions)) { + lpfc_sli_cancel_iocbs(phba, &completions, + IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); + } break; + case TYPE_EVT: - evt = dd_data->context_un.evt; - /* this event has no job anymore */ - evt->set_job = NULL; - job->dd_data = NULL; - job->reply->reply_payload_rcv_len = 0; - /* Return -EAGAIN which is our way of signallying the - * app to retry.
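Restated for readability (the flattened hunk above is hard to scan), the new TYPE_IOCB timeout path boils down to: if the command never left the software txq it is cancelled locally, otherwise it is aborted by iotag, and the LPFC_IO_CMD_OUTSTANDING test keeps the abort window honest. Condensed from the branch above, assuming the same locals:

LIST_HEAD(completions);
struct lpfc_iocbq *check_iocb, *next_iocb;

spin_lock_irqsave(&phba->hbalock, flags);
if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
	/* completion already in flight; let it finish and retry */
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return -EAGAIN;
}
list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, list) {
	if (check_iocb == cmdiocb) {
		/* never issued to the port: cancel it locally */
		list_move_tail(&check_iocb->list, &completions);
		break;
	}
}
if (list_empty(&completions))
	/* already at the port: request a firmware abort */
	lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
spin_unlock_irqrestore(&phba->hbalock, flags);

if (!list_empty(&completions))
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);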
- */ - job->reply->result = -EAGAIN; spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - job->job_done(job); break; + case TYPE_MBOX: - mbox = &dd_data->context_un.mbox; - /* this mbox has no job anymore */ - mbox->set_job = NULL; - job->dd_data = NULL; - job->reply->reply_payload_rcv_len = 0; - job->reply->result = -EAGAIN; - /* the mbox completion handler can now be run */ - spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - job->job_done(job); + /* Update the ext buf ctx state if needed */ + if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT) phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS; + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); break; case TYPE_MENLO: - menlo = &dd_data->context_un.menlo; - cmdiocb = menlo->cmdiocbq; - /* hint to completion handler that the job timed out */ - job->reply->result = -EAGAIN; + /* Check to see if IOCB was issued to the port or not. If not, + * remove it from the txq queue and call cancel iocbs. + * Otherwise, call abort iotag. + */ + cmdiocb = dd_data->context_un.menlo.cmdiocbq; spin_unlock_irqrestore(&phba->ct_ev_lock, flags); - /* this will call our completion handler */ - spin_lock_irq(&phba->hbalock); - lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); - spin_unlock_irq(&phba->hbalock); + + spin_lock_irqsave(&phba->hbalock, flags); + list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, + list) { + if (check_iocb == cmdiocb) { + list_move_tail(&check_iocb->list, &completions); + break; + } + } + if (list_empty(&completions)) + lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (!list_empty(&completions)) { + lpfc_sli_cancel_iocbs(phba, &completions, + IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); + } break; default: spin_unlock_irqrestore(&phba->ct_ev_lock, flags); @@ -5236,5 +5408,5 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) * otherwise an error message will be displayed on the console * so always return success (zero) */ - return 0; + return rc; } diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index 67f7d0a160d..928ef609f36 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2010-2012 Emulex. All rights reserved. * + * Copyright (C) 2010-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -231,6 +231,8 @@ struct lpfc_sli_config_emb0_subsys { #define SLI_CONFIG_SUBSYS_FCOE 0x0C #define FCOE_OPCODE_READ_FCF 0x08 #define FCOE_OPCODE_ADD_FCF 0x09 +#define FCOE_OPCODE_SET_DPORT_MODE 0x27 +#define FCOE_OPCODE_GET_DPORT_RESULTS 0x28 }; struct lpfc_sli_config_emb1_subsys { diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 76ca65dae78..db5604f01a1 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -106,6 +106,7 @@ void lpfc_cleanup_discovery_resources(struct lpfc_vport *); void lpfc_cleanup(struct lpfc_vport *); void lpfc_disc_timeout(unsigned long); +int lpfc_unregister_fcf_prep(struct lpfc_hba *); struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); void lpfc_worker_wake_up(struct lpfc_hba *); @@ -186,6 +187,11 @@ void lpfc_offline_prep(struct lpfc_hba *, int); void lpfc_offline(struct lpfc_hba *); void lpfc_reset_hba(struct lpfc_hba *); +int lpfc_fof_queue_create(struct lpfc_hba *); +int lpfc_fof_queue_setup(struct lpfc_hba *); +int lpfc_fof_queue_destroy(struct lpfc_hba *); +irqreturn_t lpfc_sli4_fof_intr_handler(int, void *); + int lpfc_sli_setup(struct lpfc_hba *); int lpfc_sli_queue_setup(struct lpfc_hba *); @@ -241,6 +247,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *); int lpfc_mem_alloc(struct lpfc_hba *, int align); +int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *); void lpfc_mem_free(struct lpfc_hba *); void lpfc_mem_free_all(struct lpfc_hba *); void lpfc_stop_vport_timers(struct lpfc_vport *); @@ -282,6 +289,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); +void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba); void lpfc_sli_hba_iocb_abort(struct lpfc_hba *); void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, @@ -303,6 +311,9 @@ int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *, int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd); int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t, uint64_t, lpfc_ctx_cmd); +int +lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *, + uint16_t, uint64_t, lpfc_ctx_cmd); void lpfc_mbox_timeout(unsigned long); void lpfc_mbox_timeout_handler(struct lpfc_hba *); @@ -398,7 +409,6 @@ void lpfc_fabric_block_timeout(unsigned long); void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); void lpfc_rampdown_queue_depth(struct lpfc_hba *); void lpfc_ramp_down_queue_handler(struct lpfc_hba *); -void lpfc_ramp_up_queue_handler(struct lpfc_hba *); void lpfc_scsi_dev_block(struct lpfc_hba *); void @@ -469,3 +479,21 @@ int lpfc_sli4_xri_sgl_update(struct lpfc_hba *); void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); +void lpfc_sli4_offline_eratt(struct lpfc_hba *); + +struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *, + struct lpfc_name *, + struct lpfc_name *, + uint64_t, bool); +void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*); +struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *, + struct list_head *list, + struct lpfc_name *, + struct lpfc_name *, uint64_t); +bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *, + struct lpfc_name *, uint64_t); +bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *, + struct lpfc_name *, uint64_t); +bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *, + struct lpfc_name *, uint64_t *, struct lpfc_name *, + struct lpfc_name *, uint64_t *, uint32_t *); diff --git 
a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 7bff3a19af5..da61d8dc044 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2010 Emulex. All rights reserved. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -280,7 +280,7 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb) buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3; lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); kfree(buf_ptr); - ctiocb->context1 = NULL; + ctiocb->context3 = NULL; } lpfc_sli_release_iocbq(phba, ctiocb); return 0; @@ -895,7 +895,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (irsp->ulpStatus) { lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, - "0268 NS cmd %x Error (%d %d)\n", + "0268 NS cmd x%x Error (x%x x%x)\n", cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]); if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && @@ -1811,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport) if (init_utsname()->nodename[0] != '\0') lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else - mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); + mod_timer(&vport->fc_fdmitmo, jiffies + + msecs_to_jiffies(1000 * 60)); } return; } diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index f63f5ff7f27..b0aedce3f54 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007-2012 Emulex. All rights reserved. * + * Copyright (C) 2007-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -1165,22 +1165,8 @@ out: static loff_t lpfc_debugfs_lseek(struct file *file, loff_t off, int whence) { - struct lpfc_debug *debug; - loff_t pos = -1; - - debug = file->private_data; - - switch (whence) { - case 0: - pos = off; - break; - case 1: - pos = file->f_pos + off; - break; - case 2: - pos = debug->len - off; - } - return (pos < 0 || pos > debug->len) ? 
-EINVAL : (file->f_pos = pos); + struct lpfc_debug *debug = file->private_data; + return fixed_size_llseek(file, off, whence, debug->len); } /** @@ -2294,6 +2280,104 @@ proc_cq: } } + if (phba->cfg_fof) { + /* FOF EQ */ + qp = phba->sli4_hba.fof_eq; + if (!qp) + goto out; + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\nFOF EQ info: " + "EQ-STAT[max:x%x noE:x%x " + "bs:x%x proc:x%llx]\n", + qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "EQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, + qp->entry_count, + qp->entry_size, + qp->host_index, + qp->hba_index); + + /* Reset max counter */ + qp->EQ_max_eqe = 0; + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + if (len >= max_cnt) + goto too_big; + } + + if (phba->cfg_fof) { + + /* OAS CQ */ + qp = phba->sli4_hba.oas_cq; + if (qp) { + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tOAS CQ info: "); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "AssocEQID[%02d]: " + "CQ STAT[max:x%x relw:x%x " + "xabt:x%x wq:x%llx]\n", + qp->assoc_qid, + qp->q_cnt_1, qp->q_cnt_2, + qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tCQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, qp->entry_count, + qp->entry_size, qp->host_index, + qp->hba_index); + + /* Reset max counter */ + qp->CQ_max_cqe = 0; + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + if (len >= max_cnt) + goto too_big; + } + + /* OAS WQ */ + qp = phba->sli4_hba.oas_wq; + if (qp) { + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tOAS WQ info: "); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "AssocCQID[%02d]: " + "WQ-STAT[oflow:x%x posted:x%llx]\n", + qp->assoc_qid, + qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tWQID[%02d], " + "QE-CNT[%04d], QE-SIZE[%04d], " + "HOST-IDX[%04d], PORT-IDX[%04d]", + qp->queue_id, + qp->entry_count, + qp->entry_size, + qp->host_index, + qp->hba_index); + + len += snprintf(pbuffer+len, + LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + if (len >= max_cnt) + goto too_big; + } + } +out: spin_unlock_irq(&phba->hbalock); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); @@ -3941,6 +4025,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) struct lpfc_hba *phba = vport->phba; char name[64]; uint32_t num, i; + bool pport_setup = false; if (!lpfc_debugfs_enable) return; @@ -3961,6 +4046,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) /* Setup funcX directory for specific HBA PCI function */ snprintf(name, sizeof(name), "fn%d", phba->brd_no); if (!phba->hba_debugfs_root) { + pport_setup = true; phba->hba_debugfs_root = debugfs_create_dir(name, lpfc_debugfs_root); if (!phba->hba_debugfs_root) { @@ -4015,7 +4101,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) goto debug_failed; } } else - phba->debug_dumpHBASlim = NULL; + phba->debug_dumpHostSlim = NULL; /* Setup dumpData */ snprintf(name, sizeof(name), "dumpData"); @@ -4253,6 +4339,14 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } /* + * The following section is for additional directories/files for the + * physical port. 
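One remark on the lpfc_debugfs_lseek() conversion at the top of this hunk: the removed switch used the raw whence values 0/1/2, and its case 2 computed debug->len - off rather than the conventional end-relative len + off. fixed_size_llseek() (a generic helper available since roughly v3.11) supplies the standard semantics for a fixed-length buffer. Approximately, as a paraphrase rather than the kernel's actual implementation:

/* Approximate behavior of fixed_size_llseek(file, off, whence, size);
 * sketch only - the real helper delegates to generic llseek code.
 */
static loff_t fixed_size_llseek_sketch(struct file *file, loff_t off,
				       int whence, loff_t size)
{
	loff_t pos;

	switch (whence) {
	case SEEK_SET: pos = off; break;		/* absolute */
	case SEEK_CUR: pos = file->f_pos + off; break;	/* relative */
	case SEEK_END: pos = size + off; break;		/* off is usually <= 0 */
	default: return -EINVAL;
	}
	if (pos < 0 || pos > size)
		return -EINVAL;
	return file->f_pos = pos;
}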
+ */ + + if (!pport_setup) + goto debug_failed; + + /* * iDiag debugfs root entry points for SLI4 device only */ if (phba->sli_rev < LPFC_SLI_REV4) diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index af49fb03dbb..1a6fe524940 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -116,7 +116,7 @@ struct lpfc_nodelist { atomic_t cmd_pending; uint32_t cmd_qdepth; unsigned long last_change_time; - struct lpfc_node_rrqs active_rrqs; + unsigned long *active_rrqs_xri_bitmap; struct lpfc_scsicmd_bkt *lat_data; /* Latency data */ }; struct lpfc_node_rrq { @@ -154,6 +154,7 @@ struct lpfc_node_rrq { #define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ #define NLP_SC_REQ 0x20000000 /* Target requires authentication */ +#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ #define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ /* ndlp usage management macros */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 08d156a9094..7a5d81a65be 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2012 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -29,6 +29,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> + #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" @@ -238,7 +239,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, icmd->un.elsreq64.remoteID = did; /* DID */ icmd->ulpCommand = CMD_ELS_REQUEST64_CR; - icmd->ulpTimeout = phba->fc_ratov * 2; + if (elscmd == ELS_CMD_FLOGI) + icmd->ulpTimeout = FF_DEF_RATOV * 2; + else + icmd->ulpTimeout = phba->fc_ratov * 2; } else { icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys); @@ -308,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, /* Xmit ELS command <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0116 Xmit ELS command x%x to remote " - "NPORT x%x I/O tag: x%x, port state: x%x\n", + "NPORT x%x I/O tag: x%x, port state:x%x" + " fc_flag:x%x\n", elscmd, did, elsiocb->iotag, - vport->port_state); + vport->port_state, + vport->fc_flag); } else { /* Xmit ELS response <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0117 Xmit ELS response x%x to remote " - "NPORT x%x I/O tag: x%x, size: x%x\n", + "NPORT x%x I/O tag: x%x, size: x%x " + "port_state x%x fc_flag x%x\n", elscmd, ndlp->nlp_DID, elsiocb->iotag, - cmdSize); + cmdSize, vport->port_state, + vport->fc_flag); } return elsiocb; @@ -484,6 +492,7 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport) vport->port_state = LPFC_FABRIC_CFG_LINK; memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam)); lpfc_reg_vfi(mboxq, vport, dmabuf->phys); + mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; mboxq->vport = vport; mboxq->context1 = dmabuf; @@ -700,6 +709,20 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } } + /* + * For FC we need to do some special processing because of the SLI + * Port's default settings of the Common Service Parameters. + */ + if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) { + /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ + if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed) + lpfc_unregister_fcf_prep(phba); + + /* This should just update the VFI CSPs*/ + if (vport->fc_flag & FC_VFI_REGISTERED) + lpfc_issue_reg_vfi(vport); + } + if (fabric_param_changed && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { @@ -894,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_PT2PT; spin_unlock_irq(shost->host_lock); + /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ + if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + + /* The FC_VFI_REGISTERED flag will get clear in the cmpl + * handler for unreg_vfi, but if we don't force the + * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be + * built with the update bit set instead of just the vp bit to + * change the Nport ID. We need to have the vp set and the + * Upd cleared on topology changes. 
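The sequence this comment motivates shows up at least three times in the patch (FLOGI completion, FLOGI receive, PLOGI receive); the hunk that follows is one instance. Condensed, the recovery on a physical-port or topology change is:

/* Condensed pattern, not new driver code: rebuild the fabric login so
 * the next reg_vfi carries the VP bit (new NPort ID) rather than Upd.
 */
lpfc_unregister_fcf_prep(phba);		/* unreg VFI and all VPIs/RPIs */

spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_VFI_REGISTERED;	/* force a fresh registration */
spin_unlock_irq(shost->host_lock);

phba->fc_topology_changed = 0;
lpfc_issue_reg_vfi(vport);		/* re-register with the new DID */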
+ */ + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + lpfc_issue_reg_vfi(vport); + } /* Start discovery - this should just do CLEAR_LA */ lpfc_disc_start(vport); @@ -1015,9 +1055,19 @@ stop_rr_fcf_flogi: vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; if ((phba->sli_rev == LPFC_SLI_REV4) && (!(vport->fc_flag & FC_VFI_REGISTERED) || - (vport->fc_prevDID != vport->fc_myDID))) { - if (vport->fc_flag & FC_VFI_REGISTERED) - lpfc_sli4_unreg_all_rpis(vport); + (vport->fc_prevDID != vport->fc_myDID) || + phba->fc_topology_changed)) { + if (vport->fc_flag & FC_VFI_REGISTERED) { + if (phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + } else { + lpfc_sli4_unreg_all_rpis(vport); + } + } lpfc_issue_reg_vfi(vport); lpfc_nlp_put(ndlp); goto out; @@ -1039,10 +1089,11 @@ stop_rr_fcf_flogi: /* FLOGI completes successfully */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0101 FLOGI completes successfully " - "Data: x%x x%x x%x x%x\n", + "0101 FLOGI completes successfully, I/O tag:x%x, " + "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag, irsp->un.ulpWord[4], sp->cmn.e_d_tov, - sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); + sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, + vport->port_state, vport->fc_flag); if (vport->port_state == LPFC_FLOGI) { /* @@ -1465,7 +1516,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, uint32_t rc, keepDID = 0; int put_node; int put_rport; - struct lpfc_node_rrqs rrq; + unsigned long *active_rrqs_xri_bitmap = NULL; /* Fabric nodes can have the same WWPN so we don't bother searching * by WWPN. Just return the ndlp that was given to us. 
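lpfc_plogi_confirm_nport() below is the heaviest consumer of the new RRQ representation: the fixed-size struct lpfc_node_rrqs formerly embedded in the ndlp (and copied via an on-stack struct) becomes a runtime-sized bitmap drawn from phba->active_rrq_pool. The allocation discipline, pulled into a hypothetical helper for clarity (the driver open-codes it at each site):

static unsigned long *rrq_scratch_bitmap(struct lpfc_hba *phba)
{
	unsigned long *bmp = NULL;

	if (phba->sli_rev == LPFC_SLI_REV4) {	/* RRQs are SLI4-only */
		bmp = mempool_alloc(phba->active_rrq_pool, GFP_KERNEL);
		if (bmp)
			memset(bmp, 0, phba->cfg_rrq_xri_bitmap_sz);
	}
	/* caller must mempool_free(bmp, phba->active_rrq_pool) on every
	 * exit path - note how each early return below does exactly that */
	return bmp;
}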
@@ -1483,7 +1534,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) return ndlp; - memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap)); + if (phba->sli_rev == LPFC_SLI_REV4) { + active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, + GFP_KERNEL); + if (active_rrqs_xri_bitmap) + memset(active_rrqs_xri_bitmap, 0, + phba->cfg_rrq_xri_bitmap_sz); + } lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n", @@ -1492,41 +1549,58 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, if (!new_ndlp) { rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); - if (!rc) + if (!rc) { + if (active_rrqs_xri_bitmap) + mempool_free(active_rrqs_xri_bitmap, + phba->active_rrq_pool); return ndlp; + } new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); - if (!new_ndlp) + if (!new_ndlp) { + if (active_rrqs_xri_bitmap) + mempool_free(active_rrqs_xri_bitmap, + phba->active_rrq_pool); return ndlp; + } lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); } else if (!NLP_CHK_NODE_ACT(new_ndlp)) { rc = memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)); - if (!rc) + if (!rc) { + if (active_rrqs_xri_bitmap) + mempool_free(active_rrqs_xri_bitmap, + phba->active_rrq_pool); return ndlp; + } new_ndlp = lpfc_enable_node(vport, new_ndlp, NLP_STE_UNUSED_NODE); - if (!new_ndlp) + if (!new_ndlp) { + if (active_rrqs_xri_bitmap) + mempool_free(active_rrqs_xri_bitmap, + phba->active_rrq_pool); return ndlp; + } keepDID = new_ndlp->nlp_DID; - if (phba->sli_rev == LPFC_SLI_REV4) - memcpy(&rrq.xri_bitmap, - &new_ndlp->active_rrqs.xri_bitmap, - sizeof(new_ndlp->active_rrqs.xri_bitmap)); + if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap) + memcpy(active_rrqs_xri_bitmap, + new_ndlp->active_rrqs_xri_bitmap, + phba->cfg_rrq_xri_bitmap_sz); } else { keepDID = new_ndlp->nlp_DID; - if (phba->sli_rev == LPFC_SLI_REV4) - memcpy(&rrq.xri_bitmap, - &new_ndlp->active_rrqs.xri_bitmap, - sizeof(new_ndlp->active_rrqs.xri_bitmap)); + if (phba->sli_rev == LPFC_SLI_REV4 && + active_rrqs_xri_bitmap) + memcpy(active_rrqs_xri_bitmap, + new_ndlp->active_rrqs_xri_bitmap, + phba->cfg_rrq_xri_bitmap_sz); } lpfc_unreg_rpi(vport, new_ndlp); new_ndlp->nlp_DID = ndlp->nlp_DID; new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; if (phba->sli_rev == LPFC_SLI_REV4) - memcpy(new_ndlp->active_rrqs.xri_bitmap, - &ndlp->active_rrqs.xri_bitmap, - sizeof(ndlp->active_rrqs.xri_bitmap)); + memcpy(new_ndlp->active_rrqs_xri_bitmap, + ndlp->active_rrqs_xri_bitmap, + phba->cfg_rrq_xri_bitmap_sz); if (ndlp->nlp_flag & NLP_NPR_2B_DISC) new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; @@ -1568,10 +1642,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, /* Two ndlps cannot have the same did on the nodelist */ ndlp->nlp_DID = keepDID; - if (phba->sli_rev == LPFC_SLI_REV4) - memcpy(&ndlp->active_rrqs.xri_bitmap, - &rrq.xri_bitmap, - sizeof(ndlp->active_rrqs.xri_bitmap)); + if (phba->sli_rev == LPFC_SLI_REV4 && + active_rrqs_xri_bitmap) + memcpy(ndlp->active_rrqs_xri_bitmap, + active_rrqs_xri_bitmap, + phba->cfg_rrq_xri_bitmap_sz); lpfc_drop_node(vport, ndlp); } else { @@ -1583,10 +1658,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, /* Two ndlps cannot have the same did */ ndlp->nlp_DID = keepDID; - if (phba->sli_rev == LPFC_SLI_REV4) - memcpy(&ndlp->active_rrqs.xri_bitmap, - &rrq.xri_bitmap, - sizeof(ndlp->active_rrqs.xri_bitmap)); + if (phba->sli_rev == LPFC_SLI_REV4 && + 
active_rrqs_xri_bitmap) + memcpy(ndlp->active_rrqs_xri_bitmap, + active_rrqs_xri_bitmap, + phba->cfg_rrq_xri_bitmap_sz); /* Since we are swapping the ndlp passed in with the new one * and the did has already been swapped, copy over state. @@ -1617,6 +1693,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, put_device(&rport->dev); } } + if (phba->sli_rev == LPFC_SLI_REV4 && + active_rrqs_xri_bitmap) + mempool_free(active_rrqs_xri_bitmap, + phba->active_rrq_pool); return new_ndlp; } @@ -2071,6 +2151,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } npr->estabImagePair = 1; npr->readXferRdyDis = 1; + if (vport->cfg_first_burst_size) + npr->writeXferRdyDis = 1; /* For FCP support */ npr->prliType = PRLI_FCP_TYPE; @@ -2719,6 +2801,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) /* This will cause the callback-function lpfc_cmpl_els_cmd to * trigger the release of node. */ + lpfc_nlp_put(ndlp); return 0; } @@ -5032,6 +5115,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct ls_rjt stat; uint32_t cmd, did; int rc; + uint32_t fc_flag = 0; + uint32_t port_state = 0; cmd = *lp++; sp = (struct serv_parm *) lp; @@ -5098,16 +5183,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, * will be. */ vport->fc_myDID = PT2PT_LocalID; - } + } else + vport->fc_myDID = PT2PT_RemoteID; /* * The vport state should go to LPFC_FLOGI only * AFTER we issue a FLOGI, not receive one. */ spin_lock_irq(shost->host_lock); + fc_flag = vport->fc_flag; + port_state = vport->port_state; vport->fc_flag |= FC_PT2PT; vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->port_state = LPFC_FLOGI; spin_unlock_irq(shost->host_lock); + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3311 Rcv Flogi PS x%x new PS x%x " + "fc_flag x%x new fc_flag x%x\n", + port_state, vport->port_state, + fc_flag, vport->fc_flag); /* * We temporarily set fc_myDID to make it look like we are @@ -6129,11 +6223,11 @@ lpfc_els_timeout(unsigned long ptr) spin_lock_irqsave(&vport->work_port_lock, iflag); tmo_posted = vport->work_port_events & WORKER_ELS_TMO; - if (!tmo_posted) + if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) vport->work_port_events |= WORKER_ELS_TMO; spin_unlock_irqrestore(&vport->work_port_lock, iflag); - if (!tmo_posted) + if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) lpfc_worker_wake_up(phba); return; } @@ -6159,19 +6253,26 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) uint32_t els_command = 0; uint32_t timeout; uint32_t remote_ID = 0xffffffff; - LIST_HEAD(txcmplq_completions); LIST_HEAD(abort_list); timeout = (uint32_t)(phba->fc_ratov << 1); pring = &phba->sli.ring[LPFC_ELS_RING]; - + if ((phba->pport->load_flag & FC_UNLOADING)) + return; spin_lock_irq(&phba->hbalock); - list_splice_init(&pring->txcmplq, &txcmplq_completions); - spin_unlock_irq(&phba->hbalock); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + + if ((phba->pport->load_flag & FC_UNLOADING)) { + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); + spin_unlock_irq(&phba->hbalock); + return; + } - list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) { + list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { cmd = &piocb->iocb; if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || @@ -6210,11 +6311,12 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) } list_add_tail(&piocb->dlist, &abort_list); } - spin_lock_irq(&phba->hbalock); - 
list_splice(&txcmplq_completions, &pring->txcmplq); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); spin_unlock_irq(&phba->hbalock); list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { + cmd = &piocb->iocb; lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "0127 ELS timeout Data: x%x x%x x%x " "x%x\n", els_command, @@ -6225,8 +6327,10 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) spin_unlock_irq(&phba->hbalock); } - if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt) - mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); + if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq)) + if (!(phba->pport->load_flag & FC_UNLOADING)) + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); } /** @@ -6252,15 +6356,50 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) void lpfc_els_flush_cmd(struct lpfc_vport *vport) { - LIST_HEAD(completions); + LIST_HEAD(abort_list); struct lpfc_hba *phba = vport->phba; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; struct lpfc_iocbq *tmp_iocb, *piocb; IOCB_t *cmd = NULL; lpfc_fabric_abort_vport(vport); + /* + * For SLI3, only the hbalock is required. But SLI4 needs to coordinate + * with the ring insert operation. Because lpfc_sli_issue_abort_iotag + * ultimately grabs the ring_lock, the driver must splice the list into + * a working list and release the locks before calling the abort. + */ + spin_lock_irq(&phba->hbalock); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + + list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { + if (piocb->iocb_flag & LPFC_IO_LIBDFC) + continue; + + if (piocb->vport != vport) + continue; + list_add_tail(&piocb->dlist, &abort_list); + } + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); + spin_unlock_irq(&phba->hbalock); + /* Abort each iocb on the aborted list and remove the dlist links. 
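That comment is the key locking rule of this hunk. As code, the new shape of lpfc_els_flush_cmd() is: harvest matching IOCBs onto a private list while holding hbalock (plus the SLI4 ring_lock), drop both locks, then abort each entry, retaking hbalock only around the abort call, since lpfc_sli_issue_abort_iotag() grabs the ring_lock itself:

LIST_HEAD(abort_list);

spin_lock_irq(&phba->hbalock);
if (phba->sli_rev == LPFC_SLI_REV4)
	spin_lock(&pring->ring_lock);	/* SLI4 rings have their own lock */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
	if (piocb->iocb_flag & LPFC_IO_LIBDFC || piocb->vport != vport)
		continue;
	list_add_tail(&piocb->dlist, &abort_list);
}
if (phba->sli_rev == LPFC_SLI_REV4)
	spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);

list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
	spin_lock_irq(&phba->hbalock);	/* per-IOCB: abort takes ring_lock */
	list_del_init(&piocb->dlist);
	lpfc_sli_issue_abort_iotag(phba, pring, piocb);
	spin_unlock_irq(&phba->hbalock);
}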
*/ + list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { + spin_lock_irq(&phba->hbalock); + list_del_init(&piocb->dlist); + lpfc_sli_issue_abort_iotag(phba, pring, piocb); + spin_unlock_irq(&phba->hbalock); + } + if (!list_empty(&abort_list)) + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "3387 abort list for txq not empty\n"); + INIT_LIST_HEAD(&abort_list); spin_lock_irq(&phba->hbalock); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { cmd = &piocb->iocb; @@ -6278,25 +6417,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) if (piocb->vport != vport) continue; - list_move_tail(&piocb->list, &completions); - pring->txq_cnt--; - } - - list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { - if (piocb->iocb_flag & LPFC_IO_LIBDFC) { - continue; - } - - if (piocb->vport != vport) - continue; - - lpfc_sli_issue_abort_iotag(phba, pring, piocb); + list_del_init(&piocb->list); + list_add_tail(&piocb->list, &abort_list); } + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); spin_unlock_irq(&phba->hbalock); /* Cancel all the IOCBs from the completions list */ - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, - IOERR_SLI_ABORTED); + lpfc_sli_cancel_iocbs(phba, &abort_list, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); return; } @@ -6321,36 +6451,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) void lpfc_els_flush_all_cmd(struct lpfc_hba *phba) { - LIST_HEAD(completions); - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; - struct lpfc_iocbq *tmp_iocb, *piocb; - IOCB_t *cmd = NULL; - - lpfc_fabric_abort_hba(phba); - spin_lock_irq(&phba->hbalock); - list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { - cmd = &piocb->iocb; - if (piocb->iocb_flag & LPFC_IO_LIBDFC) - continue; - /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ - if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || - cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || - cmd->ulpCommand == CMD_CLOSE_XRI_CN || - cmd->ulpCommand == CMD_ABORT_XRI_CN) - continue; - list_move_tail(&piocb->list, &completions); - pring->txq_cnt--; - } - list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { - if (piocb->iocb_flag & LPFC_IO_LIBDFC) - continue; - lpfc_sli_issue_abort_iotag(phba, pring, piocb); - } - spin_unlock_irq(&phba->hbalock); - - /* Cancel all the IOCBs from the completions list */ - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, - IOERR_SLI_ABORTED); + struct lpfc_vport *vport; + list_for_each_entry(vport, &phba->port_list, listentry) + lpfc_els_flush_cmd(vport); return; } @@ -6599,7 +6702,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* ELS command <elsCmd> received from NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0112 ELS command x%x received from NPORT x%x " - "Data: x%x\n", cmd, did, vport->port_state); + "Data: x%x x%x x%x x%x\n", + cmd, did, vport->port_state, vport->fc_flag, + vport->fc_myDID, vport->fc_prevDID); switch (cmd) { case ELS_CMD_PLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -6608,6 +6713,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvPLOGI++; ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); + if (phba->sli_rev == LPFC_SLI_REV4 && + (phba->pport->fc_flag & FC_PT2PT)) { + vport->fc_prevDID = vport->fc_myDID; + /* Our DID needs to be updated before registering + * the vfi. 
This is done in lpfc_rcv_plogi but + * that is called after the reg_vfi. + */ + vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3312 Remote port assigned DID x%x " + "%x\n", vport->fc_myDID, + vport->fc_prevDID); + } lpfc_send_els_event(vport, ndlp, payload); @@ -6617,6 +6735,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, rjt_exp = LSEXP_NOTHING_MORE; break; } + shost = lpfc_shost_from_vport(vport); if (vport->port_state < LPFC_DISC_AUTH) { if (!(phba->pport->fc_flag & FC_PT2PT) || (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { @@ -6628,9 +6747,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * another NPort and the other side has initiated * the PLOGI before responding to our FLOGI. */ + if (phba->sli_rev == LPFC_SLI_REV4 && + (phba->fc_topology_changed || + vport->fc_myDID != vport->fc_prevDID)) { + lpfc_unregister_fcf_prep(phba); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + lpfc_issue_reg_vfi(vport); + } } - shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; spin_unlock_irq(shost->host_lock); @@ -6989,8 +7117,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) spin_lock_irq(shost->host_lock); if (vport->fc_flag & FC_DISC_DELAYED) { spin_unlock_irq(shost->host_lock); + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "3334 Delay fc port discovery for %d seconds\n", + phba->fc_ratov); mod_timer(&vport->delayed_disc_tmo, - jiffies + HZ * phba->fc_ratov); + jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); return; } spin_unlock_irq(shost->host_lock); @@ -7274,7 +7405,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) return; shost = lpfc_shost_from_vport(phba->pport); - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -7778,7 +7909,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba) blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); /* Start a timer to unblock fabric iocbs after 100ms */ if (!blocked) - mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); + mod_timer(&phba->fabric_block_timer, + jiffies + msecs_to_jiffies(100)); return; } @@ -8065,7 +8197,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, rxid, 1); /* Check if TXQ queue needs to be serviced */ - if (pring->txq_cnt) + if (!(list_empty(&pring->txq))) lpfc_worker_wake_up(phba); return; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index d7096ad94d3..2a17e31265b 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2012 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -160,11 +160,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) if (!list_empty(&evtp->evt_listp)) return; + evtp->evt_arg1 = lpfc_nlp_get(ndlp); + spin_lock_irq(&phba->hbalock); /* We need to hold the node by incrementing the reference * count until this queued work is done */ - evtp->evt_arg1 = lpfc_nlp_get(ndlp); if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); @@ -673,8 +674,6 @@ lpfc_work_done(struct lpfc_hba *phba) lpfc_fdmi_timeout_handler(vport); if (work_port_events & WORKER_RAMP_DOWN_QUEUE) lpfc_ramp_down_queue_handler(phba); - if (work_port_events & WORKER_RAMP_UP_QUEUE) - lpfc_ramp_up_queue_handler(phba); if (work_port_events & WORKER_DELAYED_DISC_TMO) lpfc_delayed_disc_timeout_handler(vport); } @@ -691,12 +690,15 @@ lpfc_work_done(struct lpfc_hba *phba) /* Set the lpfc data pending flag */ set_bit(LPFC_DATA_READY, &phba->data_flags); } else { - pring->flag &= ~LPFC_DEFERRED_RING_EVENT; - lpfc_sli_handle_slow_ring_event(phba, pring, - (status & - HA_RXMASK)); + if (phba->link_state >= LPFC_LINK_UP) { + pring->flag &= ~LPFC_DEFERRED_RING_EVENT; + lpfc_sli_handle_slow_ring_event(phba, pring, + (status & + HA_RXMASK)); + } } - if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt) + if ((phba->sli_rev == LPFC_SLI_REV4) & + (!list_empty(&pring->txq))) lpfc_drain_txq(phba); /* * Turn on Ring interrupts @@ -729,7 +731,7 @@ lpfc_do_work(void *p) struct lpfc_hba *phba = p; int rc; - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); current->flags |= PF_NOFREEZE; phba->data_flags = 0; @@ -1005,9 +1007,6 @@ lpfc_linkup(struct lpfc_hba *phba) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_linkup_port(vports[i]); lpfc_destroy_vport_work_array(phba, vports); - if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && - (phba->sli_rev < LPFC_SLI_REV4)) - lpfc_issue_clear_la(phba, phba->pport); return 0; } @@ -1433,7 +1432,8 @@ lpfc_register_fcf(struct lpfc_hba *phba) if (phba->fcf.fcf_flag & FCF_REGISTERED) { phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); phba->hba_flag &= ~FCF_TS_INPROG; - if (phba->pport->port_state != LPFC_FLOGI) { + if (phba->pport->port_state != LPFC_FLOGI && + phba->pport->fc_flag & FC_FABRIC) { phba->hba_flag |= FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); lpfc_initial_flogi(phba->pport); @@ -1732,7 +1732,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) * use through a sequence of @fcf_cnt eligible FCF records with equal * probability. To perform integer manipulation of random numbers with * size uint32_t, the lower 16 bits of the 32-bit random number returned - * from random32() are taken as the random random number generated. + * from prandom_u32() are taken as the random number generated. 
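The selection test in the hunk that follows keeps the newly read FCF with probability 1/fcf_cnt: rand_num is uniform over the 65536 values [0, 0xFFFF], and fcf_cnt * rand_num < 0xFFFF holds for roughly one of every fcf_cnt of them. With fcf_cnt == 4, for instance, it passes exactly when rand_num < 0x4000, i.e. 16384 of 65536 outcomes:

/* Condensed from lpfc_sli4_new_fcf_random_select() */
static bool new_fcf_wins(uint32_t fcf_cnt)
{
	/* lower 16 bits of the PRNG: uniform over [0, 0xFFFF] */
	uint32_t rand_num = 0xFFFF & prandom_u32();

	/* true with probability ~1/fcf_cnt (reservoir-style selection
	 * over a stream of eligible FCF records) */
	return (fcf_cnt * rand_num) < 0xFFFF;
}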
* * Returns true when outcome is for the newly read FCF record should be * chosen; otherwise, return false when outcome is for keeping the previously @@ -1744,7 +1744,7 @@ lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt) uint32_t rand_num; /* Get 16-bit uniform random number */ - rand_num = (0xFFFF & random32()); + rand_num = 0xFFFF & prandom_u32(); /* Decision with probability 1/fcf_cnt */ if ((fcf_cnt * rand_num) < 0xFFFF) @@ -1792,6 +1792,8 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, virt_addr = mboxq->sge_array->addr[0]; shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; + lpfc_sli_pcimem_bcopy(shdr, shdr, + sizeof(union lpfc_sli4_cfg_shdr)); shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status) { @@ -2265,8 +2267,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2836 New FCF matches in-use " - "FCF (x%x)\n", - phba->fcf.current_rec.fcf_indx); + "FCF (x%x), port_state:x%x, " + "fc_flag:x%x\n", + phba->fcf.current_rec.fcf_indx, + phba->pport->port_state, + phba->pport->fc_flag); goto out; } else lpfc_printf_log(phba, KERN_ERR, LOG_FIP, @@ -2380,7 +2385,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) phba->fcf.eligible_fcf_cnt = 1; /* Seeding the random number generator for random selection */ seed = (uint32_t)(0xFFFFFFFF & jiffies); - srandom32(seed); + prandom_seed(seed); } spin_unlock_irq(&phba->hbalock); goto read_next_fcf; @@ -2538,8 +2543,11 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) if (!new_fcf_record) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2766 Mailbox command READ_FCF_RECORD " - "failed to retrieve a FCF record.\n"); - goto error_out; + "failed to retrieve a FCF record. " + "hba_flg x%x fcf_flg x%x\n", phba->hba_flag, + phba->fcf.fcf_flag); + lpfc_unregister_fcf_rescan(phba); + goto out; } /* Get the needed parameters from FCF record */ @@ -2791,7 +2799,19 @@ void lpfc_issue_init_vpi(struct lpfc_vport *vport) { LPFC_MBOXQ_t *mboxq; - int rc; + int rc, vpi; + + if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) { + vpi = lpfc_alloc_vpi(vport->phba); + if (!vpi) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_MBOX, + "3303 Failed to obtain vport vpi\n"); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + return; + } + vport->vpi = vpi; + } mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { @@ -2888,6 +2908,16 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_vport_set_state(vport, FC_VPORT_FAILED); goto out_free_mem; } + + /* If the VFI is already registered, there is nothing else to do + * Unless this was a VFI update and we are in PT2PT mode, then + * we should drop through to set the port state to ready. 
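The two brace-less, stacked ifs that open the next hunk are easy to misread; they are equivalent to the single condition below (a restatement for clarity, not a behavior change):

if ((vport->fc_flag & FC_VFI_REGISTERED) &&
    !(phba->sli_rev == LPFC_SLI_REV4 &&
      (vport->fc_flag & FC_PT2PT)))
	goto out_free_mem;	/* already registered and not an SLI4
				 * pt2pt update: nothing left to do */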
+ */ + if (vport->fc_flag & FC_VFI_REGISTERED) + if (!(phba->sli_rev == LPFC_SLI_REV4 && + vport->fc_flag & FC_PT2PT)) + goto out_free_mem; + /* The VPI is implicitly registered when the VFI is registered */ spin_lock_irq(shost->host_lock); vport->vpi_state |= LPFC_VPI_REGISTERED; @@ -2903,6 +2933,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) goto out_free_mem; } + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x " + "alpacnt:%d LinkState:%x topology:%x\n", + vport->port_state, vport->fc_flag, vport->fc_myDID, + vport->phba->alpa_map[0], + phba->link_state, phba->fc_topology); + if (vport->port_state == LPFC_FABRIC_CFG_LINK) { /* * For private loop or for NPort pt2pt, @@ -2915,7 +2952,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) /* Use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ - lpfc_disc_start(vport); + if (vport->fc_flag & FC_PT2PT) + vport->port_state = LPFC_VPORT_READY; + else + lpfc_disc_start(vport); } else { lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); @@ -2980,6 +3020,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) struct lpfc_dmabuf *mp; int rc; struct fcf_record *fcf_record; + uint32_t fc_flags = 0; spin_lock_irq(&phba->hbalock); switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { @@ -2996,6 +3037,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) break; } + if (phba->fc_topology && + phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3314 Topology changed was 0x%x is 0x%x\n", + phba->fc_topology, + bf_get(lpfc_mbx_read_top_topology, la)); + phba->fc_topology_changed = 1; + } + phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; @@ -3011,11 +3061,8 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) "1309 Link Up Event npiv not supported in loop " "topology\n"); /* Get Loop Map information */ - if (bf_get(lpfc_mbx_read_top_il, la)) { - spin_lock(shost->host_lock); - vport->fc_flag |= FC_LBIT; - spin_unlock(shost->host_lock); - } + if (bf_get(lpfc_mbx_read_top_il, la)) + fc_flags |= FC_LBIT; vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la); i = la->lilpBde64.tus.f.bdeSize; @@ -3064,12 +3111,16 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; } vport->fc_myDID = phba->fc_pref_DID; - spin_lock(shost->host_lock); - vport->fc_flag |= FC_LBIT; - spin_unlock(shost->host_lock); + fc_flags |= FC_LBIT; } spin_unlock_irq(&phba->hbalock); + if (fc_flags) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= fc_flags; + spin_unlock_irq(shost->host_lock); + } + lpfc_linkup(phba); sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!sparam_mbox) @@ -3237,8 +3288,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport->fc_flag &= ~FC_BYPASSED_MODE; spin_unlock_irq(shost->host_lock); - if ((phba->fc_eventTag < la->eventTag) || - (phba->fc_eventTag == la->eventTag)) { + if (phba->fc_eventTag <= la->eventTag) { phba->fc_stat.LinkMultiEvent++; if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) if (phba->fc_eventTag != 0) @@ -3246,16 +3296,18 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } phba->fc_eventTag = la->eventTag; - spin_lock_irq(&phba->hbalock); - if 
(bf_get(lpfc_mbx_read_top_mm, la)) - phba->sli.sli_flag |= LPFC_MENLO_MAINT; - else - phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; - spin_unlock_irq(&phba->hbalock); + if (phba->sli_rev < LPFC_SLI_REV4) { + spin_lock_irq(&phba->hbalock); + if (bf_get(lpfc_mbx_read_top_mm, la)) + phba->sli.sli_flag |= LPFC_MENLO_MAINT; + else + phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; + spin_unlock_irq(&phba->hbalock); + } phba->link_events++; if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) && - (!bf_get(lpfc_mbx_read_top_mm, la))) { + !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) { phba->fc_stat.LinkUp++; if (phba->link_flag & LS_LOOPBACK_MODE) { lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, @@ -3300,8 +3352,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) bf_get(lpfc_mbx_read_top_fa, la)); lpfc_mbx_issue_link_down(phba); } - if ((bf_get(lpfc_mbx_read_top_mm, la)) && - (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) { + if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) && + ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) { if (phba->link_state != LPFC_LINK_DOWN) { phba->fc_stat.LinkDown++; lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, @@ -3329,8 +3381,9 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } } - if (bf_get(lpfc_mbx_read_top_fa, la)) { - if (bf_get(lpfc_mbx_read_top_mm, la)) + if ((phba->sli_rev < LPFC_SLI_REV4) && + bf_get(lpfc_mbx_read_top_fa, la)) { + if (phba->sli.sli_flag & LPFC_MENLO_MAINT) lpfc_issue_clear_la(phba, vport); lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, "1311 fa %d\n", @@ -3921,7 +3974,10 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) vport->fc_map_cnt += count; break; case NLP_STE_NPR_NODE: - vport->fc_npr_cnt += count; + if (vport->fc_npr_cnt == 0 && count == -1) + vport->fc_npr_cnt = 0; + else + vport->fc_npr_cnt += count; break; } spin_unlock_irq(shost->host_lock); @@ -4119,8 +4175,6 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, NLP_INT_NODE_ACT(ndlp); atomic_set(&ndlp->cmd_pending, 0); ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; - if (vport->phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); } struct lpfc_nodelist * @@ -4130,6 +4184,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_hba *phba = vport->phba; uint32_t did; unsigned long flags; + unsigned long *active_rrqs_xri_bitmap = NULL; if (!ndlp) return NULL; @@ -4158,13 +4213,21 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Keep the original DID */ did = ndlp->nlp_DID; + if (phba->sli_rev == LPFC_SLI_REV4) + active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap; /* re-initialize ndlp except of ndlp linked list pointer */ memset((((char *)ndlp) + sizeof (struct list_head)), 0, sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); lpfc_initialize_node(vport, ndlp, did); + if (phba->sli_rev == LPFC_SLI_REV4) + ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap; + spin_unlock_irqrestore(&phba->ndlp_lock, flags); + if (vport->phba->sli_rev == LPFC_SLI_REV4) + ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); + if (state != NLP_STE_UNUSED_NODE) lpfc_nlp_set_state(vport, ndlp, state); @@ -4221,7 +4284,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport) tmo, vport->port_state, vport->fc_flag); } - mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo); + mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo)); spin_lock_irq(shost->host_lock); vport->fc_flag |= 
FC_DISC_TMO; spin_unlock_irq(shost->host_lock); @@ -4354,7 +4417,6 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) with an error */ list_move_tail(&iocb->list, &completions); - pring->txq_cnt--; } } spin_unlock_irq(&phba->hbalock); @@ -4386,6 +4448,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (!ndlp) return; lpfc_issue_els_logo(vport, ndlp, 0); + mempool_free(pmb, phba->mbox_mem_pool); } /* @@ -4405,7 +4468,15 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) int rc; uint16_t rpi; - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (ndlp->nlp_flag & NLP_RPI_REGISTERED || + ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { + if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3366 RPI x%x needs to be " + "unregistered nlp_flag x%x " + "did x%x\n", + ndlp->nlp_rpi, ndlp->nlp_flag, + ndlp->nlp_DID); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { /* SLI4 ports require the physical rpi value. */ @@ -4738,9 +4809,10 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) ((uint32_t) ndlp->nlp_rpi & 0xff)); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0929 FIND node DID " - "Data: x%p x%x x%x x%x\n", + "Data: x%p x%x x%x x%x %p\n", ndlp, ndlp->nlp_DID, - ndlp->nlp_flag, data1); + ndlp->nlp_flag, data1, + ndlp->active_rrqs_xri_bitmap); return ndlp; } } @@ -4936,8 +5008,12 @@ lpfc_disc_start(struct lpfc_vport *vport) uint32_t clear_la_pending; int did_changed; - if (!lpfc_is_link_up(phba)) + if (!lpfc_is_link_up(phba)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3315 Link is not up %x\n", + phba->link_state); return; + } if (phba->link_state == LPFC_CLEAR_LA) clear_la_pending = 1; @@ -4970,11 +5046,13 @@ lpfc_disc_start(struct lpfc_vport *vport) if (num_sent) return; - /* Register the VPI for SLI3, NON-NPIV only. */ + /* Register the VPI for SLI3, NPIV only. */ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_RSCN_MODE) && (phba->sli_rev < LPFC_SLI_REV4)) { + if (vport->port_type == LPFC_PHYSICAL_PORT) + lpfc_issue_clear_la(phba, vport); lpfc_issue_reg_vpi(phba, vport); return; } @@ -5055,7 +5133,6 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) { list_move_tail(&iocb->list, &completions); - pring->txq_cnt--; } } @@ -5398,7 +5475,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (vport->cfg_fdmi_on == 1) lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else - mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); + mod_timer(&vport->fc_fdmitmo, + jiffies + msecs_to_jiffies(1000 * 60)); /* decrement the node reference count held for this callback * function. 
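/*
 * The mod_timer() conversions above trade "jiffies + HZ * seconds" for
 * "jiffies + msecs_to_jiffies(1000 * seconds)".  Both arm the timer the same
 * number of seconds out, but the second form states the interval in
 * milliseconds and leaves the tick rate to the conversion helper, so it stays
 * correct for any CONFIG_HZ.  Simplified user-space model (the kernel's
 * msecs_to_jiffies() also handles overflow and HZ values that do not divide
 * 1000 evenly):
 */
#include <stdio.h>

#define MODEL_HZ 250	/* stand-in for CONFIG_HZ; any value works */

static unsigned long model_msecs_to_jiffies(unsigned int ms)
{
	/* round up so a nonzero timeout can never collapse to zero ticks */
	return ((unsigned long)ms * MODEL_HZ + 999) / 1000;
}

int main(void)
{
	unsigned int tmo = 30;	/* e.g. a 30-second discovery timeout */

	printf("HZ * tmo = %u ticks, msecs_to_jiffies(1000 * tmo) = %lu ticks\n",
	       MODEL_HZ * tmo, model_msecs_to_jiffies(1000 * tmo));
	return 0;
}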
@@ -5551,6 +5629,17 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_initialize_node(vport, ndlp, did); INIT_LIST_HEAD(&ndlp->nlp_listp); + if (vport->phba->sli_rev == LPFC_SLI_REV4) { + ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); + ndlp->active_rrqs_xri_bitmap = + mempool_alloc(vport->phba->active_rrq_pool, + GFP_KERNEL); + if (ndlp->active_rrqs_xri_bitmap) + memset(ndlp->active_rrqs_xri_bitmap, 0, + ndlp->phba->cfg_rrq_xri_bitmap_sz); + } + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, "node init: did:x%x", @@ -5594,6 +5683,9 @@ lpfc_nlp_release(struct kref *kref) /* free ndlp memory for final ndlp release */ if (NLP_CHK_FREE_REQ(ndlp)) { kfree(ndlp->lat_data); + if (phba->sli_rev == LPFC_SLI_REV4) + mempool_free(ndlp->active_rrqs_xri_bitmap, + ndlp->phba->active_rrq_pool); mempool_free(ndlp, ndlp->phba->nlp_mem_pool); } } @@ -5843,7 +5935,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) struct lpfc_vport **vports; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; - int i, rc; + int i = 0, rc; /* Unregister RPIs */ if (lpfc_fcf_inuse(phba)) @@ -5871,6 +5963,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); + if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) { + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (ndlp) + lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); + lpfc_cleanup_pending_mbox(phba->pport); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(phba->pport); + lpfc_mbx_unreg_vpi(phba->pport); + shost = lpfc_shost_from_vport(phba->pport); + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED; + spin_unlock_irq(shost->host_lock); + } /* Cleanup any outstanding ELS commands */ lpfc_els_flush_all_cmd(phba); @@ -6086,13 +6192,41 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, memcpy(&conn_entry->conn_rec, &conn_rec[i], sizeof(struct lpfc_fcf_conn_rec)); - conn_entry->conn_rec.vlan_tag = - le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF; - conn_entry->conn_rec.flags = - le16_to_cpu(conn_entry->conn_rec.flags); list_add_tail(&conn_entry->list, &phba->fcf_conn_rec_list); } + + if (!list_empty(&phba->fcf_conn_rec_list)) { + i = 0; + list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, + list) { + conn_rec = &conn_entry->conn_rec; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3345 FCF connection list rec[%02d]: " + "flags:x%04x, vtag:x%04x, " + "fabric_name:x%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x, " + "switch_name:x%02x:%02x:%02x:%02x:" + "%02x:%02x:%02x:%02x\n", i++, + conn_rec->flags, conn_rec->vlan_tag, + conn_rec->fabric_name[0], + conn_rec->fabric_name[1], + conn_rec->fabric_name[2], + conn_rec->fabric_name[3], + conn_rec->fabric_name[4], + conn_rec->fabric_name[5], + conn_rec->fabric_name[6], + conn_rec->fabric_name[7], + conn_rec->switch_name[0], + conn_rec->switch_name[1], + conn_rec->switch_name[2], + conn_rec->switch_name[3], + conn_rec->switch_name[4], + conn_rec->switch_name[5], + conn_rec->switch_name[6], + conn_rec->switch_name[7]); + } + } } /** diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index e8c47603170..23625925237 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. 
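/*
 * The lpfc_nlp_init()/lpfc_nlp_release() changes above give each SLI4 node
 * an active-RRQ XRI bitmap from a dedicated pool, zero it at allocation, and
 * return it on the final reference drop.  The shape of that lifecycle, with
 * calloc()/free() standing in for mempool_alloc()/mempool_free() (a
 * hypothetical sketch, not driver code):
 */
#include <stdlib.h>
#include <string.h>

struct node {
	unsigned long *active_rrqs_xri_bitmap;
	int refcnt;
};

static struct node *node_init(size_t bitmap_sz)
{
	struct node *ndlp = calloc(1, sizeof(*ndlp));

	if (!ndlp)
		return NULL;
	ndlp->refcnt = 1;
	ndlp->active_rrqs_xri_bitmap = malloc(bitmap_sz);
	if (ndlp->active_rrqs_xri_bitmap)	/* zeroed like the memset above */
		memset(ndlp->active_rrqs_xri_bitmap, 0, bitmap_sz);
	return ndlp;
}

static void node_put(struct node *ndlp)
{
	if (--ndlp->refcnt == 0) {	/* mirrors the free in lpfc_nlp_release() */
		free(ndlp->active_rrqs_xri_bitmap);
		free(ndlp);
	}
}

int main(void)
{
	struct node *n = node_init(64);

	if (n)
		node_put(n);
	return 0;
}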
* - * Copyright (C) 2004-2011 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -45,6 +45,7 @@ #define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */ #define LPFC_ELS_RING 2 /* ring 2 for ELS commands */ #define LPFC_FCP_NEXT_RING 3 +#define LPFC_FCP_OAS_RING 3 #define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */ #define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */ @@ -1667,6 +1668,7 @@ enum lpfc_protgrp_type { #define BG_OP_IN_CSUM_OUT_CSUM 0x5 #define BG_OP_IN_CRC_OUT_CSUM 0x6 #define BG_OP_IN_CSUM_OUT_CRC 0x7 +#define BG_OP_RAW_MODE 0x8 struct lpfc_pde5 { uint32_t word0; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 6e93b886cd4..f432ec180cf 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2012 Emulex. All rights reserved. * + * Copyright (C) 2009-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -200,6 +200,11 @@ struct lpfc_sli_intf { #define LPFC_MAX_IMAX 5000000 #define LPFC_DEF_IMAX 50000 +#define LPFC_MIN_CPU_MAP 0 +#define LPFC_MAX_CPU_MAP 2 +#define LPFC_HBA_CPU_MAP 1 +#define LPFC_DRIVER_CPU_MAP 2 /* Default */ + /* PORT_CAPABILITIES constants. */ #define LPFC_MAX_SUPPORTED_PAGES 8 @@ -229,6 +234,9 @@ struct ulp_bde64 { uint32_t addrHigh; }; +/* Maximun size of immediate data that can fit into a 128 byte WQE */ +#define LPFC_MAX_BDE_IMM_SIZE 64 + struct lpfc_sli4_flags { uint32_t word0; #define lpfc_idx_rsrc_rdy_SHIFT 0 @@ -621,7 +629,7 @@ struct lpfc_register { #define lpfc_sliport_status_rdy_SHIFT 23 #define lpfc_sliport_status_rdy_MASK 0x1 #define lpfc_sliport_status_rdy_WORD word0 -#define MAX_IF_TYPE_2_RESETS 1000 +#define MAX_IF_TYPE_2_RESETS 6 #define LPFC_CTL_PORT_CTL_OFFSET 0x408 #define lpfc_sliport_ctrl_end_SHIFT 30 @@ -1958,6 +1966,9 @@ struct lpfc_mbx_init_vfi { struct lpfc_mbx_reg_vfi { uint32_t word1; +#define lpfc_reg_vfi_upd_SHIFT 29 +#define lpfc_reg_vfi_upd_MASK 0x00000001 +#define lpfc_reg_vfi_upd_WORD word1 #define lpfc_reg_vfi_vp_SHIFT 28 #define lpfc_reg_vfi_vp_MASK 0x00000001 #define lpfc_reg_vfi_vp_WORD word1 @@ -2577,6 +2588,9 @@ struct lpfc_sli4_parameters { #define cfg_mqv_WORD word6 uint32_t word7; uint32_t word8; +#define cfg_wqsize_SHIFT 8 +#define cfg_wqsize_MASK 0x0000000f +#define cfg_wqsize_WORD word8 #define cfg_wqv_SHIFT 14 #define cfg_wqv_MASK 0x00000003 #define cfg_wqv_WORD word8 @@ -2602,6 +2616,9 @@ struct lpfc_sli4_parameters { #define cfg_phwq_SHIFT 15 #define cfg_phwq_MASK 0x00000001 #define cfg_phwq_WORD word12 +#define cfg_oas_SHIFT 25 +#define cfg_oas_MASK 0x00000001 +#define cfg_oas_WORD word12 #define cfg_loopbk_scope_SHIFT 28 #define cfg_loopbk_scope_MASK 0x0000000f #define cfg_loopbk_scope_WORD word12 @@ -3308,6 +3325,9 @@ struct wqe_common { #define wqe_ebde_cnt_SHIFT 0 #define wqe_ebde_cnt_MASK 0x0000000f #define wqe_ebde_cnt_WORD word10 +#define wqe_oas_SHIFT 6 +#define wqe_oas_MASK 0x00000001 +#define wqe_oas_WORD word10 #define wqe_lenloc_SHIFT 7 #define wqe_lenloc_MASK 0x00000003 #define wqe_lenloc_WORD word10 @@ -3425,7 +3445,8 @@ struct els_request64_wqe { #define els_req64_hopcnt_SHIFT 24 #define els_req64_hopcnt_MASK 0x000000ff #define 
els_req64_hopcnt_WORD word13 - uint32_t reserved[2]; + uint32_t word14; + uint32_t max_response_payload_len; }; struct xmit_els_rsp64_wqe { @@ -3540,7 +3561,8 @@ struct gen_req64_wqe { uint32_t relative_offset; struct wqe_rctl_dfctl wge_ctl; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; + uint32_t rsvd_12_14[3]; + uint32_t max_response_payload_len; }; struct create_xri_wqe { @@ -3570,7 +3592,13 @@ struct abort_cmd_wqe { struct fcp_iwrite64_wqe { struct ulp_bde64 bde; - uint32_t payload_offset_len; + uint32_t word3; +#define cmd_buff_len_SHIFT 16 +#define cmd_buff_len_MASK 0x00000ffff +#define cmd_buff_len_WORD word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3 uint32_t total_xfer_len; uint32_t initial_xfer_len; struct wqe_common wqe_com; /* words 6-11 */ @@ -3580,7 +3608,13 @@ struct fcp_iwrite64_wqe { struct fcp_iread64_wqe { struct ulp_bde64 bde; - uint32_t payload_offset_len; /* word 3 */ + uint32_t word3; +#define cmd_buff_len_SHIFT 16 +#define cmd_buff_len_MASK 0x00000ffff +#define cmd_buff_len_WORD word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3 uint32_t total_xfer_len; /* word 4 */ uint32_t rsrvd5; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ @@ -3590,7 +3624,13 @@ struct fcp_iread64_wqe { struct fcp_icmnd64_wqe { struct ulp_bde64 bde; /* words 0-2 */ - uint32_t rsrvd3; /* word 3 */ + uint32_t word3; +#define cmd_buff_len_SHIFT 16 +#define cmd_buff_len_MASK 0x00000ffff +#define cmd_buff_len_WORD word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3 uint32_t rsrvd4; /* word 4 */ uint32_t rsrvd5; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ @@ -3614,6 +3654,13 @@ union lpfc_wqe { struct gen_req64_wqe gen_req; }; +union lpfc_wqe128 { + uint32_t words[32]; + struct lpfc_wqe_generic generic; + struct xmit_seq64_wqe xmit_sequence; + struct gen_req64_wqe gen_req; +}; + #define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001 #define LPFC_FILE_TYPE_GROUP 0xf7 #define LPFC_FILE_ID_GROUP 0xa2 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 314b4f61b9e..06f9a5b79e6 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2012 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
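/*
 * The cmd_buff_len/payload_offset_len defines above carve word3 of the FCP
 * WQEs into two 16-bit fields named by a SHIFT/MASK/WORD triplet.  A minimal
 * standalone version of the shift-and-mask accessors that the driver's
 * bf_set()/bf_get() macros expand to:
 */
#include <assert.h>
#include <stdint.h>

#define CMD_BUFF_LEN_SHIFT		16
#define CMD_BUFF_LEN_MASK		0x0000ffffu
#define PAYLOAD_OFFSET_LEN_SHIFT	0
#define PAYLOAD_OFFSET_LEN_MASK		0x0000ffffu

static uint32_t bf_set32(uint32_t word, uint32_t val, int shift, uint32_t mask)
{
	return (word & ~(mask << shift)) | ((val & mask) << shift);
}

static uint32_t bf_get32(uint32_t word, int shift, uint32_t mask)
{
	return (word >> shift) & mask;
}

int main(void)
{
	uint32_t word3 = 0;

	word3 = bf_set32(word3, 32, CMD_BUFF_LEN_SHIFT, CMD_BUFF_LEN_MASK);
	word3 = bf_set32(word3, 512, PAYLOAD_OFFSET_LEN_SHIFT,
			 PAYLOAD_OFFSET_LEN_MASK);
	assert(bf_get32(word3, CMD_BUFF_LEN_SHIFT, CMD_BUFF_LEN_MASK) == 32);
	assert(bf_get32(word3, PAYLOAD_OFFSET_LEN_SHIFT,
			PAYLOAD_OFFSET_LEN_MASK) == 512);
	return 0;
}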
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <linux/firmware.h> #include <linux/miscdevice.h> +#include <linux/percpu.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -58,6 +59,10 @@ char *_dump_buf_dif; unsigned long _dump_buf_dif_order; spinlock_t _dump_buf_lock; +/* Used when mapping IRQ vectors in a driver centric manner */ +uint16_t *lpfc_used_cpu; +uint32_t lpfc_present_cpu; + static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); static int lpfc_sli4_queue_verify(struct lpfc_hba *); @@ -75,6 +80,7 @@ static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); static void lpfc_sli4_disable_intr(struct lpfc_hba *); static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); +static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; @@ -467,10 +473,22 @@ lpfc_config_port_post(struct lpfc_hba *phba) lpfc_sli_read_link_ste(phba); /* Reset the DFT_HBA_Q_DEPTH to the max xri */ - if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1)) - phba->cfg_hba_queue_depth = - (mb->un.varRdConfig.max_xri + 1) - - lpfc_sli4_get_els_iocb_cnt(phba); + i = (mb->un.varRdConfig.max_xri + 1); + if (phba->cfg_hba_queue_depth > i) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3359 HBA queue depth changed from %d to %d\n", + phba->cfg_hba_queue_depth, i); + phba->cfg_hba_queue_depth = i; + } + + /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ + i = (mb->un.varRdConfig.max_xri >> 3); + if (phba->pport->cfg_lun_queue_depth > i) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3360 LUN queue depth changed from %d to %d\n", + phba->pport->cfg_lun_queue_depth, i); + phba->pport->cfg_lun_queue_depth = i; + } phba->lmt = mb->un.varRdConfig.lmt; @@ -541,13 +559,16 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Set up ring-0 (ELS) timer */ timeout = phba->fc_ratov * 2; - mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); /* Set up heart beat (HB) timer */ - mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + mod_timer(&phba->hb_tmofunc, + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hb_outstanding = 0; phba->last_completion_time = jiffies; /* Set up error attention (ERATT) polling timer */ - mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); if (phba->hba_flag & LINK_DISABLED) { lpfc_printf_log(phba, @@ -799,58 +820,153 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) } /** - * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset + * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free + * rspiocb which got deferred + * * @phba: pointer to lpfc HBA data structure. * - * This routine will do uninitialization after the HBA is reset when bring - * down the SLI Layer. + * This routine will cleanup completed slow path events after HBA is reset + * when bringing down the SLI Layer. + * * * Return codes - * 0 - success. - * Any other value - error. + * void. 
 **/
-static int
-lpfc_hba_down_post_s3(struct lpfc_hba *phba)
+static void
+lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
+{
+	struct lpfc_iocbq *rspiocbq;
+	struct hbq_dmabuf *dmabuf;
+	struct lpfc_cq_event *cq_event;
+
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
+	spin_unlock_irq(&phba->hbalock);
+
+	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
+		/* Get the response iocb from the head of work queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_queue_event,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+
+		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
+		case CQE_CODE_COMPL_WQE:
+			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
+						cq_event);
+			lpfc_sli_release_iocbq(phba, rspiocbq);
+			break;
+		case CQE_CODE_RECEIVE:
+		case CQE_CODE_RECEIVE_V1:
+			dmabuf = container_of(cq_event, struct hbq_dmabuf,
+					      cq_event);
+			lpfc_in_buf_free(phba, &dmabuf->dbuf);
+		}
+	}
+}
+
+/**
+ * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will cleanup posted ELS buffers after the HBA is reset
+ * when bringing down the SLI Layer.
+ *
+ *
+ * Return codes
+ *   void.
+ **/
+static void
+lpfc_hba_free_post_buf(struct lpfc_hba *phba)
 {
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring *pring;
 	struct lpfc_dmabuf *mp, *next_mp;
-	LIST_HEAD(completions);
-	int i;
+	LIST_HEAD(buflist);
+	int count;
 
 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
 		lpfc_sli_hbqbuf_free_all(phba);
 	else {
 		/* Cleanup preposted buffers on the ELS ring */
 		pring = &psli->ring[LPFC_ELS_RING];
-		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
+		spin_lock_irq(&phba->hbalock);
+		list_splice_init(&pring->postbufq, &buflist);
+		spin_unlock_irq(&phba->hbalock);
+
+		count = 0;
+		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
 			list_del(&mp->list);
-			pring->postbufq_cnt--;
+			count++;
 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
 			kfree(mp);
 		}
+
+		spin_lock_irq(&phba->hbalock);
+		pring->postbufq_cnt -= count;
+		spin_unlock_irq(&phba->hbalock);
 	}
+}
+
+/**
+ * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will cleanup the txcmplq after the HBA is reset when bringing
+ * down the SLI Layer.
+ *
+ * Return codes
+ *   void
+ **/
+static void
+lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+	LIST_HEAD(completions);
+	int i;
 
-	spin_lock_irq(&phba->hbalock);
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
-
+		if (phba->sli_rev >= LPFC_SLI_REV4)
+			spin_lock_irq(&pring->ring_lock);
+		else
+			spin_lock_irq(&phba->hbalock);
 		/* At this point in time the HBA is either reset or DOA. Either
 		 * way, nothing should be on txcmplq as it will NEVER complete.
 		 */
 		list_splice_init(&pring->txcmplq, &completions);
 		pring->txcmplq_cnt = 0;
-		spin_unlock_irq(&phba->hbalock);
+
+		if (phba->sli_rev >= LPFC_SLI_REV4)
+			spin_unlock_irq(&pring->ring_lock);
+		else
+			spin_unlock_irq(&phba->hbalock);
 		/* Cancel all the IOCBs from the completions list */
 		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 				      IOERR_SLI_ABORTED);
-		lpfc_sli_abort_iocb_ring(phba, pring);
-		spin_lock_irq(&phba->hbalock);
 	}
-	spin_unlock_irq(&phba->hbalock);
+}
+/**
+ * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ * + * This routine will do uninitialization after the HBA is reset when bring + * down the SLI Layer. + * + * Return codes + * 0 - success. + * Any other value - error. + **/ +static int +lpfc_hba_down_post_s3(struct lpfc_hba *phba) +{ + lpfc_hba_free_post_buf(phba); + lpfc_hba_clean_txcmplq(phba); return 0; } @@ -870,13 +986,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) { struct lpfc_scsi_buf *psb, *psb_next; LIST_HEAD(aborts); - int ret; unsigned long iflag = 0; struct lpfc_sglq *sglq_entry = NULL; - ret = lpfc_hba_down_post_s3(phba); - if (ret) - return ret; + lpfc_hba_free_post_buf(phba); + lpfc_hba_clean_txcmplq(phba); + /* At this point in time the HBA is either reset or DOA. Either * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be * on the lpfc_sgl_list so that it can either be freed if the @@ -909,9 +1024,11 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) psb->pCmd = NULL; psb->status = IOSTAT_SUCCESS; } - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_splice(&aborts, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); + list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); + + lpfc_sli4_free_sp_events(phba); return 0; } @@ -986,9 +1103,14 @@ lpfc_rrq_timeout(unsigned long ptr) phba = (struct lpfc_hba *)ptr; spin_lock_irqsave(&phba->pport->work_port_lock, iflag); - phba->hba_flag |= HBA_RRQ_ACTIVE; + if (!(phba->pport->load_flag & FC_UNLOADING)) + phba->hba_flag |= HBA_RRQ_ACTIVE; + else + phba->hba_flag &= ~HBA_RRQ_ACTIVE; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); - lpfc_worker_wake_up(phba); + + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_worker_wake_up(phba); } /** @@ -1022,7 +1144,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) !(phba->link_state == LPFC_HBA_ERROR) && !(phba->pport->load_flag & FC_UNLOADING)) mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); return; } @@ -1065,15 +1188,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) spin_lock_irq(&phba->pport->work_port_lock); - if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, - jiffies)) { + if (time_after(phba->last_completion_time + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), + jiffies)) { spin_unlock_irq(&phba->pport->work_port_lock); if (!phba->hb_outstanding) mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); else mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); return; } spin_unlock_irq(&phba->pport->work_port_lock); @@ -1105,7 +1231,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) if (!pmboxq) { mod_timer(&phba->hb_tmofunc, jiffies + - HZ * LPFC_HB_MBOX_INTERVAL); + msecs_to_jiffies(1000 * + LPFC_HB_MBOX_INTERVAL)); return; } @@ -1121,7 +1248,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) phba->mbox_mem_pool); mod_timer(&phba->hb_tmofunc, jiffies + - HZ * LPFC_HB_MBOX_INTERVAL); + msecs_to_jiffies(1000 * + LPFC_HB_MBOX_INTERVAL)); return; } phba->skipped_hb = 0; @@ -1137,7 +1265,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) phba->skipped_hb = jiffies; mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); return; } 
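/*
 * The heartbeat checks above rely on time_after(), which stays correct when
 * the jiffies counter wraps around.  The trick is comparing through a signed
 * subtraction rather than with a < b directly.  Minimal model using a 32-bit
 * counter:
 */
#include <assert.h>
#include <stdint.h>

#define time_after(a, b)	((int32_t)((b) - (a)) < 0)

int main(void)
{
	uint32_t now = 0xFFFFFFF0u;	/* counter just before wraparound */
	uint32_t deadline = now + 0x20;	/* wraps to 0x00000010 */

	assert(deadline < now);			/* naive comparison is wrong... */
	assert(time_after(deadline, now));	/* ...the signed delta is right */
	return 0;
}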
else { /* @@ -1151,7 +1280,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) jiffies_to_msecs(jiffies - phba->last_completion_time)); mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); } } } @@ -1192,7 +1322,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba) * This routine is called to bring a SLI4 HBA offline when HBA hardware error * other than Port Error 6 has been detected. **/ -static void +void lpfc_sli4_offline_eratt(struct lpfc_hba *phba) { lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); @@ -1217,7 +1347,6 @@ static void lpfc_handle_deferred_eratt(struct lpfc_hba *phba) { uint32_t old_host_status = phba->work_hs; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli = &phba->sli; /* If the pci channel is offline, ignore possible errors, @@ -1246,8 +1375,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) * dropped by the firmware. Error iocb (I/O) on txcmplq and let the * SCSI layer retry it after re-establishing link. */ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); /* * There was a firmware error. Take the hba offline and then @@ -1315,7 +1443,6 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; uint32_t event_data; unsigned long temperature; struct temp_event temp_event_data; @@ -1367,8 +1494,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) * Error iocb (I/O) on txcmplq and let the SCSI layer * retry it after re-establishing link. */ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); /* * There was a firmware error. Take the hba offline and then @@ -1441,7 +1567,8 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) * for handling possible port resource change. 
**/ static int -lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action) +lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, + bool en_rn_msg) { int rc; uint32_t intr_mode; @@ -1453,9 +1580,10 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action) rc = lpfc_sli4_pdev_status_reg_wait(phba); if (!rc) { /* need reset: attempt for port recovery */ - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2887 Reset Needed: Attempting Port " - "Recovery...\n"); + if (en_rn_msg) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2887 Reset Needed: Attempting Port " + "Recovery...\n"); lpfc_offline_prep(phba, mbx_action); lpfc_offline(phba); /* release interrupt for possible resource change */ @@ -1495,6 +1623,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) uint32_t reg_err1, reg_err2; uint32_t uerrlo_reg, uemasklo_reg; uint32_t pci_rd_rc1, pci_rd_rc2; + bool en_rn_msg = true; int rc; /* If the pci channel is offline, ignore possible errors, since @@ -1545,10 +1674,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) break; } if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && - reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) + reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3143 Port Down: Firmware Restarted\n"); - else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && + "3143 Port Down: Firmware Update " + "Detected\n"); + en_rn_msg = false; + } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3144 Port Down: Debug Dump\n"); @@ -1558,7 +1689,8 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) "3145 Port Down: Provisioning\n"); /* Check port status register for function reset */ - rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT); + rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, + en_rn_msg); if (rc == 0) { /* don't report event on forced debug dump */ if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && @@ -1901,78 +2033,81 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) switch (dev_id) { case PCI_DEVICE_ID_FIREFLY: - m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP6000", "PCI", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SUPERFLY: if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) - m = (typeof(m)){"LP7000", "PCI", - "Fibre Channel Adapter"}; + m = (typeof(m)){"LP7000", "PCI", ""}; else - m = (typeof(m)){"LP7000E", "PCI", - "Fibre Channel Adapter"}; + m = (typeof(m)){"LP7000E", "PCI", ""}; + m.function = "Obsolete, Unsupported Fibre Channel Adapter"; break; case PCI_DEVICE_ID_DRAGONFLY: m = (typeof(m)){"LP8000", "PCI", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_CENTAUR: if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) - m = (typeof(m)){"LP9002", "PCI", - "Fibre Channel Adapter"}; + m = (typeof(m)){"LP9002", "PCI", ""}; else - m = (typeof(m)){"LP9000", "PCI", - "Fibre Channel Adapter"}; + m = (typeof(m)){"LP9000", "PCI", ""}; + m.function = "Obsolete, Unsupported Fibre Channel Adapter"; break; case PCI_DEVICE_ID_RFLY: m = (typeof(m)){"LP952", "PCI", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PEGASUS: m = (typeof(m)){"LP9802", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_THOR: m = (typeof(m)){"LP10000", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported 
Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_VIPER: m = (typeof(m)){"LPX1000", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PFLY: m = (typeof(m)){"LP982", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_TFLY: m = (typeof(m)){"LP1050", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS: m = (typeof(m)){"LP11000", "PCI-X2", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS_SCSP: m = (typeof(m)){"LP11000-SP", "PCI-X2", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS_DCSP: m = (typeof(m)){"LP11002-SP", "PCI-X2", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE: - m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe1000", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE_SCSP: - m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe1000-SP", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE_DCSP: - m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe1002-SP", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_BMID: m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_BSMB: - m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP111", "PCI-X2", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZEPHYR: m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; @@ -1991,16 +2126,20 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP101: - m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP101", "PCI-X", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP10000S: - m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP10000-S", "PCI", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP11000S: - m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP11000-S", "PCI-X2", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LPE11000S: - m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe11000-S", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT: m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; @@ -2021,20 +2160,21 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HORNET: - m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; + m = (typeof(m)){"LP21000", "PCIe", + "Obsolete, Unsupported FCoE Adapter"}; GE = 1; break; case PCI_DEVICE_ID_PROTEUS_VF: m = (typeof(m)){"LPev12000", "PCIe IOV", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PROTEUS_PF: m = (typeof(m)){"LPev12000", "PCIe IOV", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case 
PCI_DEVICE_ID_PROTEUS_S: m = (typeof(m)){"LPemv12002-S", "PCIe IOV", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_TIGERSHARK: oneConnect = 1; @@ -2050,17 +2190,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) break; case PCI_DEVICE_ID_BALIUS: m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LANCER_FC: - case PCI_DEVICE_ID_LANCER_FC_VF: m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; break; + case PCI_DEVICE_ID_LANCER_FC_VF: + m = (typeof(m)){"LPe16000", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; case PCI_DEVICE_ID_LANCER_FCOE: - case PCI_DEVICE_ID_LANCER_FCOE_VF: oneConnect = 1; m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; break; + case PCI_DEVICE_ID_LANCER_FCOE_VF: + oneConnect = 1; + m = (typeof(m)){"OCe15100", "PCIe", + "Obsolete, Unsupported FCoE"}; + break; case PCI_DEVICE_ID_SKYHAWK: case PCI_DEVICE_ID_SKYHAWK_VF: oneConnect = 1; @@ -2634,6 +2781,7 @@ lpfc_online(struct lpfc_hba *phba) struct lpfc_vport *vport; struct lpfc_vport **vports; int i; + bool vpis_cleared = false; if (!phba) return 0; @@ -2657,6 +2805,10 @@ lpfc_online(struct lpfc_hba *phba) lpfc_unblock_mgmt_io(phba); return 1; } + spin_lock_irq(&phba->hbalock); + if (!phba->sli4_hba.max_cfg_param.vpi_used) + vpis_cleared = true; + spin_unlock_irq(&phba->hbalock); } else { if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ lpfc_unblock_mgmt_io(phba); @@ -2673,8 +2825,13 @@ lpfc_online(struct lpfc_hba *phba) vports[i]->fc_flag &= ~FC_OFFLINE_MODE; if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + if ((vpis_cleared) && + (vports[i]->port_type != + LPFC_PHYSICAL_PORT)) + vports[i]->vpi = 0; + } spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); @@ -2834,16 +2991,30 @@ lpfc_scsi_free(struct lpfc_hba *phba) struct lpfc_iocbq *io, *io_next; spin_lock_irq(&phba->hbalock); + /* Release all the lpfc_scsi_bufs maintained by this host. */ - spin_lock(&phba->scsi_buf_list_lock); - list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { + + spin_lock(&phba->scsi_buf_list_put_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, + list) { list_del(&sb->list); pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; } - spin_unlock(&phba->scsi_buf_list_lock); + spin_unlock(&phba->scsi_buf_list_put_lock); + + spin_lock(&phba->scsi_buf_list_get_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, + list) { + list_del(&sb->list); + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, + sb->dma_handle); + kfree(sb); + phba->total_scsi_bufs--; + } + spin_unlock(&phba->scsi_buf_list_get_lock); /* Release all the lpfc_iocbq entries maintained by this host. 
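/*
 * The _get/_put pair drained above is a two-list free pool: submission
 * allocates from the get list, completion returns buffers to the put list,
 * and only when the get list runs dry is the put list spliced over.  The
 * hot paths therefore take different locks almost all of the time.  A
 * hypothetical pthreads sketch of the idea (not driver code):
 */
#include <pthread.h>
#include <stddef.h>

struct buf { struct buf *next; };

static struct buf *get_list, *put_list;
static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER;

static struct buf *pool_get(void)
{
	struct buf *b;

	pthread_mutex_lock(&get_lock);
	if (!get_list) {			/* slow path: refill from put side */
		pthread_mutex_lock(&put_lock);	/* same order as the driver: get, then put */
		get_list = put_list;
		put_list = NULL;
		pthread_mutex_unlock(&put_lock);
	}
	b = get_list;
	if (b)
		get_list = b->next;
	pthread_mutex_unlock(&get_lock);
	return b;
}

static void pool_put(struct buf *b)
{
	pthread_mutex_lock(&put_lock);		/* never touches get_lock */
	b->next = put_list;
	put_list = b;
	pthread_mutex_unlock(&put_lock);
}

int main(void)
{
	static struct buf b1, b2;

	pool_put(&b1);
	pool_put(&b2);
	return pool_get() ? 0 : 1;
}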
*/ list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { @@ -2915,9 +3086,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) sglq_entry->state = SGL_FREED; list_add_tail(&sglq_entry->list, &els_sgl_list); } - spin_lock(&phba->hbalock); + spin_lock_irq(&phba->hbalock); list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); - spin_unlock(&phba->hbalock); + spin_unlock_irq(&phba->hbalock); } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { /* els xri-sgl shrinked */ xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt; @@ -2979,9 +3150,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) phba->sli4_hba.scsi_xri_cnt, phba->sli4_hba.scsi_xri_max); - spin_lock_irq(&phba->scsi_buf_list_lock); - list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list); - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock(&phba->scsi_buf_list_put_lock); + list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); + list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); + spin_unlock(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { /* max scsi xri shrinked below the allocated scsi buffers */ @@ -2995,9 +3169,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) psb->dma_handle); kfree(psb); } - spin_lock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); } /* update xris associated to remaining allocated scsi buffers */ @@ -3015,9 +3189,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) psb->cur_iocbq.sli4_lxritag = lxri; psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } - spin_lock(&phba->scsi_buf_list_lock); - list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list); - spin_unlock(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock(&phba->scsi_buf_list_put_lock); + list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + spin_unlock(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); return 0; @@ -3198,14 +3375,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) stat = 1; goto finished; } - if (time >= 30 * HZ) { + if (time >= msecs_to_jiffies(30 * 1000)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0461 Scanning longer than 30 " "seconds. Continuing initialization\n"); stat = 1; goto finished; } - if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { + if (time >= msecs_to_jiffies(15 * 1000) && + phba->link_state <= LPFC_LINK_DOWN) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0465 Link down longer than 15 " "seconds. Continuing initialization\n"); @@ -3217,7 +3395,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) goto finished; if (vport->num_disc_nodes || vport->fc_prli_sent) goto finished; - if (vport->fc_map_cnt == 0 && time < 2 * HZ) + if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) goto finished; if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) goto finished; @@ -4189,7 +4367,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. 
*/ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; @@ -4486,7 +4665,7 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba) pci_save_state(pdev); /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ - if (pci_find_capability(pdev, PCI_CAP_ID_EXP)) + if (pci_is_pcie(pdev)) pdev->needs_freset = 1; return 0; @@ -4522,8 +4701,6 @@ lpfc_disable_pci_dev(struct lpfc_hba *phba) /* Release PCI resource and disable PCI device */ pci_release_selected_regions(pdev, bars); pci_disable_device(pdev); - /* Null out PCI private reference to driver */ - pci_set_drvdata(pdev, NULL); return; } @@ -4545,7 +4722,10 @@ lpfc_reset_hba(struct lpfc_hba *phba) phba->link_state = LPFC_HBA_ERROR; return; } - lpfc_offline_prep(phba, LPFC_MBX_WAIT); + if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) + lpfc_offline_prep(phba, LPFC_MBX_WAIT); + else + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_offline(phba); lpfc_sli_brdrestart(phba); lpfc_online(phba); @@ -4681,23 +4861,52 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) return -ENOMEM; /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. - * 2 segments are added since the IOCB needs a command and response bde. */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + /* Initialize the host templates the configured values. */ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + + /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ if (phba->cfg_enable_bg) { - phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; - phba->cfg_sg_dma_buf_size += - phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); + /* + * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, + * the FCP rsp, and a BDE for each. Sice we have no control + * over how many protection data segments the SCSI Layer + * will hand us (ie: there could be one for every block + * in the IO), we just allocate enough BDEs to accomidate + * our max amount and we need to limit lpfc_sg_seg_cnt to + * minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); + + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; + + /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; + } else { + /* + * The scsi_buf for a regular I/O will hold the FCP cmnd, + * the FCP rsp, a BDE for each, and a BDE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + + /* Total BDEs in BPL for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; } - /* Also reinitialize the host templates with new values. 
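/*
 * The buffer sizing above simply lays the pieces of one per-command DMA
 * buffer end to end: the FCP command, the FCP response, and one BDE per data
 * segment plus the two reserved BDEs that point at the command and response
 * themselves.  Worked example with stand-in sizes (the real sizes come from
 * the structures in lpfc_hw.h):
 */
#include <stdio.h>

#define FCP_CMND_SZ	32	/* assumed sizeof(struct fcp_cmnd) */
#define FCP_RSP_SZ	96	/* assumed sizeof(struct fcp_rsp) */
#define BDE64_SZ	12	/* assumed sizeof(struct ulp_bde64) */

int main(void)
{
	unsigned int sg_seg_cnt = 64;	/* the lpfc_sg_seg_cnt module parameter */
	unsigned int buf_size = FCP_CMND_SZ + FCP_RSP_SZ +
				(sg_seg_cnt + 2) * BDE64_SZ;

	printf("sg_dma_buf_size = %u bytes for %u data segments\n",
	       buf_size, sg_seg_cnt);
	return 0;
}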
*/ - lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt); phba->max_vpi = LPFC_MAX_VPI; /* This will be set to correct value after config_port mbox */ @@ -4763,13 +4972,17 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) static int lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) { + struct lpfc_vector_map_info *cpup; struct lpfc_sli *psli; LPFC_MBOXQ_t *mboxq; - int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; + int rc, i, hbq_count, max_buf_size; uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; struct lpfc_mqe *mqe; - int longs, sli_family; - int sges_per_segment; + int longs; + int fof_vectors = 0; + + /* Get all the module params for configuring this host */ + lpfc_get_cfgparam(phba); /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); @@ -4814,20 +5027,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) sizeof(struct lpfc_mbox_ext_buf_ctx)); INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); - /* - * We need to do a READ_CONFIG mailbox command here before - * calling lpfc_get_cfgparam. For VFs this will report the - * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. - * All of the resources allocated - * for this Port are tied to these values. - */ - /* Get all the module params for configuring this host */ - lpfc_get_cfgparam(phba); phba->max_vpi = LPFC_MAX_VPI; - /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */ - phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count; - /* This will be set to correct value after the read_config mbox */ phba->max_vports = 0; @@ -4837,11 +5038,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; - /* With BlockGuard we can have multiple SGEs per Data Segemnt */ - sges_per_segment = 1; - if (phba->cfg_enable_bg) - sges_per_segment = 2; - /* * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. @@ -4852,43 +5048,71 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) sizeof(struct lpfc_sli_ring), GFP_KERNEL); if (!phba->sli.ring) return -ENOMEM; + /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * It doesn't matter what family our adapter is in, we are + * limited to 2 Pages, 512 SGEs, for our SGL. + * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp + */ + max_buf_size = (2 * SLI4_PAGE_SIZE); + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2) + phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; + + /* + * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. - * 2 segments are added since the IOCB needs a command and response bde. - * To insure that the scsi sgl does not cross a 4k page boundary only - * sgl sizes of must be a power of 2. 
*/ - buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + - (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) * - sizeof(struct sli4_sge))); - - sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); - max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; - switch (sli_family) { - case LPFC_SLI_INTF_FAMILY_BE2: - case LPFC_SLI_INTF_FAMILY_BE3: - /* There is a single hint for BE - 2 pages per BPL. */ - if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) == - LPFC_SLI_INTF_SLI_HINT1_1) - max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; - break; - case LPFC_SLI_INTF_FAMILY_LNCR_A0: - case LPFC_SLI_INTF_FAMILY_LNCR_B0: - default: - break; + + if (phba->cfg_enable_bg) { + /* + * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, + * the FCP rsp, and a SGE for each. Sice we have no control + * over how many protection data segments the SCSI Layer + * will hand us (ie: there could be one for every block + * in the IO), we just allocate enough SGEs to accomidate + * our max amount and we need to limit lpfc_sg_seg_cnt to + * minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + max_buf_size; + + /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; + + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF; + } else { + /* + * The scsi_buf for a regular I/O will hold the FCP cmnd, + * the FCP rsp, a SGE for each, and a SGE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); + + /* Total SGEs for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; + /* + * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need + * to post 1 page for the SGL. + */ } - for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; - dma_buf_size < max_buf_size && buf_size > dma_buf_size; - dma_buf_size = dma_buf_size << 1) - ; - if (dma_buf_size == max_buf_size) - phba->cfg_sg_seg_cnt = (dma_buf_size - - sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - - (2 * sizeof(struct sli4_sge))) / - sizeof(struct sli4_sge); - phba->cfg_sg_dma_buf_size = dma_buf_size; + /* Initialize the host templates with the updated values. */ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + + if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) + phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; + else + phba->cfg_sg_dma_buf_size = + SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt); /* Initialize buffer queue management fields */ hbq_count = lpfc_sli_hbq_count(); @@ -4961,6 +5185,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) rc = lpfc_sli4_read_config(phba); if (unlikely(rc)) goto out_free_bsmbx; + rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); + if (unlikely(rc)) + goto out_free_bsmbx; /* IF Type 0 ports get initialized now. 
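/*
 * The loop deleted above grew the DMA buffer size in powers of two until it
 * covered the computed length, which guaranteed an SGL never straddled a 4K
 * boundary; the replacement just rounds the computed length up to the SLI4
 * page size.  Both roundings side by side (4096-byte pages assumed):
 */
#include <stdio.h>

#define PAGE_SZ		4096u
#define PAGE_ALIGN(x)	(((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

static unsigned int next_pow2_at_least(unsigned int min, unsigned int need)
{
	unsigned int sz = min;

	while (sz < need)	/* the old "dma_buf_size <<= 1" walk */
		sz <<= 1;
	return sz;
}

int main(void)
{
	unsigned int need = 9000;	/* some computed buffer length */

	printf("power of two: %u, page aligned: %u\n",
	       next_pow2_at_least(512, need), PAGE_ALIGN(need));
	return 0;
}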
*/ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == @@ -5018,6 +5245,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) } } mempool_free(mboxq, phba->mbox_mem_pool); + + /* Verify OAS is supported */ + lpfc_sli4_oas_verify(phba); + if (phba->cfg_fof) + fof_vectors = 1; + /* Verify all the SLI4 queues */ rc = lpfc_sli4_queue_verify(phba); if (rc) @@ -5059,7 +5292,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * - phba->cfg_fcp_io_channel), GFP_KERNEL); + (fof_vectors + phba->cfg_fcp_io_channel)), + GFP_KERNEL); if (!phba->sli4_hba.fcp_eq_hdl) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2572 Failed allocate memory for " @@ -5069,7 +5303,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) } phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * - phba->cfg_fcp_io_channel), GFP_KERNEL); + (fof_vectors + + phba->cfg_fcp_io_channel)), GFP_KERNEL); if (!phba->sli4_hba.msix_entries) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2573 Failed allocate memory for msi-x " @@ -5078,6 +5313,41 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_fcp_eq_hdl; } + phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) * + phba->sli4_hba.num_present_cpu), + GFP_KERNEL); + if (!phba->sli4_hba.cpu_map) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3327 Failed allocate memory for msi-x " + "interrupt vector mapping\n"); + rc = -ENOMEM; + goto out_free_msix; + } + if (lpfc_used_cpu == NULL) { + lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu), + GFP_KERNEL); + if (!lpfc_used_cpu) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3335 Failed allocate memory for msi-x " + "interrupt vector mapping\n"); + kfree(phba->sli4_hba.cpu_map); + rc = -ENOMEM; + goto out_free_msix; + } + for (i = 0; i < lpfc_present_cpu; i++) + lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; + } + + /* Initialize io channels for round robin */ + cpup = phba->sli4_hba.cpu_map; + rc = 0; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + cpup->channel_id = rc; + rc++; + if (rc >= phba->cfg_fcp_io_channel) + rc = 0; + } + /* * Enable sr-iov virtual functions if supported and configured * through the module parameter. 
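/*
 * The initialization loop above deals IO channels out to CPUs like cards:
 * CPU i ends up with channel i modulo cfg_fcp_io_channel.  Distilled:
 */
#include <stdio.h>

int main(void)
{
	unsigned int ncpu = 8, nchan = 3;	/* assumed CPU and channel counts */
	unsigned int cpu, chan = 0;

	for (cpu = 0; cpu < ncpu; cpu++) {
		printf("cpu %u -> io channel %u\n", cpu, chan);
		if (++chan >= nchan)	/* wrap, as in the driver loop */
			chan = 0;
	}
	return 0;
}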
@@ -5097,6 +5367,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) return 0; +out_free_msix: + kfree(phba->sli4_hba.msix_entries); out_free_fcp_eq_hdl: kfree(phba->sli4_hba.fcp_eq_hdl); out_free_fcf_rr_bmask: @@ -5126,6 +5398,12 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) { struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + /* Free memory allocated for msi-x interrupt vector to CPU mapping */ + kfree(phba->sli4_hba.cpu_map); + phba->sli4_hba.num_present_cpu = 0; + phba->sli4_hba.num_online_cpu = 0; + phba->sli4_hba.curr_disp_cpu = 0; + /* Free memory allocated for msi-x interrupt vector entries */ kfree(phba->sli4_hba.msix_entries); @@ -5234,8 +5512,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) init_waitqueue_head(&phba->work_waitq); /* Initialize the scsi buffer list used by driver for scsi IO */ - spin_lock_init(&phba->scsi_buf_list_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); + spin_lock_init(&phba->scsi_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); + spin_lock_init(&phba->scsi_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); /* Initialize the fabric iocb list */ INIT_LIST_HEAD(&phba->fabric_iocb_list); @@ -5246,6 +5526,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) /* Initialize FCF connection rec list */ INIT_LIST_HEAD(&phba->fcf_conn_rec_list); + /* Initialize OAS configuration list */ + spin_lock_init(&phba->devicelock); + INIT_LIST_HEAD(&phba->luns); + return 0; } @@ -6521,12 +6805,14 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) goto read_cfg_out; /* Reset the DFT_HBA_Q_DEPTH to the max xri */ - if (phba->cfg_hba_queue_depth > - (phba->sli4_hba.max_cfg_param.max_xri - - lpfc_sli4_get_els_iocb_cnt(phba))) - phba->cfg_hba_queue_depth = - phba->sli4_hba.max_cfg_param.max_xri - - lpfc_sli4_get_els_iocb_cnt(phba); + length = phba->sli4_hba.max_cfg_param.max_xri - + lpfc_sli4_get_els_iocb_cnt(phba); + if (phba->cfg_hba_queue_depth > length) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3361 HBA queue depth changed from %d to %d\n", + phba->cfg_hba_queue_depth, length); + phba->cfg_hba_queue_depth = length; + } if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_2) @@ -6670,7 +6956,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) int cfg_fcp_io_channel; uint32_t cpu; uint32_t i = 0; - + int fof_vectors = phba->cfg_fof ? 
1 : 0; /* * Sanity check for configured queue parameters against the run-time @@ -6680,19 +6966,25 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) /* Sanity check on HBA EQ parameters */ cfg_fcp_io_channel = phba->cfg_fcp_io_channel; - /* It doesn't make sense to have more io channels then CPUs */ - for_each_online_cpu(cpu) { - i++; + /* It doesn't make sense to have more io channels then online CPUs */ + for_each_present_cpu(cpu) { + if (cpu_online(cpu)) + i++; } + phba->sli4_hba.num_online_cpu = i; + phba->sli4_hba.num_present_cpu = lpfc_present_cpu; + phba->sli4_hba.curr_disp_cpu = 0; + if (i < cfg_fcp_io_channel) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3188 Reducing IO channels to match number of " - "CPUs: from %d to %d\n", cfg_fcp_io_channel, i); + "online CPUs: from %d to %d\n", + cfg_fcp_io_channel, i); cfg_fcp_io_channel = i; } - if (cfg_fcp_io_channel > + if (cfg_fcp_io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) { if (phba->sli4_hba.max_cfg_param.max_eq < LPFC_FCP_IO_CHAN_MIN) { @@ -6709,14 +7001,11 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) "available EQs: from %d to %d\n", cfg_fcp_io_channel, phba->sli4_hba.max_cfg_param.max_eq); - cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq; + cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq - + fof_vectors; } - /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */ - /* The actual number of FCP event queues adopted */ - phba->cfg_fcp_eq_count = cfg_fcp_io_channel; - phba->cfg_fcp_wq_count = cfg_fcp_io_channel; phba->cfg_fcp_io_channel = cfg_fcp_io_channel; /* Get EQ depth from module parameter, fake the default for now */ @@ -6924,6 +7213,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) } phba->sli4_hba.dat_rq = qdesc; + /* Create the Queues needed for Flash Optimized Fabric operations */ + if (phba->cfg_fof) + lpfc_fof_queue_create(phba); return 0; out_error: @@ -6948,6 +7240,9 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) { int idx; + if (phba->cfg_fof) + lpfc_fof_queue_destroy(phba); + if (phba->sli4_hba.hba_eq != NULL) { /* Release HBA event queue */ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { @@ -6987,19 +7282,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) phba->sli4_hba.fcp_wq = NULL; } - if (phba->pci_bar0_memmap_p) { - iounmap(phba->pci_bar0_memmap_p); - phba->pci_bar0_memmap_p = NULL; - } - if (phba->pci_bar2_memmap_p) { - iounmap(phba->pci_bar2_memmap_p); - phba->pci_bar2_memmap_p = NULL; - } - if (phba->pci_bar4_memmap_p) { - iounmap(phba->pci_bar4_memmap_p); - phba->pci_bar4_memmap_p = NULL; - } - /* Release FCP CQ mapping array */ if (phba->sli4_hba.fcp_cq_map != NULL) { kfree(phba->sli4_hba.fcp_cq_map); @@ -7345,8 +7627,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.hdr_rq->queue_id, phba->sli4_hba.dat_rq->queue_id, phba->sli4_hba.els_cq->queue_id); + + if (phba->cfg_fof) { + rc = lpfc_fof_queue_setup(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0549 Failed setup of FOF Queues: " + "rc = 0x%x\n", rc); + goto out_destroy_els_rq; + } + } return 0; +out_destroy_els_rq: + lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); out_destroy_els_wq: lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); out_destroy_mbx_wq: @@ -7385,6 +7679,9 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) { int fcp_qidx; + /* Unset the queues created for Flash Optimized Fabric operations */ + if (phba->cfg_fof) + lpfc_fof_queue_destroy(phba); /* Unset mailbox command work queue */ lpfc_mq_destroy(phba, 
phba->sli4_hba.mbx_wq); /* Unset ELS work queue */ @@ -7717,8 +8014,13 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) out: /* Catch the not-ready port failure after a port reset. */ - if (num_resets >= MAX_IF_TYPE_2_RESETS) + if (num_resets >= MAX_IF_TYPE_2_RESETS) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3317 HBA not functional: IP Reset Failed " + "after (%d) retries, try: " + "echo fw_reset > board_mode\n", num_resets); rc = -ENODEV; + } return rc; } @@ -7783,9 +8085,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) * particular PCI BARs regions is dependent on the type of * SLI4 device. */ - if (pci_resource_start(pdev, 0)) { - phba->pci_bar0_map = pci_resource_start(pdev, 0); - bar0map_len = pci_resource_len(pdev, 0); + if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { + phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); + bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); /* * Map SLI4 PCI Config Space Register base to a kernel virtual @@ -7799,6 +8101,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) "registers.\n"); goto out; } + phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; /* Set up BAR0 PCI config space register memory map */ lpfc_sli4_bar0_register_memmap(phba, if_type); } else { @@ -7821,13 +8124,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) } if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && - (pci_resource_start(pdev, 2))) { + (pci_resource_start(pdev, PCI_64BIT_BAR2))) { /* * Map SLI4 if type 0 HBA Control Register base to a kernel * virtual address and setup the registers. */ - phba->pci_bar1_map = pci_resource_start(pdev, 2); - bar1map_len = pci_resource_len(pdev, 2); + phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); + bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); phba->sli4_hba.ctrl_regs_memmap_p = ioremap(phba->pci_bar1_map, bar1map_len); if (!phba->sli4_hba.ctrl_regs_memmap_p) { @@ -7835,17 +8138,18 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) "ioremap failed for SLI4 HBA control registers.\n"); goto out_iounmap_conf; } + phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; lpfc_sli4_bar1_register_memmap(phba); } if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && - (pci_resource_start(pdev, 4))) { + (pci_resource_start(pdev, PCI_64BIT_BAR4))) { /* * Map SLI4 if type 0 HBA Doorbell Register base to a kernel * virtual address and setup the registers. */ - phba->pci_bar2_map = pci_resource_start(pdev, 4); - bar2map_len = pci_resource_len(pdev, 4); + phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); + bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); phba->sli4_hba.drbl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); if (!phba->sli4_hba.drbl_regs_memmap_p) { @@ -7853,6 +8157,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) "ioremap failed for SLI4 HBA doorbell registers.\n"); goto out_iounmap_ctrl; } + phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); if (error) goto out_iounmap_all; @@ -8183,6 +8488,287 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba) } /** + * lpfc_find_next_cpu - Find next available CPU that matches the phys_id + * @phba: pointer to lpfc hba data structure. + * + * Find next available CPU to use for IRQ to CPU affinity. 
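A self-contained sketch of the search pattern lpfc_find_next_cpu() implements below, using a reduced stand-in for lpfc_vector_map_info (struct and field names here are illustrative, not the driver's): scan for an online CPU on the requested physical socket whose vector slot is still empty, and report exhaustion so the caller can clear its used-CPU bookkeeping and rescan.

#include <stdint.h>

#define MAP_EMPTY 0xffff

struct vec_map {                /* hypothetical, reduced cpu_map entry */
	uint16_t phys_id;       /* physical socket id */
	uint16_t irq;           /* assigned vector, or MAP_EMPTY */
	int online;
};

/* Return the first online CPU on socket phys_id with no vector yet,
 * or -1 once every such CPU is taken. */
static int find_next_cpu(const struct vec_map *map, int ncpu, uint16_t phys_id)
{
	int cpu;

	for (cpu = 0; cpu < ncpu; cpu++) {
		if (map[cpu].online &&
		    map[cpu].irq == MAP_EMPTY &&
		    map[cpu].phys_id == phys_id)
			return cpu;
	}
	return -1;      /* caller recycles used CPUs and tries again */
}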
+ */ +static int +lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) +{ + struct lpfc_vector_map_info *cpup; + int cpu; + + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + /* CPU must be online */ + if (cpu_online(cpu)) { + if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && + (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && + (cpup->phys_id == phys_id)) { + return cpu; + } + } + cpup++; + } + + /* + * If we get here, we have used ALL CPUs for the specific + * phys_id. Now we need to clear out lpfc_used_cpu and start + * reusing CPUs. + */ + + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + if (lpfc_used_cpu[cpu] == phys_id) + lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; + } + + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + /* CPU must be online */ + if (cpu_online(cpu)) { + if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && + (cpup->phys_id == phys_id)) { + return cpu; + } + } + cpup++; + } + return LPFC_VECTOR_MAP_EMPTY; +} + +/** + * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors + * @phba: pointer to lpfc hba data structure. + * @vectors: number of HBA vectors + * + * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector + * affinitization across multiple physical CPUs (numa nodes). + * In addition, this routine will assign an IO channel for each CPU + * to use when issuing I/Os. + */ +static int +lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) +{ + int i, idx, saved_chann, used_chann, cpu, phys_id; + int max_phys_id, min_phys_id; + int num_io_channel, first_cpu, chan; + struct lpfc_vector_map_info *cpup; +#ifdef CONFIG_X86 + struct cpuinfo_x86 *cpuinfo; +#endif + struct cpumask *mask; + uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1]; + + /* If there is no mapping, just return */ + if (!phba->cfg_fcp_cpu_map) + return 1; + + /* Init cpu_map array */ + memset(phba->sli4_hba.cpu_map, 0xff, + (sizeof(struct lpfc_vector_map_info) * + phba->sli4_hba.num_present_cpu)); + + max_phys_id = 0; + min_phys_id = 0xff; + phys_id = 0; + num_io_channel = 0; + first_cpu = LPFC_VECTOR_MAP_EMPTY; + + /* Update CPU map with physical id and core id of each CPU */ + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { +#ifdef CONFIG_X86 + cpuinfo = &cpu_data(cpu); + cpup->phys_id = cpuinfo->phys_proc_id; + cpup->core_id = cpuinfo->cpu_core_id; +#else + /* No distinction between CPUs for other platforms */ + cpup->phys_id = 0; + cpup->core_id = 0; +#endif + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3328 CPU physid %d coreid %d\n", + cpup->phys_id, cpup->core_id); + + if (cpup->phys_id > max_phys_id) + max_phys_id = cpup->phys_id; + if (cpup->phys_id < min_phys_id) + min_phys_id = cpup->phys_id; + cpup++; + } + + phys_id = min_phys_id; + /* Now associate the HBA vectors with specific CPUs */ + for (idx = 0; idx < vectors; idx++) { + cpup = phba->sli4_hba.cpu_map; + cpu = lpfc_find_next_cpu(phba, phys_id); + if (cpu == LPFC_VECTOR_MAP_EMPTY) { + + /* Try for all phys_id's */ + for (i = 1; i < max_phys_id; i++) { + phys_id++; + if (phys_id > max_phys_id) + phys_id = min_phys_id; + cpu = lpfc_find_next_cpu(phba, phys_id); + if (cpu == LPFC_VECTOR_MAP_EMPTY) + continue; + goto found; + } + + /* Use round robin for scheduling */ + phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN; + chan = 0; + cpup = phba->sli4_hba.cpu_map; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + cpup->channel_id = chan; + cpup++; + chan++; + if (chan >=
phba->cfg_fcp_io_channel) + chan = 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3329 Cannot set affinity: " + "Error mapping vector %d (%d)\n", + idx, vectors); + return 0; + } +found: + cpup += cpu; + if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP) + lpfc_used_cpu[cpu] = phys_id; + + /* Associate vector with selected CPU */ + cpup->irq = phba->sli4_hba.msix_entries[idx].vector; + + /* Associate IO channel with selected CPU */ + cpup->channel_id = idx; + num_io_channel++; + + if (first_cpu == LPFC_VECTOR_MAP_EMPTY) + first_cpu = cpu; + + /* Now affinitize to the selected CPU */ + mask = &cpup->maskbits; + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); + i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx]. + vector, mask); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3330 Set Affinity: CPU %d channel %d " + "irq %d (%x)\n", + cpu, cpup->channel_id, + phba->sli4_hba.msix_entries[idx].vector, i); + + /* Spread vector mapping across multiple physical CPU nodes */ + phys_id++; + if (phys_id > max_phys_id) + phys_id = min_phys_id; + } + + /* + * Finally fill in the IO channel for any remaining CPUs. + * At this point, all IO channels have been assigned to a specific + * MSIx vector, mapped to a specific CPU. + * Base the remaining IO channel assignments on IO channels already + * assigned to other CPUs on the same phys_id. + */ + for (i = min_phys_id; i <= max_phys_id; i++) { + /* + * If there are no io channels already mapped to + * this phys_id, just round robin through the io_channels. + * Setup chann[] for round robin. + */ + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + chann[idx] = idx; + + saved_chann = 0; + used_chann = 0; + + /* + * First build a list of IO channels already assigned + * to this phys_id before reassigning the same IO + * channels to the remaining CPUs. + */ + cpup = phba->sli4_hba.cpu_map; + cpu = first_cpu; + cpup += cpu; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; + idx++) { + if (cpup->phys_id == i) { + /* + * Save any IO channels that are + * already mapped to this phys_id.
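When every socket is exhausted, the patch falls back to LPFC_FCP_SCHED_ROUND_ROBIN and stripes channel ids across all CPUs; the striping loop above is equivalent to a plain modulo assignment, roughly:

/* Hypothetical fallback: stripe io-channel ids across all CPUs. */
static void stripe_channels(int *channel_id, int ncpu, int nchan)
{
	int cpu;

	for (cpu = 0; cpu < ncpu; cpu++)
		channel_id[cpu] = cpu % nchan;  /* wraps every nchan CPUs */
}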
+ */ + if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) { + chann[saved_chann] = + cpup->channel_id; + saved_chann++; + goto out; + } + + /* See if we are using round-robin */ + if (saved_chann == 0) + saved_chann = + phba->cfg_fcp_io_channel; + + /* Associate next IO channel with CPU */ + cpup->channel_id = chann[used_chann]; + num_io_channel++; + used_chann++; + if (used_chann == saved_chann) + used_chann = 0; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3331 Set IO_CHANN " + "CPU %d channel %d\n", + idx, cpup->channel_id); + } +out: + cpu++; + if (cpu >= phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + cpu = 0; + } else { + cpup++; + } + } + } + + if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { + if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) { + cpup->channel_id = 0; + num_io_channel++; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3332 Assign IO_CHANN " + "CPU %d channel %d\n", + idx, cpup->channel_id); + } + cpup++; + } + } + + /* Sanity check */ + if (num_io_channel != phba->sli4_hba.num_present_cpu) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3333 Set affinity mismatch:" + "%d chann != %d cpus: %d vectors\n", + num_io_channel, phba->sli4_hba.num_present_cpu, + vectors); + + /* Enable using cpu affinity for scheduling */ + phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; + return 1; +} + + +/** * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * @@ -8213,6 +8799,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) /* Configure MSI-X capability structure */ vectors = phba->cfg_fcp_io_channel; + if (phba->cfg_fof) { + phba->sli4_hba.msix_entries[index].entry = index; + vectors++; + } enable_msix_vectors: rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, vectors); @@ -8233,9 +8823,7 @@ enable_msix_vectors: phba->sli4_hba.msix_entries[index].vector, phba->sli4_hba.msix_entries[index].entry); - /* - * Assign MSI-X vectors to interrupt handlers - */ + /* Assign MSI-X vectors to interrupt handlers */ for (index = 0; index < vectors; index++) { memset(&phba->sli4_hba.handler_name[index], 0, 16); sprintf((char *)&phba->sli4_hba.handler_name[index], @@ -8244,7 +8832,15 @@ enable_msix_vectors: phba->sli4_hba.fcp_eq_hdl[index].idx = index; phba->sli4_hba.fcp_eq_hdl[index].phba = phba; atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); - rc = request_irq(phba->sli4_hba.msix_entries[index].vector, + if (phba->cfg_fof && (index == (vectors - 1))) + rc = request_irq( + phba->sli4_hba.msix_entries[index].vector, + &lpfc_sli4_fof_intr_handler, IRQF_SHARED, + (char *)&phba->sli4_hba.handler_name[index], + &phba->sli4_hba.fcp_eq_hdl[index]); + else + rc = request_irq( + phba->sli4_hba.msix_entries[index].vector, &lpfc_sli4_hba_intr_handler, IRQF_SHARED, (char *)&phba->sli4_hba.handler_name[index], &phba->sli4_hba.fcp_eq_hdl[index]); @@ -8256,6 +8852,9 @@ enable_msix_vectors: } } + if (phba->cfg_fof) + vectors--; + if (vectors != phba->cfg_fcp_io_channel) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3238 Reducing IO channels to match number of " @@ -8263,13 +8862,18 @@ enable_msix_vectors: phba->cfg_fcp_io_channel, vectors); phba->cfg_fcp_io_channel = vectors; } + + lpfc_sli4_set_affinity(phba, vectors); return rc; cfg_fail_out: /* free the irq already requested */ - for (--index; index >= 0; index--) + for (--index; index >= 0; index--) { + 
irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. + vector, NULL); free_irq(phba->sli4_hba.msix_entries[index].vector, &phba->sli4_hba.fcp_eq_hdl[index]); + } msi_fail_out: /* Unconfigure MSI-X capability structure */ @@ -8290,10 +8894,16 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba) int index; /* Free up MSI-X multi-message vectors */ - for (index = 0; index < phba->cfg_fcp_io_channel; index++) + for (index = 0; index < phba->cfg_fcp_io_channel; index++) { + irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. + vector, NULL); free_irq(phba->sli4_hba.msix_entries[index].vector, &phba->sli4_hba.fcp_eq_hdl[index]); - + } + if (phba->cfg_fof) { + free_irq(phba->sli4_hba.msix_entries[index].vector, + &phba->sli4_hba.fcp_eq_hdl[index]); + } /* Disable MSI-X */ pci_disable_msix(phba->pcidev); @@ -8343,6 +8953,10 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba) phba->sli4_hba.fcp_eq_hdl[index].phba = phba; } + if (phba->cfg_fof) { + phba->sli4_hba.fcp_eq_hdl[index].idx = index; + phba->sli4_hba.fcp_eq_hdl[index].phba = phba; + } return 0; } @@ -8425,6 +9039,12 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. fcp_eq_in_use, 1); } + if (phba->cfg_fof) { + phba->sli4_hba.fcp_eq_hdl[index].idx = index; + phba->sli4_hba.fcp_eq_hdl[index].phba = phba; + atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. + fcp_eq_in_use, 1); + } } } return intr_mode; @@ -8735,10 +9355,12 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); + sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters); sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); + sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, mbx_sli4_parameters); sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, @@ -8998,7 +9620,6 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) /* Disable interrupt */ lpfc_sli_disable_intr(phba); - pci_set_drvdata(pdev, NULL); scsi_host_put(shost); /* @@ -9153,9 +9774,6 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) static void lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2723 PCI channel I/O abort preparing for recovery\n"); @@ -9163,8 +9781,7 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) * There may be errored I/Os through HBA, abort all I/Os on txcmplq * and let the SCSI mid-layer to retry them to recover. 
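Note the ordering the unwind and teardown paths above establish: irq_set_affinity_hint(vector, NULL) is called before free_irq(), so no stale hint is left behind once the vector is released. A generic sketch of the pairing, with illustrative names:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch: release MSI-X vectors that were given affinity hints. */
static void teardown_vectors(struct msix_entry *entries, int nvec,
			     void *dev_id)
{
	int i;

	for (i = 0; i < nvec; i++) {
		/* Drop the hint first so none is left dangling... */
		irq_set_affinity_hint(entries[i].vector, NULL);
		/* ...then release the handler request_irq() installed. */
		free_irq(entries[i].vector, dev_id);
	}
}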
*/ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); } /** @@ -9187,15 +9804,15 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_fcp_rings(phba); + /* stop all timers */ lpfc_stop_hba_timers(phba); /* Disable interrupt and pci device */ lpfc_sli_disable_intr(phba); pci_disable_device(phba->pcidev); - - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); } /** @@ -9907,17 +10524,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev) static void lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2828 PCI channel I/O abort preparing for recovery\n"); /* * There may be errored I/Os through HBA, abort all I/Os on txcmplq * and let the SCSI mid-layer to retry them to recover. */ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); } /** @@ -9940,6 +10553,9 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_fcp_rings(phba); + /* stop all timers */ lpfc_stop_hba_timers(phba); @@ -9947,9 +10563,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) lpfc_sli4_disable_intr(phba); lpfc_sli4_queue_destroy(phba); pci_disable_device(phba->pcidev); - - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); } /** @@ -10369,32 +10982,164 @@ lpfc_io_resume(struct pci_dev *pdev) } /** - * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace - * @inode: pointer to the inode representing the lpfcmgmt device - * @filep: pointer to the file representing the open lpfcmgmt device + * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter + * @phba: pointer to lpfc hba data structure. + * + * This routine checks to see if OAS is supported for this adapter. If + * supported, the configure Flash Optimized Fabric flag is set. Otherwise, + * the enable oas flag is cleared and the pool created for OAS device data + * is destroyed. * - * This routine puts a reference count on the lpfc module whenever the - * character device is opened **/ -static int -lpfc_mgmt_open(struct inode *inode, struct file *filep) +void +lpfc_sli4_oas_verify(struct lpfc_hba *phba) +{ + + if (!phba->cfg_EnableXLane) + return; + + if (phba->sli4_hba.pc_sli4_params.oas_supported) { + phba->cfg_fof = 1; + } else { + phba->cfg_fof = 0; + if (phba->device_data_mem_pool) + mempool_destroy(phba->device_data_mem_pool); + phba->device_data_mem_pool = NULL; + } + + return; +} + +/** + * lpfc_fof_queue_setup - Set up all the fof queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up all the fof queues for the FC HBA + * operation. 
+ * + * Return codes + * 0 - successful + * -ENOMEM - No available memory + **/ +int +lpfc_fof_queue_setup(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + int rc; + + rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX); + if (rc) + return -ENOMEM; + + if (phba->cfg_fof) { + + rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq, + phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP); + if (rc) + goto out_oas_cq; + + rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq, + phba->sli4_hba.oas_cq, LPFC_FCP); + if (rc) + goto out_oas_wq; + + phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING]; + phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING]; + } + + return 0; + +out_oas_wq: + lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq); +out_oas_cq: + lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq); + return rc; + +} + +/** + * lpfc_fof_queue_create - Create all the fof queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate all the fof queues for the FC HBA + * operation. For each SLI4 queue type, the parameters such as queue entry + * count (queue depth) shall be taken from the module parameter. For now, + * we just use some constant number as a placeholder. + * + * Return codes + * 0 - successful + * -ENOMEM - No available memory + * -EIO - The mailbox failed to complete successfully. + **/ +int +lpfc_fof_queue_create(struct lpfc_hba *phba) { - try_module_get(THIS_MODULE); + struct lpfc_queue *qdesc; + + /* Create FOF EQ */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, + phba->sli4_hba.eq_ecount); + if (!qdesc) + goto out_error; + + phba->sli4_hba.fof_eq = qdesc; + + if (phba->cfg_fof) { + + /* Create OAS CQ */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) + goto out_error; + + phba->sli4_hba.oas_cq = qdesc; + + /* Create OAS WQ */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount); + if (!qdesc) + goto out_error; + + phba->sli4_hba.oas_wq = qdesc; + + } return 0; + +out_error: + lpfc_fof_queue_destroy(phba); + return -ENOMEM; } /** - * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace - * @inode: pointer to the inode representing the lpfcmgmt device - * @filep: pointer to the file representing the open lpfcmgmt device + * lpfc_fof_queue_destroy - Destroy all the fof queues + * @phba: pointer to lpfc hba data structure. * - * This routine removes a reference count from the lpfc module when the - * character device is closed + * This routine is invoked to release all the SLI4 queues associated with + * the FC HBA operation.
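lpfc_fof_queue_setup() above follows the usual create-in-order, unwind-in-reverse goto idiom: EQ first, then the CQ fed by it, then the WQ completing into the CQ, destroying only what already exists on failure. Reduced to its skeleton (helper names are placeholders for lpfc_eq_create() and friends, not real lpfc symbols):

/* Placeholder helpers standing in for the lpfc queue-create routines. */
int eq_create(void *hba);
int eq_destroy(void *hba);
int cq_create(void *hba);
int cq_destroy(void *hba);
int wq_create(void *hba);

/* Sketch of the create-in-order, unwind-in-reverse pattern. */
static int fof_setup_sketch(void *hba)
{
	int rc;

	rc = eq_create(hba);    /* 1. event queue */
	if (rc)
		return rc;
	rc = cq_create(hba);    /* 2. completion queue, fed by the EQ */
	if (rc)
		goto undo_eq;
	rc = wq_create(hba);    /* 3. work queue, completing into the CQ */
	if (rc)
		goto undo_cq;
	return 0;

undo_cq:
	cq_destroy(hba);
undo_eq:
	eq_destroy(hba);
	return rc;
}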
+ * + * Return codes + * 0 - successful **/ -static int -lpfc_mgmt_release(struct inode *inode, struct file *filep) +int +lpfc_fof_queue_destroy(struct lpfc_hba *phba) { - module_put(THIS_MODULE); + /* Release FOF Event queue */ + if (phba->sli4_hba.fof_eq != NULL) { + lpfc_sli4_queue_free(phba->sli4_hba.fof_eq); + phba->sli4_hba.fof_eq = NULL; + } + + /* Release OAS Completion queue */ + if (phba->sli4_hba.oas_cq != NULL) { + lpfc_sli4_queue_free(phba->sli4_hba.oas_cq); + phba->sli4_hba.oas_cq = NULL; + } + + /* Release OAS Work queue */ + if (phba->sli4_hba.oas_wq != NULL) { + lpfc_sli4_queue_free(phba->sli4_hba.oas_wq); + phba->sli4_hba.oas_wq = NULL; + } return 0; } @@ -10515,8 +11260,7 @@ static struct pci_driver lpfc_driver = { }; static const struct file_operations lpfc_mgmt_fop = { - .open = lpfc_mgmt_open, - .release = lpfc_mgmt_release, + .owner = THIS_MODULE, }; static struct miscdevice lpfc_mgmt_dev = { @@ -10540,6 +11284,7 @@ static struct miscdevice lpfc_mgmt_dev = { static int __init lpfc_init(void) { + int cpu; int error = 0; printk(LPFC_MODULE_DESC "\n"); @@ -10566,6 +11311,13 @@ lpfc_init(void) return -ENOMEM; } } + + /* Initialize in case vector mapping is needed */ + lpfc_used_cpu = NULL; + lpfc_present_cpu = 0; + for_each_present_cpu(cpu) + lpfc_present_cpu++; + error = pci_register_driver(&lpfc_driver); if (error) { fc_release_transport(lpfc_transport_template); @@ -10604,6 +11356,7 @@ lpfc_exit(void) (1L << _dump_buf_dif_order), _dump_buf_dif); free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); } + kfree(lpfc_used_cpu); } module_init(lpfc_init); diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index baf53e6c2bd..2a4e5d21eab 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -37,6 +37,7 @@ #define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ #define LOG_FIP 0x00020000 /* FIP events */ #define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ +#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index efc9cd9def8..1f292e29d56 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -178,7 +178,8 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) mb->mbxOwner = OWN_HOST; mb->un.varDmp.cv = 1; mb->un.varDmp.type = DMP_NV_PARAMS; - mb->un.varDmp.entry_index = 0; + if (phba->sli_rev < LPFC_SLI_REV4) + mb->un.varDmp.entry_index = 0; mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID; mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE; mb->un.varDmp.co = 0; @@ -361,7 +362,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) /* NEW_FEATURE * SLI-2, Coalescing Response Feature. 
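The lpfc_mgmt_fop hunk above replaces the hand-rolled try_module_get()/module_put() open and release methods with .owner = THIS_MODULE, letting the VFS pin the module across every file operation and closing the window where the module could be unloaded between open() entry and try_module_get(). A minimal misc device using the same idiom (device name is illustrative):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>

static const struct file_operations sketch_fops = {
	.owner = THIS_MODULE,   /* VFS pins the module across open/release */
};

static struct miscdevice sketch_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "sketchmgmt",  /* illustrative name */
	.fops  = &sketch_fops,
};

static int __init sketch_init(void)
{
	return misc_register(&sketch_dev);
}

static void __exit sketch_exit(void)
{
	misc_deregister(&sketch_dev);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");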
*/ - if (phba->cfg_cr_delay) { + if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) { mb->un.varCfgLnk.cr = 1; mb->un.varCfgLnk.ci = 1; mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay; @@ -377,7 +378,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) mb->un.varCfgLnk.crtov = phba->fc_crtov; mb->un.varCfgLnk.citov = phba->fc_citov; - if (phba->cfg_ack0) + if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4)) mb->un.varCfgLnk.ack0_enable = 1; mb->mbxCommand = MBX_CONFIG_LINK; @@ -2126,33 +2127,44 @@ void lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) { struct lpfc_mbx_reg_vfi *reg_vfi; + struct lpfc_hba *phba = vport->phba; memset(mbox, 0, sizeof(*mbox)); reg_vfi = &mbox->u.mqe.un.reg_vfi; bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI); bf_set(lpfc_reg_vfi_vp, reg_vfi, 1); bf_set(lpfc_reg_vfi_vfi, reg_vfi, - vport->phba->sli4_hba.vfi_ids[vport->vfi]); - bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); - bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]); + phba->sli4_hba.vfi_ids[vport->vfi]); + bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi); + bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]); memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name)); reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]); reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); - reg_vfi->e_d_tov = vport->phba->fc_edtov; - reg_vfi->r_a_tov = vport->phba->fc_ratov; + reg_vfi->e_d_tov = phba->fc_edtov; + reg_vfi->r_a_tov = phba->fc_ratov; reg_vfi->bde.addrHigh = putPaddrHigh(phys); reg_vfi->bde.addrLow = putPaddrLow(phys); reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID); + + /* Only FC supports upd bit */ + if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) && + (vport->fc_flag & FC_VFI_REGISTERED) && + (!phba->fc_topology_changed)) { + bf_set(lpfc_reg_vfi_vp, reg_vfi, 0); + bf_set(lpfc_reg_vfi_upd, reg_vfi, 1); + } lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, "3134 Register VFI, mydid:x%x, fcfi:%d, " - " vfi:%d, vpi:%d, fc_pname:%x%x\n", + " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x" + " port_state:x%x topology chg:%d\n", vport->fc_myDID, - vport->phba->fcf.fcfi, - vport->phba->sli4_hba.vfi_ids[vport->vfi], - vport->phba->vpi_ids[vport->vpi], - reg_vfi->wwn[0], reg_vfi->wwn[1]); + phba->fcf.fcfi, + phba->sli4_hba.vfi_ids[vport->vfi], + phba->vpi_ids[vport->vpi], + reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag, + vport->port_state, phba->fc_topology_changed); } /** diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index cd86069a0ba..3fa65338d3f 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2012 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -38,10 +38,29 @@ #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_crtn.h" +#include "lpfc_logmsg.h" #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ +#define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */ +int +lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) { + size_t bytes; + int max_xri = phba->sli4_hba.max_cfg_param.max_xri; + + if (max_xri <= 0) + return -ENOMEM; + bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) * + sizeof(unsigned long); + phba->cfg_rrq_xri_bitmap_sz = bytes; + phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, + bytes); + if (!phba->active_rrq_pool) + return -ENOMEM; + else + return 0; +} /** * lpfc_mem_alloc - create and allocate all PCI and memory pools @@ -64,18 +83,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; int i; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { + /* Calculate alignment */ + if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) + i = phba->cfg_sg_dma_buf_size; + else + i = SLI4_PAGE_SIZE; + phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", phba->pcidev, phba->cfg_sg_dma_buf_size, - phba->cfg_sg_dma_buf_size, + i, 0); - else + } else { phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", phba->pcidev, phba->cfg_sg_dma_buf_size, align, 0); + } + if (!phba->lpfc_scsi_dma_buf_pool) goto fail; @@ -138,6 +165,16 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) phba->lpfc_drb_pool = NULL; } + if (phba->cfg_EnableXLane) { + phba->device_data_mem_pool = mempool_create_kmalloc_pool( + LPFC_DEVICE_DATA_POOL_SIZE, + sizeof(struct lpfc_device_data)); + if (!phba->device_data_mem_pool) + goto fail_free_hrb_pool; + } else { + phba->device_data_mem_pool = NULL; + } + return 0; fail_free_hrb_pool: pci_pool_destroy(phba->lpfc_hrb_pool); @@ -180,6 +217,7 @@ lpfc_mem_free(struct lpfc_hba *phba) { int i; struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; + struct lpfc_device_data *device_data; /* Free HBQ pools */ lpfc_sli_hbqbuf_free_all(phba); @@ -201,6 +239,10 @@ lpfc_mem_free(struct lpfc_hba *phba) /* Free NLP memory pool */ mempool_destroy(phba->nlp_mem_pool); phba->nlp_mem_pool = NULL; + if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) { + mempool_destroy(phba->active_rrq_pool); + phba->active_rrq_pool = NULL; + } /* Free mbox memory pool */ mempool_destroy(phba->mbox_mem_pool); @@ -219,6 +261,19 @@ lpfc_mem_free(struct lpfc_hba *phba) pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); phba->lpfc_scsi_dma_buf_pool = NULL; + /* Free Device Data memory pool */ + if (phba->device_data_mem_pool) { + /* Ensure all objects have been returned to the pool */ + while (!list_empty(&phba->luns)) { + device_data = list_first_entry(&phba->luns, + struct lpfc_device_data, + listentry); + list_del(&device_data->listentry); + mempool_free(device_data, phba->device_data_mem_pool); + } + mempool_destroy(phba->device_data_mem_pool); + } + phba->device_data_mem_pool = NULL; return; } diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 46128c67920..c342f6afd74 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the 
Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2012 Emulex. All rights reserved. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -203,8 +203,6 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, int lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { - LIST_HEAD(completions); - LIST_HEAD(txcmplq_completions); LIST_HEAD(abort_list); struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; @@ -216,33 +214,27 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) "Data: x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); - + /* Clean up all fabric IOs first. */ lpfc_fabric_abort_nport(ndlp); - /* First check the txq */ + /* + * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list + * of all ELS IOs that need an ABTS. The IOs need to stay on the + * txcmplq so that the abort operation completes them successfully. + */ spin_lock_irq(&phba->hbalock); - list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { - /* Check to see if iocb matches the nport we are looking for */ - if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { - /* It matches, so deque and call compl with anp error */ - list_move_tail(&iocb->list, &completions); - pring->txq_cnt--; - } - } - - /* Next check the txcmplq */ - list_splice_init(&pring->txcmplq, &txcmplq_completions); - spin_unlock_irq(&phba->hbalock); - - list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) { - /* Check to see if iocb matches the nport we are looking for */ + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { + /* Add to abort_list on NDLP match. */ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) list_add_tail(&iocb->dlist, &abort_list); } - spin_lock_irq(&phba->hbalock); - list_splice(&txcmplq_completions, &pring->txcmplq); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); spin_unlock_irq(&phba->hbalock); + /* Abort the targeted IOs and remove them from the abort list.
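The rewritten lpfc_els_abort() threads matching IOs onto a private abort list through a second list head (dlist) so they never leave the txcmplq; the later abort completions are what actually retire them. The locking shape, reduced to a sketch (match() stands in for lpfc_check_sli_ndlp()):

#include <linux/list.h>
#include <linux/spinlock.h>

struct io {
	struct list_head list;  /* lives on the ring's txcmplq */
	struct list_head dlist; /* temporary link onto the abort list */
};

static void abort_matching(spinlock_t *lock, struct list_head *txcmplq,
			   int (*match)(struct io *))
{
	struct io *io, *n;
	LIST_HEAD(abort_list);

	spin_lock_irq(lock);
	list_for_each_entry_safe(io, n, txcmplq, list) {
		if (match(io))
			/* second link: the IO stays queued on txcmplq */
			list_add_tail(&io->dlist, &abort_list);
	}
	spin_unlock_irq(lock);

	/* Walk abort_list and issue the aborts, retaking the lock per
	 * entry; the abort completions dequeue each IO from the
	 * txcmplq through the normal completion path. */
}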
*/ list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) { spin_lock_irq(&phba->hbalock); list_del_init(&iocb->dlist); @@ -250,9 +242,28 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) spin_unlock_irq(&phba->hbalock); } + INIT_LIST_HEAD(&abort_list); + + /* Now process the txq */ + spin_lock_irq(&phba->hbalock); + if (phba->sli_rev == LPFC_SLI_REV4) + spin_lock(&pring->ring_lock); + + list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { + /* Check to see if iocb matches the nport we are looking for */ + if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { + list_del_init(&iocb->list); + list_add_tail(&iocb->list, &abort_list); + } + } + + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring->ring_lock); + spin_unlock_irq(&phba->hbalock); + /* Cancel all the IOCBs from the completions list */ - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, - IOERR_SLI_ABORTED); + lpfc_sli_cancel_iocbs(phba, &abort_list, + IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); return 0; @@ -333,9 +344,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", + "0114 PLOGI chkparm OK Data: x%x x%x x%x " + "x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, - ndlp->nlp_rpi); + ndlp->nlp_rpi, vport->port_state, + vport->fc_flag); if (vport->cfg_fcp_class == 2 && sp->cls2.classValid) ndlp->nlp_fcp_info |= CLASS2; @@ -575,7 +588,7 @@ out: lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); /* 1 sec timeout */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; @@ -632,7 +645,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. 
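The mod_timer() call sites in this file all move from raw 'jiffies + HZ' arithmetic to msecs_to_jiffies(), which states the delay in milliseconds and stays one second whether the kernel is built with HZ=100, 250 or 1000. The before/after shape, side by side:

#include <linux/jiffies.h>
#include <linux/timer.h>

static void rearm_one_second(struct timer_list *t)
{
	/* Old style: one second only because 1 s happens to equal HZ
	 * ticks, and easy to get wrong for any other interval. */
	mod_timer(t, jiffies + HZ);

	/* Preferred: the interval is stated in milliseconds and the
	 * conversion to ticks is explicit and HZ-independent. */
	mod_timer(t, jiffies + msecs_to_jiffies(1000));
}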
*/ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -649,7 +663,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { /* Only try to re-login if this is NOT a Fabric Node */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -687,11 +702,15 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag &= ~NLP_FIRSTBURST; if (npr->prliType == PRLI_FCP_TYPE) { if (npr->initiatorFunc) ndlp->nlp_type |= NLP_FCP_INITIATOR; - if (npr->targetFunc) + if (npr->targetFunc) { ndlp->nlp_type |= NLP_FCP_TARGET; + if (npr->writeXferRdyDis) + ndlp->nlp_flag |= NLP_FIRSTBURST; + } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; } @@ -970,7 +989,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1304,7 +1323,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, if ((irsp->ulpStatus) || (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { /* 1 sec timeout */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1510,7 +1530,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1671,12 +1692,16 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Check out PRLI rsp */ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + ndlp->nlp_flag &= ~NLP_FIRSTBURST; if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && (npr->prliType == PRLI_FCP_TYPE)) { if (npr->initiatorFunc) ndlp->nlp_type |= NLP_FCP_INITIATOR; - if (npr->targetFunc) + if (npr->targetFunc) { ndlp->nlp_type |= NLP_FCP_TARGET; + if (npr->writeXferRdyDis) + ndlp->nlp_flag |= NLP_FIRSTBURST; + } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; } @@ -2146,7 +2171,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; ndlp->nlp_flag &= ~NLP_NPR_ADISC; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 98af07c6e30..2df11daad85 100644 --- 
a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2012 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -24,6 +24,8 @@ #include <linux/export.h> #include <linux/delay.h> #include <asm/unaligned.h> +#include <linux/crc-t10dif.h> +#include <net/checksum.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -48,7 +50,7 @@ #define LPFC_RESET_WAIT 2 #define LPFC_ABORT_WAIT 2 -int _dump_buf_done; +int _dump_buf_done = 1; static char *dif_op_str[] = { "PROT_NORMAL", @@ -66,10 +68,23 @@ struct scsi_dif_tuple { __be32 ref_tag; /* Target LBA or indirect LBA */ }; +static struct lpfc_rport_data * +lpfc_rport_data_from_scsi_device(struct scsi_device *sdev) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; + + if (vport->phba->cfg_fof) + return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; + else + return (struct lpfc_rport_data *)sdev->hostdata; +} + static void lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); static void lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); +static int +lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); static void lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) @@ -128,6 +143,30 @@ lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) } } +static inline unsigned +lpfc_cmd_blksize(struct scsi_cmnd *sc) +{ + return sc->device->sector_size; +} + +#define LPFC_CHECK_PROTECT_GUARD 1 +#define LPFC_CHECK_PROTECT_REF 2 +static inline unsigned +lpfc_cmd_protect(struct scsi_cmnd *sc, int flag) +{ + return 1; +} + +static inline unsigned +lpfc_cmd_guard_csum(struct scsi_cmnd *sc) +{ + if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF) + return 0; + if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP) + return 1; + return 0; +} + /** * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge. * @phba: Pointer to HBA object. @@ -276,9 +315,27 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) unsigned long new_queue_depth, old_queue_depth; old_queue_depth = sdev->queue_depth; - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + + switch (reason) { + case SCSI_QDEPTH_DEFAULT: + /* change request from sysfs, fall through */ + case SCSI_QDEPTH_RAMP_UP: + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + break; + case SCSI_QDEPTH_QFULL: + if (scsi_track_queue_full(sdev, qdepth) == 0) + return sdev->queue_depth; + + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "0711 detected queue full - lun queue " + "depth adjusted to %d.\n", sdev->queue_depth); + break; + default: + return -EOPNOTSUPP; + } + new_queue_depth = sdev->queue_depth; - rdata = sdev->hostdata; + rdata = lpfc_rport_data_from_scsi_device(sdev); if (rdata) lpfc_send_sdev_queuedepth_change_event(phba, vport, rdata->pnode, sdev->lun, @@ -349,50 +406,6 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba) } /** - * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread - * @phba: The Hba for which this call is being executed. - * - * This routine post WORKER_RAMP_UP_QUEUE event for @phba vport. 
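lpfc_rport_data_from_scsi_device() exists because the OAS work changes what sdev->hostdata points at: with cfg_fof set it is a per-LUN lpfc_device_data wrapping the rport data, otherwise the rport data itself. One accessor keeps every call site (queue-depth changes, error injection, command setup) agnostic. A reduced model of the same shape (struct fields here are illustrative):

/* Sketch: one accessor hides two possible hostdata layouts. */
struct rport_data {
	int target_id;          /* illustrative payload */
};

struct device_data {            /* used only when the OAS feature is on */
	struct rport_data *rport_data;
	/* ... per-LUN OAS state ... */
};

static struct rport_data *rport_from_hostdata(void *hostdata, int oas_enabled)
{
	if (oas_enabled)
		return ((struct device_data *)hostdata)->rport_data;
	return (struct rport_data *)hostdata;
}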
This routine - * post at most 1 event every 5 minute after last_ramp_up_time or - * last_rsrc_error_time. This routine wakes up worker thread of @phba - * to process WORKER_RAM_DOWN_EVENT event. - * - * This routine should be called with no lock held. - **/ -static inline void -lpfc_rampup_queue_depth(struct lpfc_vport *vport, - uint32_t queue_depth) -{ - unsigned long flags; - struct lpfc_hba *phba = vport->phba; - uint32_t evt_posted; - atomic_inc(&phba->num_cmd_success); - - if (vport->cfg_lun_queue_depth <= queue_depth) - return; - spin_lock_irqsave(&phba->hbalock, flags); - if (time_before(jiffies, - phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) || - time_before(jiffies, - phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) { - spin_unlock_irqrestore(&phba->hbalock, flags); - return; - } - phba->last_ramp_up_time = jiffies; - spin_unlock_irqrestore(&phba->hbalock, flags); - - spin_lock_irqsave(&phba->pport->work_port_lock, flags); - evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE; - if (!evt_posted) - phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE; - spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); - - if (!evt_posted) - lpfc_worker_wake_up(phba); - return; -} - -/** * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler * @phba: The Hba for which this call is being executed. * @@ -444,41 +457,6 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) } /** - * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler - * @phba: The Hba for which this call is being executed. - * - * This routine is called to process WORKER_RAMP_UP_QUEUE event for worker - * thread.This routine increases queue depth for all scsi device on each vport - * associated with @phba by 1. This routine also sets @phba num_rsrc_err and - * num_cmd_success to zero. - **/ -void -lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) -{ - struct lpfc_vport **vports; - struct Scsi_Host *shost; - struct scsi_device *sdev; - int i; - - vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { - shost = lpfc_shost_from_vport(vports[i]); - shost_for_each_device(sdev, shost) { - if (vports[i]->cfg_lun_queue_depth <= - sdev->queue_depth) - continue; - lpfc_change_queue_depth(sdev, - sdev->queue_depth+1, - SCSI_QDEPTH_RAMP_UP); - } - } - lpfc_destroy_vport_work_array(phba, vports); - atomic_set(&phba->num_rsrc_err, 0); - atomic_set(&phba->num_cmd_success, 0); -} - -/** * lpfc_scsi_dev_block - set all scsi hosts to block state * @phba: Pointer to HBA context object. 
* @@ -534,7 +512,16 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_bpl; uint16_t iotag; - int bcnt; + int bcnt, bpl_size; + + bpl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", + num_to_alloc, phba->cfg_sg_dma_buf_size, + (int)sizeof(struct fcp_cmnd), + (int)sizeof(struct fcp_rsp), bpl_size); for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); @@ -732,7 +719,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); psb->exch_busy = 0; spin_unlock_irqrestore(&phba->hbalock, iflag); - if (pring->txq_cnt) + if (!list_empty(&pring->txq)) lpfc_worker_wake_up(phba); return; @@ -759,7 +746,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, struct list_head *post_sblist, int sb_count) { struct lpfc_scsi_buf *psb, *psb_next; - int status; + int status, sgl_size; int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; dma_addr_t pdma_phys_bpl1; int last_xritag = NO_XRI; @@ -771,6 +758,9 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, if (sb_count <= 0) return -EINVAL; + sgl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + list_for_each_entry_safe(psb, psb_next, post_sblist, list) { list_del_init(&psb->list); block_cnt++; @@ -803,7 +793,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, post_cnt = block_cnt; } else if (block_cnt == 1) { /* last single sgl with non-contiguous xri */ - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + if (sgl_size > SGL_PAGE_SIZE) pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; else @@ -885,9 +875,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) int num_posted, rc = 0; /* get all SCSI buffers need to repost to a local list */ - spin_lock(&phba->scsi_buf_list_lock); - list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist); - spin_unlock(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock(&phba->scsi_buf_list_put_lock); + list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); + list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); + spin_unlock(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); /* post the list of scsi buffer sgls to port if available */ if (!list_empty(&post_sblist)) { @@ -923,13 +916,22 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) IOCB_t *iocb; dma_addr_t pdma_phys_fcp_cmd; dma_addr_t pdma_phys_fcp_rsp; - dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; + dma_addr_t pdma_phys_bpl; uint16_t iotag, lxri = 0; - int bcnt, num_posted; + int bcnt, num_posted, sgl_size; LIST_HEAD(prep_sblist); LIST_HEAD(post_sblist); LIST_HEAD(scsi_sblist); + sgl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", + num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size, + (int)sizeof(struct fcp_cmnd), + (int)sizeof(struct fcp_rsp)); + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); if (!psb) @@ -948,15 +950,19 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) } memset(psb->data, 0, phba->cfg_sg_dma_buf_size); - /* Allocate iotag for psb->cur_iocbq. 
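The scsi_buf list split visible above trades one contended lock for two: lpfc_get_scsi_buf_*() pulls from the get list, lpfc_release_scsi_buf_*() pushes to the put list, and only when the get list runs dry are both locks taken (get outer, put inner, always in that order) to splice the put list over. The core of the scheme as a sketch:

#include <linux/list.h>
#include <linux/spinlock.h>

struct buf {
	struct list_head list;
};

static struct buf *buf_get(spinlock_t *get_lock, struct list_head *get_list,
			   spinlock_t *put_lock, struct list_head *put_list)
{
	struct buf *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(get_lock, flags);
	if (list_empty(get_list)) {
		/* Refill: steal everything the release path accumulated.
		 * Lock order is always get outer, put inner. */
		spin_lock(put_lock);
		list_splice_init(put_list, get_list);
		spin_unlock(put_lock);
	}
	if (!list_empty(get_list)) {
		b = list_first_entry(get_list, struct buf, list);
		list_del(&b->list);
	}
	spin_unlock_irqrestore(get_lock, flags);
	return b;
}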
*/ - iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); - if (iotag == 0) { + /* + * 4K Page alignment is CRITICAL to BlockGuard, double check + * to be sure. + */ + if (phba->cfg_enable_bg && (((unsigned long)(psb->data) & + (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); + psb->data, psb->dma_handle); kfree(psb); break; } + lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { pci_pool_free(phba->lpfc_scsi_dma_buf_pool, @@ -964,21 +970,31 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) kfree(psb); break; } + + /* Allocate iotag for psb->cur_iocbq. */ + iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); + if (iotag == 0) { + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + psb->data, psb->dma_handle); + kfree(psb); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "3368 Failed to allocate IOTAG for" + " XRI:0x%x\n", lxri); + lpfc_sli4_free_xri(phba, lxri); + break; + } psb->cur_iocbq.sli4_lxritag = lxri; psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; psb->fcp_bpl = psb->data; - psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + psb->fcp_cmnd = (psb->data + sgl_size); psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + sizeof(struct fcp_cmnd)); /* Initialize local short-hand pointers. */ sgl = (struct sli4_sge *)psb->fcp_bpl; pdma_phys_bpl = psb->dma_handle; - pdma_phys_fcp_cmd = - (psb->dma_handle + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size); pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); /* @@ -1020,17 +1036,13 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) iocb->ulpLe = 1; iocb->ulpClass = CLASS3; psb->cur_iocbq.context1 = psb; - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) - pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; - else - pdma_phys_bpl1 = 0; psb->dma_phys_bpl = pdma_phys_bpl; /* add the scsi buffer to a post list */ list_add_tail(&psb->list, &post_sblist); - spin_lock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); phba->sli4_hba.scsi_xri_cnt++; - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); } lpfc_printf_log(phba, KERN_INFO, LOG_BG, "3021 Allocate %d out of %d requested new SCSI " @@ -1079,17 +1091,22 @@ static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { struct lpfc_scsi_buf * lpfc_cmd = NULL; - struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; + struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; unsigned long iflag = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); - if (lpfc_cmd) { - lpfc_cmd->seg_cnt = 0; - lpfc_cmd->nonsg_phys = 0; - lpfc_cmd->prot_seg_cnt = 0; + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); + list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, + list); + if (!lpfc_cmd) { + spin_lock(&phba->scsi_buf_list_put_lock); + list_splice(&phba->lpfc_scsi_buf_list_put, + &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + list_remove_head(scsi_buf_list_get, lpfc_cmd, + struct lpfc_scsi_buf, list); + spin_unlock(&phba->scsi_buf_list_put_lock); } - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); +
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); return lpfc_cmd; } /** @@ -1106,29 +1123,40 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { - struct lpfc_scsi_buf *lpfc_cmd ; + struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next; unsigned long iflag = 0; int found = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, - list) { + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); + list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, + &phba->lpfc_scsi_buf_list_get, list) { if (lpfc_test_rrq_active(phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) continue; list_del(&lpfc_cmd->list); found = 1; - lpfc_cmd->seg_cnt = 0; - lpfc_cmd->nonsg_phys = 0; - lpfc_cmd->prot_seg_cnt = 0; break; } - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, - iflag); + if (!found) { + spin_lock(&phba->scsi_buf_list_put_lock); + list_splice(&phba->lpfc_scsi_buf_list_put, + &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + spin_unlock(&phba->scsi_buf_list_put_lock); + list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, + &phba->lpfc_scsi_buf_list_get, list) { + if (lpfc_test_rrq_active( + phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) + continue; + list_del(&lpfc_cmd->list); + found = 1; + break; + } + } + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); if (!found) return NULL; - else - return lpfc_cmd; + return lpfc_cmd; } /** * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA @@ -1160,10 +1188,15 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); + psb->seg_cnt = 0; + psb->nonsg_phys = 0; + psb->prot_seg_cnt = 0; + + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); } /** @@ -1181,6 +1214,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; + psb->seg_cnt = 0; + psb->nonsg_phys = 0; + psb->prot_seg_cnt = 0; + if (psb->exch_busy) { spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); @@ -1190,11 +1227,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); } else { - - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); } } @@ -1268,6 +1305,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) "dma_map_sg. 
Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 1; } @@ -1350,12 +1388,6 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) return 0; } -static inline unsigned -lpfc_cmd_blksize(struct scsi_cmnd *sc) -{ - return sc->device->sector_size; -} - #ifdef CONFIG_SCSI_LPFC_DEBUG_FS /* Return if if error injection is detected by Initiator */ @@ -1420,7 +1452,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, } /* Next check if we need to match the remote NPortID or WWPN */ - rdata = sc->device->hostdata; + rdata = lpfc_rport_data_from_scsi_device(sc->device); if (rdata && rdata->pnode) { ndlp = rdata->pnode; @@ -1788,10 +1820,9 @@ static int lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint8_t *txop, uint8_t *rxop) { - uint8_t guard_type = scsi_host_get_guard(sc->device->host); uint8_t ret = 0; - if (guard_type == SHOST_DIX_GUARD_IP) { + if (lpfc_cmd_guard_csum(sc)) { switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: @@ -1869,10 +1900,9 @@ static int lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint8_t *txop, uint8_t *rxop) { - uint8_t guard_type = scsi_host_get_guard(sc->device->host); uint8_t ret = 0; - if (guard_type == SHOST_DIX_GUARD_IP) { + if (lpfc_cmd_guard_csum(sc)) { switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: @@ -2013,9 +2043,21 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); + + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. 
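The two headers added at the top of lpfc_scsi.c, crc-t10dif.h and net/checksum.h, match the two guard flavors that lpfc_cmd_guard_csum() distinguishes: the T10-mandated CRC16 guard and the cheaper IP-checksum guard used with DIX. A hedged sketch of computing each over one 512-byte sector with the stock kernel helpers:

#include <linux/types.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

/* Sketch: the two guard-tag flavors over one 512-byte sector. */
static u16 guard_crc(const unsigned char *sector)
{
	return crc_t10dif(sector, 512);         /* T10 DIF CRC16 guard */
}

static u16 guard_ip(const void *sector)
{
	/* DIX IP-checksum guard: 16-bit ones'-complement sum. */
	return (__force u16)csum_fold(csum_partial(sector, 512, 0));
}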
+ */ if (datadir == DMA_FROM_DEVICE) { - bf_set(pde6_ce, pde6, checking); - bf_set(pde6_re, pde6, checking); + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); } bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); @@ -2145,6 +2187,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, split_offset = 0; do { + /* Check to see if we ran out of space */ + if (num_bde >= (phba->cfg_total_seg_cnt - 2)) + return num_bde + 3; + /* setup PDE5 with what we have */ pde5 = (struct lpfc_pde5 *) bpl; memset(pde5, 0, sizeof(struct lpfc_pde5)); @@ -2164,8 +2210,17 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); - bf_set(pde6_ce, pde6, checking); - bf_set(pde6_re, pde6, checking); + + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); + bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); bf_set(pde6_apptagval, pde6, 0); @@ -2213,6 +2268,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { + /* Check to see if we ran out of space */ + if (num_bde >= phba->cfg_total_seg_cnt) + return num_bde + 1; + if (!sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9065 BLKGRD:%s Invalid data segment\n", @@ -2324,7 +2383,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge_diseed *diseed = NULL; dma_addr_t physaddr; int i = 0, num_sge = 0, status; - int datadir = sc->sc_data_direction; uint32_t reftag; unsigned blksize; uint8_t txop, rxop; @@ -2362,13 +2420,26 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, diseed->ref_tag = cpu_to_le32(reftag); diseed->ref_tag_tran = diseed->ref_tag; + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. 
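The out-of-space guards added in these hunks deliberately return a descriptor count larger than the configured total, so the callers' later range check on num_bde fails cleanly instead of the loop overrunning the table. A toy illustration of the convention (the limit and names are made up for the sketch):

/* Toy model of the descriptor-budget convention used above */
#define SKETCH_TOTAL_SEG_CNT	64	/* illustrative limit */

static int sketch_add_prot_group(int num_bde)
{
	/*
	 * A protection group needs a PDE5 + PDE6 pair plus at least one
	 * data descriptor, so bail while two slots are still free and
	 * return a value the caller is guaranteed to see as overflow.
	 */
	if (num_bde >= (SKETCH_TOTAL_SEG_CNT - 2))
		return num_bde + 3;	/* > limit => caller errors out */
	return num_bde + 2;		/* PDE5 + PDE6 consumed */
}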
+ */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + } + /* setup DISEED with the rest of the info */ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); - if (datadir == DMA_FROM_DEVICE) { - bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); - bf_set(lpfc_sli4_sge_dif_re, diseed, checking); - } + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); bf_set(lpfc_sli4_sge_dif_me, diseed, 0); @@ -2497,6 +2568,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, split_offset = 0; do { + /* Check to see if we ran out of space */ + if (num_sge >= (phba->cfg_total_seg_cnt - 2)) + return num_sge + 3; + /* setup DISEED with what we have */ diseed = (struct sli4_sge_diseed *) sgl; memset(diseed, 0, sizeof(struct sli4_sge_diseed)); @@ -2506,11 +2581,34 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, diseed->ref_tag = cpu_to_le32(reftag); diseed->ref_tag_tran = diseed->ref_tag; + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) { + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + + } else { + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + /* + * When in this mode, the hardware will replace + * the guard tag from the host with a + * newly generated good CRC for the wire. + * Switch to raw mode here to avoid this + * behavior. What the host sends gets put on the wire. + */ + if (txop == BG_OP_IN_CRC_OUT_CRC) { + txop = BG_OP_RAW_MODE; + rxop = BG_OP_RAW_MODE; + } + } + + + if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF)) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + /* setup DISEED with the rest of the info */ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); - bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); - bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); bf_set(lpfc_sli4_sge_dif_me, diseed, 0); @@ -2556,6 +2654,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { + /* Check to see if we ran out of space */ + if (num_sge >= phba->cfg_total_seg_cnt) + return num_sge + 1; + if (!sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9086 BLKGRD:%s Invalid data segment\n", @@ -2661,15 +2763,57 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) ret = LPFC_PG_TYPE_DIF_BUF; break; default: - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9021 Unsupported protection op:%d\n", op); + if (phba) + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "9021 Unsupported protection op:%d\n", + op); break; } - return ret; } /** + * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be adjusted. + * + * Adjust the data length to account for how much data + * is actually on the wire. 
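Since DIF Type 1 appends one 8-byte protection tuple to every logical block, the on-wire length grows by 8 bytes per block. A minimal sketch of the arithmetic this routine applies (assuming, as on a block-aligned transfer, that blksize divides bufflen):

/*
 * Sketch of the wire-length math in lpfc_bg_scsi_adjust_dl(): each
 * blksize bytes of data carries one 8-byte DIF tuple on the wire,
 * e.g. 4096 bytes of 512-byte blocks -> 4096 + (4096 / 512) * 8 = 4160.
 */
static unsigned int sketch_wire_len(unsigned int bufflen, unsigned int blksize)
{
	return bufflen + (bufflen / blksize) * 8;
}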
+ * + * returns the adjusted data length + **/ +static int +lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, + struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scsi_cmnd *sc = lpfc_cmd->pCmd; + int fcpdl; + + fcpdl = scsi_bufflen(sc); + + /* Check if there is protection data on the wire */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + /* Read check for protection data */ + if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) + return fcpdl; + + } else { + /* Write check for protection data */ + if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) + return fcpdl; + } + + /* + * If we are in DIF Type 1 mode every data block has a 8 byte + * DIF (trailer) attached to it. Must adjust FCP data length + * to account for the protection data. + */ + fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8; + + return fcpdl; +} + +/** * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be prep'ed. @@ -2689,8 +2833,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, uint32_t num_bde = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; - int diflen, fcpdl; - unsigned blksize; + int fcpdl; /* * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd @@ -2711,28 +2854,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, return 1; lpfc_cmd->seg_cnt = datasegcnt; - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9067 BLKGRD: %s: Too many sg segments" - " from dma_map_sg. Config %d, seg_cnt" - " %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + goto err; prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: + + /* Here we need to add a PDE5 and PDE6 to the count */ + if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) + goto err; + num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, datasegcnt); /* we should have 2 or more entries in buffer list */ if (num_bde < 2) goto err; break; - case LPFC_PG_TYPE_DIF_BUF:{ + + case LPFC_PG_TYPE_DIF_BUF: /* * This type indicates that protection buffers are * passed to the driver, so that needs to be prepared @@ -2747,31 +2890,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, } lpfc_cmd->prot_seg_cnt = protsegcnt; - if (lpfc_cmd->prot_seg_cnt - > phba->cfg_prot_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9068 BLKGRD: %s: Too many prot sg " - "segments from dma_map_sg. Config %d," - "prot_seg_cnt %d\n", __func__, - phba->cfg_prot_sg_seg_cnt, - lpfc_cmd->prot_seg_cnt); - dma_unmap_sg(&phba->pcidev->dev, - scsi_prot_sglist(scsi_cmnd), - scsi_prot_sg_count(scsi_cmnd), - datadir); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* + * There is a minimum of 4 BPLs used for every + * protection data segment.
+ */ + if ((lpfc_cmd->prot_seg_cnt * 4) > + (phba->cfg_total_seg_cnt - 2)) + goto err; num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, datasegcnt, protsegcnt); /* we should have 3 or more entries in buffer list */ - if (num_bde < 3) + if ((num_bde < 3) || + (num_bde > phba->cfg_total_seg_cnt)) goto err; break; - } + case LPFC_PG_TYPE_INVALID: default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "9022 Unexpected protection group %i\n", prot_group_type); @@ -2790,18 +2930,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, iocb_cmd->ulpBdeCount = 1; iocb_cmd->ulpLe = 1; - fcpdl = scsi_bufflen(scsi_cmnd); - - if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) { - /* - * We are in DIF Type 1 mode - * Every data block has a 8 byte DIF (trailer) - * attached to it. Must ajust FCP data length - */ - blksize = lpfc_cmd_blksize(scsi_cmnd); - diflen = (fcpdl / blksize) * 8; - fcpdl += diflen; - } + fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); /* @@ -2812,14 +2941,234 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, return 0; err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9023 Could not setup all needed BDE's" - "prot_group_type=%d, num_bde=%d\n", + "9023 Cannot setup S/G List for HBA " + "IO segs %d/%d BPL %d SCSI %d: %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, prot_group_type, num_bde); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; return 1; } /* + * This function calculates the T10 DIF guard tag + * on the specified data using a CRC algorithm + * using crc_t10dif. + */ +uint16_t +lpfc_bg_crc(uint8_t *data, int count) +{ + uint16_t crc = 0; + uint16_t x; + + crc = crc_t10dif(data, count); + x = cpu_to_be16(crc); + return x; +} + +/* + * This function calculates the T10 DIF guard tag + * on the specified data using a CSUM algorithm + * using ip_compute_csum. + */ +uint16_t +lpfc_bg_csum(uint8_t *data, int count) +{ + uint16_t ret; + + ret = ip_compute_csum(data, count); + return ret; +} + +/* + * This function examines the protection data to try to determine + * what type of T10-DIF error occurred.
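For reference, the protection data being walked below is a stream of standard 8-byte T10-DIF tuples. A self-contained sketch of checking one block's guard tag, mirroring lpfc_bg_crc()/lpfc_bg_csum() above (the struct and function names are hypothetical; crc_t10dif() and ip_compute_csum() are the kernel helpers the driver itself uses):

#include <linux/types.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

/* Standard 8-byte T10-DIF tuple appended to each logical block */
struct sketch_dif_tuple {
	__be16 guard_tag;	/* CRC (or IP checksum) of the block */
	__be16 app_tag;		/* 0xffff escapes all checking */
	__be32 ref_tag;		/* low 32 bits of the LBA */
};

/* Returns 0 when the guard tag matches the block contents */
static int sketch_verify_guard(const u8 *block, unsigned int blksize,
			       const struct sketch_dif_tuple *t,
			       bool ip_csum)
{
	__be16 computed;

	if (ip_csum)	/* DIX "IP" guard, as in lpfc_bg_csum() above */
		computed = (__force __be16)ip_compute_csum(block, blksize);
	else		/* T10 CRC guard, as in lpfc_bg_crc() above */
		computed = cpu_to_be16(crc_t10dif(block, blksize));

	return computed == t->guard_tag ? 0 : -1;
}

Both tags are compared in big-endian wire order, which is why the computed CRC is byte-swapped rather than the stored tag.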
+ */ +void +lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scatterlist *sgpe; /* s/g prot entry */ + struct scatterlist *sgde; /* s/g data entry */ + struct scsi_cmnd *cmd = lpfc_cmd->pCmd; + struct scsi_dif_tuple *src = NULL; + uint8_t *data_src = NULL; + uint16_t guard_tag, guard_type; + uint16_t start_app_tag, app_tag; + uint32_t start_ref_tag, ref_tag; + int prot, protsegcnt; + int err_type, len, data_len; + int chk_ref, chk_app, chk_guard; + uint16_t sum; + unsigned blksize; + + err_type = BGS_GUARD_ERR_MASK; + sum = 0; + guard_tag = 0; + + /* First check to see if there is protection data to examine */ + prot = scsi_get_prot_op(cmd); + if ((prot == SCSI_PROT_READ_STRIP) || + (prot == SCSI_PROT_WRITE_INSERT) || + (prot == SCSI_PROT_NORMAL)) + goto out; + + /* Currently the driver just supports ref_tag and guard_tag checking */ + chk_ref = 1; + chk_app = 0; + chk_guard = 0; + + /* Setup a ptr to the protection data provided by the SCSI host */ + sgpe = scsi_prot_sglist(cmd); + protsegcnt = lpfc_cmd->prot_seg_cnt; + + if (sgpe && protsegcnt) { + + /* + * We will only try to verify guard tag if the segment + * data length is a multiple of the blksize. + */ + sgde = scsi_sglist(cmd); + blksize = lpfc_cmd_blksize(cmd); + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + guard_type = scsi_host_get_guard(cmd->device->host); + + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */ + start_app_tag = src->app_tag; + len = sgpe->length; + while (src && protsegcnt) { + while (len) { + + /* + * First check to see if a protection data + * check is valid + */ + if ((src->ref_tag == 0xffffffff) || + (src->app_tag == 0xffff)) { + start_ref_tag++; + goto skipit; + } + + /* First Guard Tag checking */ + if (chk_guard) { + guard_tag = src->guard_tag; + if (lpfc_cmd_guard_csum(cmd)) + sum = lpfc_bg_csum(data_src, + blksize); + else + sum = lpfc_bg_crc(data_src, + blksize); + if ((guard_tag != sum)) { + err_type = BGS_GUARD_ERR_MASK; + goto out; + } + } + + /* Reference Tag checking */ + ref_tag = be32_to_cpu(src->ref_tag); + if (chk_ref && (ref_tag != start_ref_tag)) { + err_type = BGS_REFTAG_ERR_MASK; + goto out; + } + start_ref_tag++; + + /* App Tag checking */ + app_tag = src->app_tag; + if (chk_app && (app_tag != start_app_tag)) { + err_type = BGS_APPTAG_ERR_MASK; + goto out; + } +skipit: + len -= sizeof(struct scsi_dif_tuple); + if (len < 0) + len = 0; + src++; + + data_src += blksize; + data_len -= blksize; + + /* + * Are we at the end of the Data segment? + * The data segment is only used for Guard + * tag checking. 
+ */ + if (chk_guard && (data_len == 0)) { + chk_guard = 0; + sgde = sg_next(sgde); + if (!sgde) + goto out; + + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + } + } + + /* Goto the next Protection data segment */ + sgpe = sg_next(sgpe); + if (sgpe) { + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + len = sgpe->length; + } else { + src = NULL; + } + protsegcnt--; + } + } +out: + if (err_type == BGS_GUARD_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x1); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + phba->bg_guard_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + sum, guard_tag); + + } else if (err_type == BGS_REFTAG_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x3); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_reftag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + ref_tag, start_ref_tag); + + } else if (err_type == BGS_APPTAG_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x2); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_apptag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9041 BLKGRD: LBA %lx app_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + app_tag, start_app_tag); + } +} + + +/* * This function checks for BlockGuard errors detected by * the HBA. In case of errors, the ASC/ASCQ fields in the * sense buffer will be set accordingly, paired with @@ -2842,12 +3191,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, uint32_t bgstat = bgf->bgstat; uint64_t failing_sector = 0; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd" - " 0x%x lba 0x%llx blk cnt 0x%x " - "bgstat=0x%x bghm=0x%x\n", - cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); - spin_lock(&_dump_buf_lock); if (!_dump_buf_done) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" @@ -2870,18 +3213,24 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (lpfc_bgs_get_invalid_prof(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid" - " BlockGuard profile. bgstat:0x%x\n", - bgstat); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9072 BLKGRD: Invalid BG Profile in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); ret = (-1); goto out; } if (lpfc_bgs_get_uninit_dif_block(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: " - "Invalid BlockGuard DIF Block. 
bgstat:0x%x\n", - bgstat); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9073 BLKGRD: Invalid BG PDIF Block in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); ret = (-1); goto out; } @@ -2894,8 +3243,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, cmd->result = DRIVER_SENSE << 24 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_guard_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9055 BLKGRD: guard_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9055 BLKGRD: Guard Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_reftag_err(bgstat)) { @@ -2907,8 +3260,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_reftag_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9056 BLKGRD: ref_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9056 BLKGRD: Ref Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_apptag_err(bgstat)) { @@ -2920,8 +3277,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_apptag_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9061 BLKGRD: app_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9061 BLKGRD: App Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { @@ -2960,11 +3321,16 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (!ret) { /* No error was reported - problem in FW? */ - cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9057 BLKGRD: Unknown error reported!\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9057 BLKGRD: Unknown error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); + + /* Calcuate what type of error it was */ + lpfc_calc_bg_err(phba, lpfc_cmd); } - out: return ret; } @@ -3028,6 +3394,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 1; } @@ -3090,46 +3457,15 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * we need to set word 4 of IOCB here */ iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); - return 0; -} -/** - * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard - * @phba: The Hba for which this call is being executed. - * @lpfc_cmd: The scsi buffer which is going to be adjusted. - * - * Adjust the data length to account for how much data - * is actually on the wire. 
- * - * returns the adjusted data length - **/ -static int -lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, - struct lpfc_scsi_buf *lpfc_cmd) -{ - struct scsi_cmnd *sc = lpfc_cmd->pCmd; - int diflen, fcpdl; - unsigned blksize; - - fcpdl = scsi_bufflen(sc); - - /* Check if there is protection data on the wire */ - if (sc->sc_data_direction == DMA_FROM_DEVICE) { - /* Read */ - if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) - return fcpdl; - - } else { - /* Write */ - if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) - return fcpdl; - } - - /* If protection data on the wire, adjust the count accordingly */ - blksize = lpfc_cmd_blksize(sc); - diflen = (fcpdl / blksize) * 8; - fcpdl += diflen; - return fcpdl; + /* + * If the OAS driver feature is enabled and the lun is enabled for + * OAS, set the oas iocb related flags. + */ + if ((phba->cfg_fof) && ((struct lpfc_device_data *) + scsi_cmnd->device->hostdata)->oas_enabled) + lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS; + return 0; } /** @@ -3149,14 +3485,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl); IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; - uint32_t num_bde = 0; + uint32_t num_sge = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; int fcpdl; /* * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd - * fcp_rsp regions to the first data bde entry + * fcp_rsp regions to the first data sge entry */ if (scsi_sg_count(scsi_cmnd)) { /* @@ -3179,28 +3515,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, sgl += 1; lpfc_cmd->seg_cnt = datasegcnt; - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9087 BLKGRD: %s: Too many sg segments" - " from dma_map_sg. Config %d, seg_cnt" - " %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + goto err; prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: - num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, + /* Here we need to add a DISEED to the count */ + if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt) + goto err; + + num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, datasegcnt); + /* we should have 2 or more entries in buffer list */ - if (num_bde < 2) + if (num_sge < 2) goto err; break; - case LPFC_PG_TYPE_DIF_BUF:{ + + case LPFC_PG_TYPE_DIF_BUF: /* * This type indicates that protection buffers are * passed to the driver, so that needs to be prepared @@ -3215,31 +3551,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, } lpfc_cmd->prot_seg_cnt = protsegcnt; - if (lpfc_cmd->prot_seg_cnt - > phba->cfg_prot_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9088 BLKGRD: %s: Too many prot sg " - "segments from dma_map_sg. Config %d," - "prot_seg_cnt %d\n", __func__, - phba->cfg_prot_sg_seg_cnt, - lpfc_cmd->prot_seg_cnt); - dma_unmap_sg(&phba->pcidev->dev, - scsi_prot_sglist(scsi_cmnd), - scsi_prot_sg_count(scsi_cmnd), - datadir); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + /* + * There is a minimun of 3 SGEs used for every + * protection data segment. 
+ */ + if ((lpfc_cmd->prot_seg_cnt * 3) > + (phba->cfg_total_seg_cnt - 2)) + goto err; - num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, + num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, datasegcnt, protsegcnt); + /* we should have 3 or more entries in buffer list */ - if (num_bde < 3) + if ((num_sge < 3) || + (num_sge > phba->cfg_total_seg_cnt)) goto err; break; - } + case LPFC_PG_TYPE_INVALID: default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "9083 Unexpected protection group %i\n", prot_group_type); @@ -3263,7 +3596,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, } fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); - fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); /* @@ -3274,10 +3606,22 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, return 0; err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9084 Could not setup all needed BDE's" - "prot_group_type=%d, num_bde=%d\n", - prot_group_type, num_bde); + "9084 Cannot setup S/G List for HBA" + "IO segs %d/%d SGL %d SCSI %d: %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, + prot_group_type, num_sge); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; return 1; } @@ -3586,11 +3930,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, /* * Check SLI validation that all the transfer was actually done - * (fcpi_parm should be zero). + * (fcpi_parm should be zero). Apply check only to reads. */ - } else if (fcpi_parm) { + } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, - "9029 FCP Data Transfer Check Error: " + "9029 FCP Read Check Error Data: " "x%x x%x x%x x%x x%x\n", be32_to_cpu(fcpcmd->fcpDl), be32_to_cpu(fcprsp->rspResId), @@ -3635,7 +3979,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, struct lpfc_nodelist *pnode = rdata->pnode; struct scsi_cmnd *cmd; int result; - struct scsi_device *tmp_sdev; int depth; unsigned long flags; struct lpfc_fast_path_event *fast_path_evt; @@ -3710,7 +4053,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, cmd->device ? cmd->device->id : 0xffff, cmd->device ? cmd->device->lun : 0xffff, lpfc_cmd->status, lpfc_cmd->result, - vport->fc_myDID, pnode->nlp_DID, + vport->fc_myDID, + (pnode) ? pnode->nlp_DID : 0, phba->sli_rev == LPFC_SLI_REV4 ? lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, pIocbOut->iocb.ulpContext, @@ -3879,32 +4223,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, return; } - if (!result) - lpfc_rampup_queue_depth(vport, queue_depth); - - /* - * Check for queue full. If the lun is reporting queue full, then - * back off the lun queue depth to prevent target overloads. 
- */ - if (result == SAM_STAT_TASK_SET_FULL && pnode && - NLP_CHK_NODE_ACT(pnode)) { - shost_for_each_device(tmp_sdev, shost) { - if (tmp_sdev->id != scsi_id) - continue; - depth = scsi_track_queue_full(tmp_sdev, - tmp_sdev->queue_depth-1); - if (depth <= 0) - continue; - lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, - "0711 detected queue full - lun queue " - "depth adjusted to %d.\n", depth); - lpfc_send_sdev_queuedepth_change_event(phba, vport, - pnode, - tmp_sdev->lun, - depth+1, depth); - } - } - spin_lock_irqsave(&phba->hbalock, flags); lpfc_cmd->pCmd = NULL; spin_unlock_irqrestore(&phba->hbalock, flags); @@ -3961,6 +4279,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, char tag[2]; uint8_t *ptr; bool sli4; + uint32_t fcpdl; if (!pnode || !NLP_CHK_NODE_ACT(pnode)) return; @@ -3995,6 +4314,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, fcp_cmnd->fcpCntl1 = SIMPLE_Q; sli4 = (phba->sli_rev == LPFC_SLI_REV4); + piocbq->iocb.un.fcpi.fcpi_XRdy = 0; /* * There are three possibilities here - use scatter-gather segment, use @@ -4005,11 +4325,15 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, if (scsi_sg_count(scsi_cmnd)) { if (datadir == DMA_TO_DEVICE) { iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; - if (sli4) - iocb_cmd->ulpPU = PARM_READ_CHECK; - else { - iocb_cmd->un.fcpi.fcpi_parm = 0; - iocb_cmd->ulpPU = 0; + iocb_cmd->ulpPU = PARM_READ_CHECK; + if (vport->cfg_first_burst_size && + (pnode->nlp_flag & NLP_FIRSTBURST)) { + fcpdl = scsi_bufflen(scsi_cmnd); + if (fcpdl < vport->cfg_first_burst_size) + piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl; + else + piocbq->iocb.un.fcpi.fcpi_XRdy = + vport->cfg_first_burst_size; } fcp_cmnd->fcpCntl3 = WRITE_DATA; phba->fc4OutputRequests++; @@ -4098,10 +4422,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, piocb->ulpContext = vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; } - if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { - piocb->ulpFCP2Rcvy = 1; - } + piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 
1 : 0; piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); + piocb->ulpPU = 0; + piocb->un.fcpi.fcpi_parm = 0; /* ulpTimeout is only one byte */ if (lpfc_cmd->timeout > 0xff) { @@ -4246,7 +4570,7 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba) unsigned long poll_tmo_expires = (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo)); - if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt) + if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq)) mod_timer(&phba->fcp_poll_timer, poll_tmo_expires); } @@ -4301,12 +4625,13 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) { struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - struct lpfc_rport_data *rdata = cmnd->device->hostdata; + struct lpfc_rport_data *rdata; struct lpfc_nodelist *ndlp; struct lpfc_scsi_buf *lpfc_cmd; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); int err; + rdata = lpfc_rport_data_from_scsi_device(cmnd->device); err = fc_remote_port_chkready(rport); if (err) { cmnd->result = err; @@ -4357,7 +4682,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { if (vport->phba->cfg_enable_bg) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, "9033 BLKGRD: rcvd %s cmd:x%x " "sector x%llx cnt %u pt %x\n", dif_op_str[scsi_get_prot_op(cmnd)], @@ -4369,7 +4695,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); } else { if (vport->phba->cfg_enable_bg) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, "9038 BLKGRD: rcvd PROT_NORMAL cmd: " "x%x sector x%llx cnt %u pt %x\n", cmnd->cmnd[0], @@ -4390,6 +4717,24 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); if (err) { atomic_dec(&ndlp->cmd_pending); + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "3376 FCP could not issue IOCB err %x" + "FCP cmd x%x <%d/%d> " + "sid: x%x did: x%x oxid: x%x " + "Data: x%x x%x x%x x%x\n", + err, cmnd->cmnd[0], + cmnd->device ? cmnd->device->id : 0xffff, + cmnd->device ? cmnd->device->lun : 0xffff, + vport->fc_myDID, ndlp->nlp_DID, + phba->sli_rev == LPFC_SLI_REV4 ? + lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, + lpfc_cmd->cur_iocbq.iocb.ulpContext, + lpfc_cmd->cur_iocbq.iocb.ulpIoTag, + lpfc_cmd->cur_iocbq.iocb.ulpTimeout, + (uint32_t) + (cmnd->request->timeout / 1000)); + + goto out_host_busy_free_buf; } if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { @@ -4438,7 +4783,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) struct lpfc_scsi_buf *lpfc_cmd; IOCB_t *cmd, *icmd; int ret = SUCCESS, status = 0; - unsigned long flags; + struct lpfc_sli_ring *pring_s4; + int ring_number, ret_val; + unsigned long flags, iflags; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); status = fc_block_scsi_eh(cmnd); @@ -4489,12 +4836,23 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) BUG_ON(iocb->context1 != lpfc_cmd); + /* abort issued in recovery is still in progress */ + if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "3389 SCSI Layer I/O Abort Request is pending\n"); + spin_unlock_irqrestore(&phba->hbalock, flags); + goto wait_for_cmpl; + } + abtsiocb = __lpfc_sli_get_iocbq(phba); if (abtsiocb == NULL) { ret = FAILED; goto out_unlock; } + /* Indicate the IO is being aborted by the driver. 
*/ + iocb->iocb_flag |= LPFC_DRIVER_ABORTED; + /* * The scsi command can not be in txq and it is in flight because the * pCmd is still pointing at the SCSI command we have to abort. There @@ -4524,11 +4882,23 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; abtsiocb->vport = vport; + if (phba->sli_rev == LPFC_SLI_REV4) { + ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx; + pring_s4 = &phba->sli.ring[ring_number]; + /* Note: both hbalock and ring_lock must be set here */ + spin_lock_irqsave(&pring_s4->ring_lock, iflags); + ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, + abtsiocb, 0); + spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); + } else { + ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, + abtsiocb, 0); + } /* no longer need the lock after this point */ spin_unlock_irqrestore(&phba->hbalock, flags); - if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == - IOCB_ERROR) { + + if (ret_val == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, abtsiocb); ret = FAILED; goto out; @@ -4538,12 +4908,16 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) lpfc_sli_handle_fast_ring_event(phba, &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); +wait_for_cmpl: lpfc_cmd->waitq = &waitq; /* Wait for abort to complete */ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd), - (2*vport->cfg_devloss_tmo*HZ)); + msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); + + spin_lock_irqsave(shost->host_lock, flags); lpfc_cmd->waitq = NULL; + spin_unlock_irqrestore(shost->host_lock, flags); if (lpfc_cmd->pCmd == cmnd) { ret = FAILED; @@ -4589,6 +4963,73 @@ lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) } } + +/** + * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. + * + * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded + * + * Return code : + * 0x2003 - Error + * 0x2002 - Success + **/ +static int +lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd) +{ + struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; + uint32_t rsp_info; + uint32_t rsp_len; + uint8_t rsp_info_code; + int ret = FAILED; + + + if (fcprsp == NULL) + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0703 fcp_rsp is missing\n"); + else { + rsp_info = fcprsp->rspStatus2; + rsp_len = be32_to_cpu(fcprsp->rspRspLen); + rsp_info_code = fcprsp->rspInfo3; + + + lpfc_printf_vlog(vport, KERN_INFO, + LOG_FCP, + "0706 fcp_rsp valid 0x%x," + " rsp len=%d code 0x%x\n", + rsp_info, + rsp_len, rsp_info_code); + + if ((fcprsp->rspStatus2 & RSP_LEN_VALID) && (rsp_len == 8)) { + switch (rsp_info_code) { + case RSP_NO_FAILURE: + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0715 Task Mgmt No Failure\n"); + ret = SUCCESS; + break; + case RSP_TM_NOT_SUPPORTED: /* TM rejected */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0716 Task Mgmt Target " + "reject\n"); + break; + case RSP_TM_NOT_COMPLETED: /* TM failed */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0717 Task Mgmt Target " + "failed TM\n"); + break; + case RSP_TM_INVALID_LU: /* TM to invalid LU! */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "0718 Task Mgmt to invalid " + "LUN\n"); + break; + } + } + } + return ret; +} + + /** * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler * @vport: The virtual port for which this call is being executed.
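lpfc_check_fcp_rsp() above keys entirely off the FCP_RSP_INFO field; a compact sketch of the validity rule it applies (field and RSP_* names follow the fcp_rsp definitions in lpfc_scsi.h later in this patch; the helper name is hypothetical):

/* Returns the 8-bit TM response code, or -1 when RSP_INFO is absent */
static int sketch_tmf_rsp_code(const struct fcp_rsp *fcprsp)
{
	/*
	 * FCP_RSP_INFO is only meaningful when the length-valid bit is
	 * set and the response-info length is the expected 8 bytes.
	 */
	if (!(fcprsp->rspStatus2 & RSP_LEN_VALID) ||
	    be32_to_cpu(fcprsp->rspRspLen) != 8)
		return -1;
	return fcprsp->rspInfo3;	/* RSP_NO_FAILURE means success */
}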
@@ -4623,7 +5064,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode); if (lpfc_cmd == NULL) return FAILED; - lpfc_cmd->timeout = 60; + lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; lpfc_cmd->rdata = rdata; status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, @@ -4639,6 +5080,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, lpfc_release_scsi_buf(phba, lpfc_cmd); return FAILED; } + iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0702 Issue %s to TGT %d LUN %d " @@ -4649,13 +5091,8 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, iocbq, iocbqrsp, lpfc_cmd->timeout); - if (status != IOCB_SUCCESS) { - if (status == IOCB_TIMEDOUT) { - iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; - ret = TIMEOUT_ERROR; - } else - ret = FAILED; - lpfc_cmd->status = IOSTAT_DRIVER_REJECT; + if ((status != IOCB_SUCCESS) || + (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0727 TMF %s to TGT %d LUN %d failed (%d, %d) " "iocb_flag x%x\n", @@ -4663,9 +5100,21 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, tgt_id, lun_id, iocbqrsp->iocb.ulpStatus, iocbqrsp->iocb.un.ulpWord[4], iocbq->iocb_flag); - } else if (status == IOCB_BUSY) - ret = FAILED; - else + /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ + if (status == IOCB_SUCCESS) { + if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) + /* Something in the FCP_RSP was invalid. + * Check conditions */ + ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); + else + ret = FAILED; + } else if (status == IOCB_TIMEDOUT) { + ret = TIMEOUT_ERROR; + } else { + ret = FAILED; + } + lpfc_cmd->status = IOSTAT_DRIVER_REJECT; + } else ret = SUCCESS; lpfc_sli_release_iocbq(phba, iocbqrsp); @@ -4691,10 +5140,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, static int lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) { - struct lpfc_rport_data *rdata = cmnd->device->hostdata; + struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode; unsigned long later; + rdata = lpfc_rport_data_from_scsi_device(cmnd->device); if (!rdata) { lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0797 Tgt Map rport failure: rdata x%p\n", rdata); @@ -4712,7 +5162,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) if (pnode->nlp_state == NLP_STE_MAPPED_NODE) return SUCCESS; schedule_timeout_uninterruptible(msecs_to_jiffies(500)); - rdata = cmnd->device->hostdata; + rdata = lpfc_rport_data_from_scsi_device(cmnd->device); if (!rdata) return FAILED; pnode = rdata->pnode; @@ -4749,8 +5199,9 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); if (cnt) - lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], - tgt_id, lun_id, context); + lpfc_sli_abort_taskmgmt(vport, + &phba->sli.ring[phba->sli.fcp_ring], + tgt_id, lun_id, context); later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; while (time_after(later, jiffies) && cnt) { schedule_timeout_uninterruptible(msecs_to_jiffies(20)); @@ -4784,13 +5235,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_rport_data *rdata = 
cmnd->device->hostdata; + struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode; unsigned tgt_id = cmnd->device->id; unsigned int lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; - int status, ret = SUCCESS; + int status; + rdata = lpfc_rport_data_from_scsi_device(cmnd->device); if (!rdata) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0798 Device Reset rport failure: rdata x%p\n", rdata); @@ -4830,9 +5282,11 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) * So, continue on. * We will report success if all the i/o aborts successfully. */ - ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + if (status == SUCCESS) + status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, LPFC_CTX_LUN); - return ret; + + return status; } /** @@ -4851,13 +5305,14 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) { struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_rport_data *rdata = cmnd->device->hostdata; + struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode; unsigned tgt_id = cmnd->device->id; unsigned int lun_id = cmnd->device->lun; struct lpfc_scsi_event_header scsi_event; - int status, ret = SUCCESS; + int status; + rdata = lpfc_rport_data_from_scsi_device(cmnd->device); if (!rdata) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0799 Target Reset rport failure: rdata x%p\n", rdata); @@ -4897,9 +5352,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) * So, continue on. * We will report success if all the i/o aborts successfully. */ - ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, + if (status == SUCCESS) + status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, LPFC_CTX_TGT); - return ret; + return status; } /** @@ -5012,16 +5468,24 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_hba *phba = vport->phba; int rc, ret = SUCCESS; + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3172 SCSI layer issued Host Reset Data:\n"); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); rc = lpfc_sli_brdrestart(phba); if (rc) ret = FAILED; - lpfc_online(phba); + rc = lpfc_online(phba); + if (rc) + ret = FAILED; lpfc_unblock_mgmt_io(phba); - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "3172 SCSI layer issued Host Reset Data: x%x\n", ret); + if (ret == FAILED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3323 Failed host reset, bring it offline\n"); + lpfc_sli4_offline_eratt(phba); + } return ret; } @@ -5048,11 +5512,45 @@ lpfc_slave_alloc(struct scsi_device *sdev) uint32_t num_to_alloc = 0; int num_allocated = 0; uint32_t sdev_cnt; + struct lpfc_device_data *device_data; + unsigned long flags; + struct lpfc_name target_wwpn; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; - sdev->hostdata = rport->dd_data; + if (phba->cfg_fof) { + + /* + * Check to see if the device data structure for the lun + * exists. If not, create one. 
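The lookup-then-create sequence above has to drop the spinlock around the sleeping allocation and retake it to insert. A stripped-down sketch of that pattern (all names hypothetical):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct sketch_entry { struct list_head list; u64 key; };
struct sketch_table { spinlock_t lock; struct list_head entries; };

static struct sketch_entry *sketch_find_locked(struct sketch_table *t, u64 key)
{
	struct sketch_entry *e;

	list_for_each_entry(e, &t->entries, list)
		if (e->key == key)
			return e;
	return NULL;
}

/* Find-or-create on a spinlock-protected list when allocation may sleep */
static struct sketch_entry *sketch_find_or_create(struct sketch_table *t,
						  u64 key)
{
	struct sketch_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&t->lock, flags);
	e = sketch_find_locked(t, key);
	if (!e) {
		spin_unlock_irqrestore(&t->lock, flags);
		e = kzalloc(sizeof(*e), GFP_KERNEL);	/* may sleep */
		if (!e)
			return NULL;
		e->key = key;
		spin_lock_irqsave(&t->lock, flags);
		/* NB: callers that can race would need a re-check here */
		list_add_tail(&e->list, &t->entries);
	}
	spin_unlock_irqrestore(&t->lock, flags);
	return e;
}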
+ */ + + u64_to_wwn(rport->port_name, target_wwpn.u.wwn); + spin_lock_irqsave(&phba->devicelock, flags); + device_data = __lpfc_get_device_data(phba, + &phba->luns, + &vport->fc_portname, + &target_wwpn, + sdev->lun); + if (!device_data) { + spin_unlock_irqrestore(&phba->devicelock, flags); + device_data = lpfc_create_device_data(phba, + &vport->fc_portname, + &target_wwpn, + sdev->lun, true); + if (!device_data) + return -ENOMEM; + spin_lock_irqsave(&phba->devicelock, flags); + list_add_tail(&device_data->listentry, &phba->luns); + } + device_data->rport_data = rport->dd_data; + device_data->available = true; + spin_unlock_irqrestore(&phba->devicelock, flags); + sdev->hostdata = device_data; + } else { + sdev->hostdata = rport->dd_data; + } sdev_cnt = atomic_inc_return(&phba->sdev_cnt); /* @@ -5088,11 +5586,11 @@ lpfc_slave_alloc(struct scsi_device *sdev) } num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); if (num_to_alloc != num_allocated) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, - "0708 Allocation request of %d " - "command buffers did not succeed. " - "Allocated %d buffers.\n", - num_to_alloc, num_allocated); + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0708 Allocation request of %d " + "command buffers did not succeed. " + "Allocated %d buffers.\n", + num_to_alloc, num_allocated); } if (num_allocated > 0) phba->total_scsi_bufs += num_allocated; @@ -5142,11 +5640,344 @@ lpfc_slave_destroy(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; + unsigned long flags; + struct lpfc_device_data *device_data = sdev->hostdata; + atomic_dec(&phba->sdev_cnt); + if ((phba->cfg_fof) && (device_data)) { + spin_lock_irqsave(&phba->devicelock, flags); + device_data->available = false; + if (!device_data->oas_enabled) + lpfc_delete_device_data(phba, device_data); + spin_unlock_irqrestore(&phba->devicelock, flags); + } sdev->hostdata = NULL; return; } +/** + * lpfc_create_device_data - creates and initializes device data structure for OAS + * @pha: Pointer to host bus adapter structure. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun on target + * @atomic_create: Flag to indicate if memory should be allocated using the + * GFP_ATOMIC flag or not. + * + * This routine creates a device data structure which will contain identifying + * information for the device (host wwpn, target wwpn, lun), state of OAS, + * whether or not the corresponding lun is available by the system, + * and pointer to the rport data. 
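The atomic_create flag described below only selects the allocation context; a tiny sketch of the idiom (the pool is assumed to be pre-created, as the driver's device_data_mem_pool is):

#include <linux/mempool.h>

/* Allocation-context selection, as lpfc_create_device_data() does */
static void *sketch_alloc_lun_info(mempool_t *pool, bool atomic_ctx)
{
	return mempool_alloc(pool, atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
}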
+ * + * Return codes: + * NULL - Error + * Pointer to lpfc_device_data - Success + **/ +struct lpfc_device_data* +lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t lun, + bool atomic_create) +{ + + struct lpfc_device_data *lun_info; + int memory_flags; + + if (unlikely(!phba) || !vport_wwpn || !target_wwpn || + !(phba->cfg_fof)) + return NULL; + + /* Attempt to create the device data to contain lun info */ + + if (atomic_create) + memory_flags = GFP_ATOMIC; + else + memory_flags = GFP_KERNEL; + lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags); + if (!lun_info) + return NULL; + INIT_LIST_HEAD(&lun_info->listentry); + lun_info->rport_data = NULL; + memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn, + sizeof(struct lpfc_name)); + memcpy(&lun_info->device_id.target_wwpn, target_wwpn, + sizeof(struct lpfc_name)); + lun_info->device_id.lun = lun; + lun_info->oas_enabled = false; + lun_info->available = false; + return lun_info; +} + +/** + * lpfc_delete_device_data - frees a device data structure for OAS + * @pha: Pointer to host bus adapter structure. + * @lun_info: Pointer to device data structure to free. + * + * This routine frees the previously allocated device data structure passed. + * + **/ +void +lpfc_delete_device_data(struct lpfc_hba *phba, + struct lpfc_device_data *lun_info) +{ + + if (unlikely(!phba) || !lun_info || + !(phba->cfg_fof)) + return; + + if (!list_empty(&lun_info->listentry)) + list_del(&lun_info->listentry); + mempool_free(lun_info, phba->device_data_mem_pool); + return; +} + +/** + * __lpfc_get_device_data - returns the device data for the specified lun + * @pha: Pointer to host bus adapter structure. + * @list: Point to list to search. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun on target + * + * This routine searches the list passed for the specified lun's device data. + * This function does not hold locks, it is the responsibility of the caller + * to ensure the proper lock is held before calling the function. + * + * Return codes: + * NULL - Error + * Pointer to lpfc_device_data - Success + **/ +struct lpfc_device_data* +__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, + struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t lun) +{ + + struct lpfc_device_data *lun_info; + + if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || + !phba->cfg_fof) + return NULL; + + /* Check to see if the lun is already enabled for OAS. */ + + list_for_each_entry(lun_info, list, listentry) { + if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, + sizeof(struct lpfc_name)) == 0) && + (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, + sizeof(struct lpfc_name)) == 0) && + (lun_info->device_id.lun == lun)) + return lun_info; + } + + return NULL; +} + +/** + * lpfc_find_next_oas_lun - searches for the next oas lun + * @pha: Pointer to host bus adapter structure. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @starting_lun: Pointer to the lun to start searching for + * @found_vport_wwpn: Pointer to the found lun's vport wwpn information + * @found_target_wwpn: Pointer to the found lun's target wwpn information + * @found_lun: Pointer to the found lun. + * @found_lun_status: Pointer to status of the found lun. 
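Per the contract described below, a caller enumerates OAS luns by feeding the returned next-lun back into the routine until NO_MORE_OAS_LUN comes back. A hypothetical usage sketch (zeroed wwpns act as wildcards):

#include <linux/string.h>

/* Hypothetical walk over every OAS-enabled lun known to the driver */
static void sketch_walk_oas_luns(struct lpfc_hba *phba)
{
	struct lpfc_name vport_wwpn, target_wwpn;
	struct lpfc_name found_vport, found_target;
	uint64_t lun = FIND_FIRST_OAS_LUN;
	uint64_t found_lun;
	uint32_t status;

	memset(&vport_wwpn, 0, sizeof(vport_wwpn));	/* zero = any vport */
	memset(&target_wwpn, 0, sizeof(target_wwpn));	/* zero = any target */

	while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn,
				      &lun, &found_vport, &found_target,
				      &found_lun, &status)) {
		/* found_lun/status describe the current match here */
		if (lun == NO_MORE_OAS_LUN)
			break;	/* nothing further was queued up */
	}
}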
+ * + * This routine searches the luns list for the specified lun + * or the first lun for the vport/target. If the vport wwpn contains + * a zero value then a specific vport is not specified. In this case + * any vport which contains the lun will be considered a match. If the + * target wwpn contains a zero value then a specific target is not specified. + * In this case any target which contains the lun will be considered a + * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status + * are returned. The function will also return the next lun if available. + * If the next lun is not found, starting_lun parameter will be set to + * NO_MORE_OAS_LUN. + * + * Return codes: + * true - lun found + * false - lun not found or error + **/ +bool +lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t *starting_lun, + struct lpfc_name *found_vport_wwpn, + struct lpfc_name *found_target_wwpn, + uint64_t *found_lun, + uint32_t *found_lun_status) +{ + + unsigned long flags; + struct lpfc_device_data *lun_info; + struct lpfc_device_id *device_id; + uint64_t lun; + bool found = false; + + if (unlikely(!phba) || !vport_wwpn || !target_wwpn || + !starting_lun || !found_vport_wwpn || + !found_target_wwpn || !found_lun || !found_lun_status || + (*starting_lun == NO_MORE_OAS_LUN) || + !phba->cfg_fof) + return false; + + lun = *starting_lun; + *found_lun = NO_MORE_OAS_LUN; + *starting_lun = NO_MORE_OAS_LUN; + + /* Search for lun or the lun closest in value */ + + spin_lock_irqsave(&phba->devicelock, flags); + list_for_each_entry(lun_info, &phba->luns, listentry) { + if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) || + (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, + sizeof(struct lpfc_name)) == 0)) && + ((wwn_to_u64(target_wwpn->u.wwn) == 0) || + (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, + sizeof(struct lpfc_name)) == 0)) && + (lun_info->oas_enabled)) { + device_id = &lun_info->device_id; + if ((!found) && + ((lun == FIND_FIRST_OAS_LUN) || + (device_id->lun == lun))) { + *found_lun = device_id->lun; + memcpy(found_vport_wwpn, + &device_id->vport_wwpn, + sizeof(struct lpfc_name)); + memcpy(found_target_wwpn, + &device_id->target_wwpn, + sizeof(struct lpfc_name)); + if (lun_info->available) + *found_lun_status = + OAS_LUN_STATUS_EXISTS; + else + *found_lun_status = 0; + if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) + memset(vport_wwpn, 0x0, + sizeof(struct lpfc_name)); + if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET) + memset(target_wwpn, 0x0, + sizeof(struct lpfc_name)); + found = true; + } else if (found) { + *starting_lun = device_id->lun; + memcpy(vport_wwpn, &device_id->vport_wwpn, + sizeof(struct lpfc_name)); + memcpy(target_wwpn, &device_id->target_wwpn, + sizeof(struct lpfc_name)); + break; + } + } + } + spin_unlock_irqrestore(&phba->devicelock, flags); + return found; +} + +/** + * lpfc_enable_oas_lun - enables a lun for OAS operations + * @phba: Pointer to host bus adapter structure. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun + * + * This routine enables a lun for oas operations. The routine does so by + * doing the following : + * + * 1) Checks to see if the device data for the lun has been created. + * 2) If found, sets the OAS enabled flag if not set and returns. + * 3) Otherwise, creates a device data structure.
+ * 4) If successfully created, indicates the device data is for an OAS lun, + * indicates the lun is not available and adds it to the list of luns. + * + * Return codes: + * false - Error + * true - Success + **/ +bool +lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t lun) +{ + + struct lpfc_device_data *lun_info; + unsigned long flags; + + if (unlikely(!phba) || !vport_wwpn || !target_wwpn || + !phba->cfg_fof) + return false; + + spin_lock_irqsave(&phba->devicelock, flags); + + /* Check to see if the device data for the lun has been created */ + lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn, + target_wwpn, lun); + if (lun_info) { + if (!lun_info->oas_enabled) + lun_info->oas_enabled = true; + spin_unlock_irqrestore(&phba->devicelock, flags); + return true; + } + + /* Create a lun info structure and add to list of luns; must be an + * atomic allocation since devicelock is held */ + lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun, + true); + if (lun_info) { + lun_info->oas_enabled = true; + lun_info->available = false; + list_add_tail(&lun_info->listentry, &phba->luns); + spin_unlock_irqrestore(&phba->devicelock, flags); + return true; + } + spin_unlock_irqrestore(&phba->devicelock, flags); + return false; +} + +/** + * lpfc_disable_oas_lun - disables a lun for OAS operations + * @phba: Pointer to host bus adapter structure. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun + * + * This routine disables a lun for oas operations. The routine does so by + * doing the following : + * + * 1) Checks to see if the device data for the lun is created. + * 2) If present, clears the flag indicating this lun is for OAS. + * 3) If the lun is not available by the system, the device data is + * freed. + * + * Return codes: + * false - Error + * true - Success + **/ +bool +lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, + struct lpfc_name *target_wwpn, uint64_t lun) +{ + + struct lpfc_device_data *lun_info; + unsigned long flags; + + if (unlikely(!phba) || !vport_wwpn || !target_wwpn || + !phba->cfg_fof) + return false; + + spin_lock_irqsave(&phba->devicelock, flags); + + /* Check to see if the lun is available. */ + lun_info = __lpfc_get_device_data(phba, + &phba->luns, vport_wwpn, + target_wwpn, lun); + if (lun_info) { + lun_info->oas_enabled = false; + if (!lun_info->available) + lpfc_delete_device_data(phba, lun_info); + spin_unlock_irqrestore(&phba->devicelock, flags); + return true; + } + + spin_unlock_irqrestore(&phba->devicelock, flags); + return false; +} struct scsi_host_template lpfc_template = { .module = THIS_MODULE, diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 21a2ffe67ea..0389ac1e7b8 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2012 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -41,6 +41,20 @@ struct lpfc_rport_data { struct lpfc_nodelist *pnode; /* Pointer to the node structure.
*/ }; +struct lpfc_device_id { + struct lpfc_name vport_wwpn; + struct lpfc_name target_wwpn; + uint64_t lun; +}; + +struct lpfc_device_data { + struct list_head listentry; + struct lpfc_rport_data *rport_data; + struct lpfc_device_id device_id; + bool oas_enabled; + bool available; +}; + struct fcp_rsp { uint32_t rspRsvd1; /* FC Word 0, byte 0:3 */ uint32_t rspRsvd2; /* FC Word 1, byte 0:3 */ @@ -73,6 +87,7 @@ struct fcp_rsp { #define RSP_RO_MISMATCH_ERR 0x03 #define RSP_TM_NOT_SUPPORTED 0x04 /* Task mgmt function not supported */ #define RSP_TM_NOT_COMPLETED 0x05 /* Task mgmt function not performed */ +#define RSP_TM_INVALID_LU 0x09 /* Task mgmt function to invalid LU */ uint32_t rspInfoRsvd; /* FCP_RSP_INFO bytes 4-7 (reserved) */ @@ -165,3 +180,7 @@ struct lpfc_scsi_buf { #define LPFC_SCSI_DMA_EXT_SIZE 264 #define LPFC_BPL_SIZE 1024 #define MDAC_DIRECT_CMD 0x22 + +#define FIND_FIRST_OAS_LUN 0 +#define NO_MORE_OAS_LUN -1 +#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 74b67d98e95..32ada050557 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2012 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -71,6 +71,8 @@ static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *, int); static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *, uint32_t); +static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); +static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); static IOCB_t * lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) @@ -263,6 +265,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q) return NULL; q->hba_index = idx; + + /* + * insert barrier for instruction interlock : data from the hardware + * must have the valid bit checked before it can be copied and acted + * upon. Given what was seen in lpfc_sli4_cq_get() of speculative + * instructions allowing action on content before valid bit checked, + * add barrier here as well. May not be needed as "content" is a + * single 32-bit entity here (vs multi word structure for cq's). + */ + mb(); return eqe; } @@ -368,6 +380,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q) cqe = q->qe[q->hba_index].cqe; q->hba_index = idx; + + /* + * insert barrier for instruction interlock : data from the hardware + * must have the valid bit checked before it can be copied and acted + * upon. Speculative instructions were allowing a bcopy at the start + * of lpfc_sli4_fp_handle_wcqe(), which is called immediately + * after our return, to copy data before the valid bit check above + * was done. As such, some of the copied data was stale. The barrier + * ensures the check is before any data is copied. 
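The hazard this comment describes is generic to DMA-written completion rings: the CPU may speculatively load an entry's payload before the valid-bit load that guards it. A minimal sketch of the consume pattern (hypothetical entry layout; the patch above uses the stronger mb()):

#include <linux/types.h>
#include <linux/string.h>

struct sketch_cqe {		/* hypothetical completion entry */
	__le32 flags;		/* bit 31 = valid, written last by HW */
	__le32 payload[3];
};
#define SKETCH_CQE_VALID	0x80000000

static bool sketch_consume_cqe(const volatile struct sketch_cqe *hw,
			       struct sketch_cqe *out)
{
	__le32 flags = hw->flags;	/* load the guard word first */

	if (!(le32_to_cpu(flags) & SKETCH_CQE_VALID))
		return false;		/* entry not posted yet */
	rmb();	/* order the valid-bit load before the payload loads */
	memcpy(out, (const void *)hw, sizeof(*out));
	return true;
}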
+ */ + mb(); return cqe; } @@ -438,11 +461,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, struct lpfc_rqe *temp_hrqe; struct lpfc_rqe *temp_drqe; struct lpfc_register doorbell; - int put_index = hq->host_index; + int put_index; /* sanity check on queue memory */ if (unlikely(!hq) || unlikely(!dq)) return -ENOMEM; + put_index = hq->host_index; temp_hrqe = hq->qe[hq->host_index].rqe; temp_drqe = dq->qe[dq->host_index].rqe; @@ -632,7 +656,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba, if (!ndlp) goto out; - if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) { + if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) { rrq->send_rrq = 0; rrq->xritag = 0; rrq->rrq_stop_time = 0; @@ -666,7 +690,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba) spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + HZ * (phba->fc_ratov + 1); + next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { if (time_after(jiffies, rrq->rrq_stop_time)) @@ -675,7 +699,8 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba) next_time = rrq->rrq_stop_time; } spin_unlock_irqrestore(&phba->hbalock, iflags); - if (!list_empty(&phba->active_rrq_list)) + if ((!list_empty(&phba->active_rrq_list)) && + (!(phba->pport->load_flag & FC_UNLOADING))) mod_timer(&phba->rrq_tmr, next_time); list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { list_del(&rrq->list); @@ -781,7 +806,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) return; spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + HZ * (phba->fc_ratov * 2); + next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)); list_splice_init(&phba->active_rrq_list, &rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -789,7 +814,9 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) list_del(&rrq->list); lpfc_clr_rrq_active(phba, rrq->xritag, rrq); } - if (!list_empty(&phba->active_rrq_list)) + if ((!list_empty(&phba->active_rrq_list)) && + (!(phba->pport->load_flag & FC_UNLOADING))) + mod_timer(&phba->rrq_tmr, next_time); } @@ -810,7 +837,9 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, { if (!ndlp) return 0; - if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap)) + if (!ndlp->active_rrqs_xri_bitmap) + return 0; + if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap)) return 1; else return 0; @@ -860,7 +889,10 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) goto out; - if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap)) + if (!ndlp->active_rrqs_xri_bitmap) + goto out; + + if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap)) goto out; spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -872,14 +904,17 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, xritag, rxid, ndlp->nlp_DID, send_rrq); return -EINVAL; } - rrq->send_rrq = send_rrq; + if (phba->cfg_enable_rrq == 1) + rrq->send_rrq = send_rrq; + else + rrq->send_rrq = 0; rrq->xritag = xritag; - rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); + rrq->rrq_stop_time = jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); rrq->ndlp = ndlp; rrq->nlp_DID = ndlp->nlp_DID; rrq->vport = ndlp->vport; rrq->rxid = rxid; - rrq->send_rrq = send_rrq; spin_lock_irqsave(&phba->hbalock, iflags); empty = list_empty(&phba->active_rrq_list); list_add_tail(&rrq->list, 
&phba->active_rrq_list); @@ -923,8 +958,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) ndlp = piocbq->context_un.ndlp; - else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) && - (piocbq->iocb_flag & LPFC_IO_LIBDFC)) + else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) ndlp = piocbq->context_un.ndlp; else ndlp = piocbq->context1; @@ -1008,6 +1042,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) else sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); + if (sglq) { if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && (sglq->state != SGL_XRI_ABORTED)) { @@ -1024,7 +1059,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) &phba->sli4_hba.lpfc_sgl_list); /* Check if TXQ queue needs to be serviced */ - if (pring->txq_cnt) + if (!list_empty(&pring->txq)) lpfc_worker_wake_up(phba); } } @@ -1055,6 +1090,7 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { size_t start_clean = offsetof(struct lpfc_iocbq, iocb); + /* * Clean all volatile data fields, preserve iotag and node struct. */ @@ -1121,7 +1157,6 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, while (!list_empty(iocblist)) { list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); - if (!piocb->iocb_cmpl) lpfc_sli_release_iocbq(phba, piocb); else { @@ -1309,18 +1344,17 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, { list_add_tail(&piocb->list, &pring->txcmplq); piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; - pring->txcmplq_cnt++; - if (pring->txcmplq_cnt > pring->txcmplq_max) - pring->txcmplq_max = pring->txcmplq_cnt; if ((unlikely(pring->ringno == LPFC_ELS_RING)) && (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && - (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { + (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) && + (!(piocb->vport->load_flag & FC_UNLOADING))) { if (!piocb->vport) BUG(); else mod_timer(&piocb->vport->els_tmofunc, - jiffies + HZ * (phba->fc_ratov << 1)); + jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); } @@ -1343,8 +1377,6 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) struct lpfc_iocbq *cmd_iocb; list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); - if (cmd_iocb != NULL) - pring->txq_cnt--; return cmd_iocb; } @@ -1613,8 +1645,9 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) * (c) link attention events can be processed (fcp ring only) * (d) IOCB processing is not blocked by the outstanding mbox command. */ - if (pring->txq_cnt && - lpfc_is_link_up(phba) && + + if (lpfc_is_link_up(phba) && + (!list_empty(&pring->txq)) && (pring->ringno != phba->sli.fcp_ring || phba->sli.sli_flag & LPFC_PROCESS_LA)) { @@ -2322,7 +2355,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) /* Mailbox cmd <cmd> Cmpl <cmpl> */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x\n", pmb->vport ? 
pmb->vport->vpi : 0, pmbox->mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, pmb), @@ -2336,7 +2370,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) pmbox->un.varWords[4], pmbox->un.varWords[5], pmbox->un.varWords[6], - pmbox->un.varWords[7]); + pmbox->un.varWords[7], + pmbox->un.varWords[8], + pmbox->un.varWords[9], + pmbox->un.varWords[10]); if (pmb->mbox_cmpl) pmb->mbox_cmpl(phba,pmb); @@ -2611,7 +2648,6 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, cmd_iocb = phba->sli.iocbq_lookup[iotag]; list_del_init(&cmd_iocb->list); if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { - pring->txcmplq_cnt--; cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; } return cmd_iocb; @@ -2649,7 +2685,6 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, /* remove from txcmpl queue list */ list_del_init(&cmd_iocb->list); cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; - pring->txcmplq_cnt--; return cmd_iocb; } } @@ -2892,8 +2927,9 @@ void lpfc_poll_eratt(unsigned long ptr) lpfc_worker_wake_up(phba); else /* Restart the timer for next eratt poll */ - mod_timer(&phba->eratt_poll, jiffies + - HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); return; } @@ -3257,7 +3293,7 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (free_saveq) { list_for_each_entry_safe(rspiocbp, next_iocb, &saveq->list, list) { - list_del(&rspiocbp->list); + list_del_init(&rspiocbp->list); __lpfc_sli_release_iocbq(phba, rspiocbp); } __lpfc_sli_release_iocbq(phba, saveq); @@ -3496,15 +3532,27 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) /* Error everything on txq and txcmplq * First do the txq. */ - spin_lock_irq(&phba->hbalock); - list_splice_init(&pring->txq, &completions); - pring->txq_cnt = 0; + if (phba->sli_rev >= LPFC_SLI_REV4) { + spin_lock_irq(&pring->ring_lock); + list_splice_init(&pring->txq, &completions); + pring->txq_cnt = 0; + spin_unlock_irq(&pring->ring_lock); - /* Next issue ABTS for everything on the txcmplq */ - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) - lpfc_sli_issue_abort_iotag(phba, pring, iocb); + spin_lock_irq(&phba->hbalock); + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) + lpfc_sli_issue_abort_iotag(phba, pring, iocb); + spin_unlock_irq(&phba->hbalock); + } else { + spin_lock_irq(&phba->hbalock); + list_splice_init(&pring->txq, &completions); + pring->txq_cnt = 0; - spin_unlock_irq(&phba->hbalock); + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) + lpfc_sli_issue_abort_iotag(phba, pring, iocb); + spin_unlock_irq(&phba->hbalock); + } /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, @@ -3512,6 +3560,36 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) } /** + * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings + * @phba: Pointer to HBA context object. + * + * This function aborts all iocbs in FCP rings and frees all the iocb + * objects in txq. This function issues an abort iocb for all the iocb commands + * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before + * the return of this function. The caller is not required to hold any locks.
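+ *
+ * A usage sketch (editor's illustration, mirroring the mailbox timeout
+ * handler later in this patch; not added behavior):
+ *
+ *   spin_lock_irq(&phba->hbalock);
+ *   psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+ *   spin_unlock_irq(&phba->hbalock);
+ *   lpfc_sli_abort_fcp_rings(phba);   -- abort all outstanding FCP I/O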
+ **/ +void +lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + uint32_t i; + + /* Look on all the FCP Rings for the iotag */ + if (phba->sli_rev >= LPFC_SLI_REV4) { + for (i = 0; i < phba->cfg_fcp_io_channel; i++) { + pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; + lpfc_sli_abort_iocb_ring(phba, pring); + } + } else { + pring = &psli->ring[psli->fcp_ring]; + lpfc_sli_abort_iocb_ring(phba, pring); + } +} + + +/** * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring * @phba: Pointer to HBA context object. * @@ -3528,30 +3606,55 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) LIST_HEAD(txcmplq); struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; - - /* Currently, only one fcp ring */ - pring = &psli->ring[psli->fcp_ring]; + uint32_t i; spin_lock_irq(&phba->hbalock); - /* Retrieve everything on txq */ - list_splice_init(&pring->txq, &txq); - pring->txq_cnt = 0; - - /* Retrieve everything on the txcmplq */ - list_splice_init(&pring->txcmplq, &txcmplq); - pring->txcmplq_cnt = 0; - /* Indicate the I/O queues are flushed */ phba->hba_flag |= HBA_FCP_IOQ_FLUSH; spin_unlock_irq(&phba->hbalock); - /* Flush the txq */ - lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, - IOERR_SLI_DOWN); + /* Look on all the FCP Rings for the iotag */ + if (phba->sli_rev >= LPFC_SLI_REV4) { + for (i = 0; i < phba->cfg_fcp_io_channel; i++) { + pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; + + spin_lock_irq(&pring->ring_lock); + /* Retrieve everything on txq */ + list_splice_init(&pring->txq, &txq); + /* Retrieve everything on the txcmplq */ + list_splice_init(&pring->txcmplq, &txcmplq); + pring->txq_cnt = 0; + pring->txcmplq_cnt = 0; + spin_unlock_irq(&pring->ring_lock); + + /* Flush the txq */ + lpfc_sli_cancel_iocbs(phba, &txq, + IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + /* Flush the txcmpq */ + lpfc_sli_cancel_iocbs(phba, &txcmplq, + IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + } + } else { + pring = &psli->ring[psli->fcp_ring]; - /* Flush the txcmpq */ - lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, - IOERR_SLI_DOWN); + spin_lock_irq(&phba->hbalock); + /* Retrieve everything on txq */ + list_splice_init(&pring->txq, &txq); + /* Retrieve everything on the txcmplq */ + list_splice_init(&pring->txcmplq, &txcmplq); + pring->txq_cnt = 0; + pring->txcmplq_cnt = 0; + spin_unlock_irq(&phba->hbalock); + + /* Flush the txq */ + lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + /* Flush the txcmpq */ + lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + } } /** @@ -3954,12 +4057,13 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; uint16_t cfg_value; - int rc; + int rc = 0; /* Reset HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "0295 Reset HBA Data: x%x x%x\n", - phba->pport->port_state, psli->sli_flag); + "0295 Reset HBA Data: x%x x%x x%x\n", + phba->pport->port_state, psli->sli_flag, + phba->hba_flag); /* perform board reset */ phba->fc_eventTag = 0; @@ -3972,6 +4076,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) phba->fcf.fcf_flag = 0; spin_unlock_irq(&phba->hbalock); + /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ + if (phba->hba_flag & HBA_FW_DUMP_OP) { + phba->hba_flag &= ~HBA_FW_DUMP_OP; + return rc; + } + /* Now physically reset the device */ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0389 Performing PCI function reset!\n"); @@ -4565,7 +4675,8 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) } 
else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2708 This device does not support " - "Advanced Error Reporting (AER)\n"); + "Advanced Error Reporting (AER): %d\n", + rc); phba->cfg_aer_support = 0; } } @@ -4967,12 +5078,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) LPFC_QUEUE_REARM); } while (++fcp_eqidx < phba->cfg_fcp_io_channel); } + + if (phba->cfg_fof) + lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM); + if (phba->sli4_hba.hba_eq) { for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx], LPFC_QUEUE_REARM); } + + if (phba->cfg_fof) + lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM); } /** @@ -5498,6 +5616,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) list_del_init(&rsrc_blk->list); kfree(rsrc_blk); } + phba->sli4_hba.max_cfg_param.vpi_used = 0; break; case LPFC_RSC_TYPE_FCOE_XRI: kfree(phba->sli4_hba.xri_bmask); @@ -5798,6 +5917,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); } else { kfree(phba->vpi_bmask); + phba->sli4_hba.max_cfg_param.vpi_used = 0; kfree(phba->vpi_ids); bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); kfree(phba->sli4_hba.xri_bmask); @@ -5979,7 +6099,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) struct lpfc_sglq *sglq_entry = NULL; struct lpfc_sglq *sglq_entry_next = NULL; struct lpfc_sglq *sglq_entry_first = NULL; - int status, post_cnt = 0, num_posted = 0, block_cnt = 0; + int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0; int last_xritag = NO_XRI; LIST_HEAD(prep_sgl_list); LIST_HEAD(blck_sgl_list); @@ -5987,10 +6107,11 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) LIST_HEAD(post_sgl_list); LIST_HEAD(free_sgl_list); - spin_lock(&phba->hbalock); + spin_lock_irq(&phba->hbalock); list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); - spin_unlock(&phba->hbalock); + spin_unlock_irq(&phba->hbalock); + total_cnt = phba->sli4_hba.els_xri_cnt; list_for_each_entry_safe(sglq_entry, sglq_entry_next, &allc_sgl_list, list) { list_del_init(&sglq_entry->list); @@ -6042,9 +6163,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) sglq_entry->sli4_xritag); list_add_tail(&sglq_entry->list, &free_sgl_list); - spin_lock_irq(&phba->hbalock); - phba->sli4_hba.els_xri_cnt--; - spin_unlock_irq(&phba->hbalock); + total_cnt--; } } } @@ -6072,9 +6191,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) (sglq_entry_first->sli4_xritag + post_cnt - 1)); list_splice_init(&blck_sgl_list, &free_sgl_list); - spin_lock_irq(&phba->hbalock); - phba->sli4_hba.els_xri_cnt -= post_cnt; - spin_unlock_irq(&phba->hbalock); + total_cnt -= post_cnt; } /* don't reset xirtag due to hole in xri block */ @@ -6084,16 +6201,18 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) /* reset els sgl post count for next round of posting */ post_cnt = 0; } + /* update the number of XRIs posted for ELS */ + phba->sli4_hba.els_xri_cnt = total_cnt; /* free the els sgls failed to post */ lpfc_free_sgl_list(phba, &free_sgl_list); /* push els sgls posted to the availble list */ if (!list_empty(&post_sgl_list)) { - spin_lock(&phba->hbalock); + spin_lock_irq(&phba->hbalock); list_splice_init(&post_sgl_list, &phba->sli4_hba.lpfc_sgl_list); - spin_unlock(&phba->hbalock); + spin_unlock_irq(&phba->hbalock); } else { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3161 Failure to post els sgl to port.\n"); @@ -6160,6 +6279,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba 
*phba) kfree(vpd); goto out_free_mbox; } + mqe = &mboxq->u.mqe; phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) @@ -6246,6 +6366,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); + /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ + rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); + if (phba->pport->cfg_lun_queue_depth > rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "3362 LUN queue depth changed from %d to %d\n", + phba->pport->cfg_lun_queue_depth, rc); + phba->pport->cfg_lun_queue_depth = rc; + } + + /* * Discover the port's supported feature set and match it against the * host's requests. @@ -6433,16 +6563,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Start the ELS watchdog timer */ mod_timer(&vport->els_tmofunc, - jiffies + HZ * (phba->fc_ratov * 2)); + jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); /* Start heart beat timer */ mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hb_outstanding = 0; phba->last_completion_time = jiffies; /* Start error attention (ERATT) polling timer */ - mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); /* Enable PCIe device Advanced Error Reporting (AER) if configured */ if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { @@ -6551,6 +6682,108 @@ lpfc_mbox_timeout(unsigned long ptr) return; } +/** + * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions + * are pending + * @phba: Pointer to HBA context object. + * + * This function checks if any mailbox completions are present on the mailbox + * completion queue. + **/ +bool +lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) +{ + + uint32_t idx; + struct lpfc_queue *mcq; + struct lpfc_mcqe *mcqe; + bool pending_completions = false; + + if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) + return false; + + /* Check for completions on mailbox completion queue */ + + mcq = phba->sli4_hba.mbx_cq; + idx = mcq->hba_index; + while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) { + mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; + if (bf_get_le32(lpfc_trailer_completed, mcqe) && + (!bf_get_le32(lpfc_trailer_async, mcqe))) { + pending_completions = true; + break; + } + idx = (idx + 1) % mcq->entry_count; + if (mcq->hba_index == idx) + break; + } + return pending_completions; + +} + +/** + * lpfc_sli4_process_missed_mbox_completions - process mbox completions + * that were missed. + * @phba: Pointer to HBA context object. + * + * For sli4, it is possible to miss an interrupt. As such, mbox completions + * may be missed, causing erroneous mailbox timeouts to occur. This function + * checks to see if mbox completions are on the mailbox completion queue + * and will process all the completions associated with the eq for the + * mailbox completion queue.
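+ *
+ * In outline (editor's summary of the routine below, not added
+ * behavior):
+ *
+ *   lpfc_sli4_eq_clr_intr(fpeq);                  -- quiesce the EQ
+ *   if (lpfc_sli4_mbox_completions_pending(phba))
+ *           while ((eqe = lpfc_sli4_eq_get(fpeq)))
+ *                   lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
+ *   lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); -- always re-arm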
+ **/ +bool +lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) +{ + + uint32_t eqidx; + struct lpfc_queue *fpeq = NULL; + struct lpfc_eqe *eqe; + bool mbox_pending; + + if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) + return false; + + /* Find the eq associated with the mcq */ + + if (phba->sli4_hba.hba_eq) + for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++) + if (phba->sli4_hba.hba_eq[eqidx]->queue_id == + phba->sli4_hba.mbx_cq->assoc_qid) { + fpeq = phba->sli4_hba.hba_eq[eqidx]; + break; + } + if (!fpeq) + return false; + + /* Turn off interrupts from this EQ */ + + lpfc_sli4_eq_clr_intr(fpeq); + + /* Check to see if a mbox completion is pending */ + + mbox_pending = lpfc_sli4_mbox_completions_pending(phba); + + /* + * If a mbox completion is pending, process all the events on EQ + * associated with the mbox completion queue (this could include + * mailbox commands, async events, els commands, receive queue data + * and fcp commands) + */ + + if (mbox_pending) + while ((eqe = lpfc_sli4_eq_get(fpeq))) { + lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); + fpeq->EQ_processed++; + } + + /* Always clear and re-arm the EQ */ + + lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); + + return mbox_pending; + +} /** * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout @@ -6566,7 +6799,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; MAILBOX_t *mb = &pmbox->u.mb; struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; + + /* If the mailbox completed, process the completion and return */ + if (lpfc_sli4_process_missed_mbox_completions(phba)) + return; /* Check the pmbox pointer first. There is a race condition * between the mbox timeout handler getting executed in the @@ -6604,8 +6840,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0345 Resetting board due to mailbox timeout\n"); @@ -6809,8 +7044,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, goto out_not_finished; } /* timeout active mbox command */ - mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, pmbox)))); + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * + 1000); + mod_timer(&psli->mbox_tmo, jiffies + timeout); } /* Mailbox cmd <cmd> issue */ @@ -7061,6 +7297,10 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 1000) + jiffies; spin_unlock_irq(&phba->hbalock); + /* Make sure the mailbox is really active */ + if (timeout) + lpfc_sli4_process_missed_mbox_completions(phba); + /* Wait for the outstnading mailbox command to complete */ while (phba->sli.mbox_active) { /* Check active mailbox complete status every 2ms */ @@ -7483,7 +7723,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) /* Start timer for the mbox_tmo and log some mailbox post messages */ mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, mboxq)))); + msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " @@ -7614,7 +7854,6 @@ __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, { /* Insert the caller's iocb in the txq tail for later processing. 
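 * Queued iocbs are drained later by lpfc_sli_resume_iocb() once the
 * link is up and iocb processing is not blocked (editor's note).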
*/ list_add_tail(&piocb->list, &pring->txq); - pring->txq_cnt++; } /** @@ -7902,15 +8141,22 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, static inline uint32_t lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) { - int i; - - if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) - i = smp_processor_id(); - else - i = atomic_add_return(1, &phba->fcp_qidx); - - i = (i % phba->cfg_fcp_io_channel); - return i; + struct lpfc_vector_map_info *cpup; + int chann, cpu; + + if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU + && phba->cfg_fcp_io_channel > 1) { + cpu = smp_processor_id(); + if (cpu < phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + cpup += cpu; + return cpup->channel_id; + } + chann = cpu; + } + chann = atomic_add_return(1, &phba->fcp_qidx); + chann = (chann % phba->cfg_fcp_io_channel); + return chann; } /** @@ -7962,6 +8208,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, abort_tag = (uint32_t) iocbq->iotag; xritag = iocbq->sli4_xritag; wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ + wqe->generic.wqe_com.word10 = 0; /* words0-2 bpl convert bde */ if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / @@ -8055,6 +8302,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); + wqe->els_req.max_response_payload_len = total_len - xmit_len; break; case CMD_XMIT_SEQUENCE64_CX: bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, @@ -8099,8 +8347,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, command_type = FCP_COMMAND_DATA_OUT; /* word3 iocb=iotag wqe=payload_offset_len */ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ - wqe->fcp_iwrite.payload_offset_len = - xmit_len + sizeof(struct fcp_rsp); + bf_set(payload_offset_len, &wqe->fcp_iwrite, + xmit_len + sizeof(struct fcp_rsp)); + bf_set(cmd_buff_len, &wqe->fcp_iwrite, + 0); /* word4 iocb=parameter wqe=total_xfer_length memcpy */ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, @@ -8114,12 +8364,22 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); + if (iocbq->iocb_flag & LPFC_IO_OAS) { + bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); + if (phba->cfg_XLanePriority) { + bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); + bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, + (phba->cfg_XLanePriority << 1)); + } + } break; case CMD_FCP_IREAD64_CR: /* word3 iocb=iotag wqe=payload_offset_len */ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ - wqe->fcp_iread.payload_offset_len = - xmit_len + sizeof(struct fcp_rsp); + bf_set(payload_offset_len, &wqe->fcp_iread, + xmit_len + sizeof(struct fcp_rsp)); + bf_set(cmd_buff_len, &wqe->fcp_iread, + 0); /* word4 iocb=parameter wqe=total_xfer_length memcpy */ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, @@ -8133,10 +8393,23 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); + if (iocbq->iocb_flag 
& LPFC_IO_OAS) { + bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); + if (phba->cfg_XLanePriority) { + bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); + bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, + (phba->cfg_XLanePriority << 1)); + } + } break; case CMD_FCP_ICMND64_CR: + /* word3 iocb=iotag wqe=payload_offset_len */ + /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ + bf_set(payload_offset_len, &wqe->fcp_icmd, + xmit_len + sizeof(struct fcp_rsp)); + bf_set(cmd_buff_len, &wqe->fcp_icmd, + 0); /* word3 iocb=IO_TAG wqe=reserved */ - wqe->fcp_icmd.rsrvd3 = 0; bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); /* Always open the exchange */ bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); @@ -8148,6 +8421,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, iocbq->iocb.ulpFCP2Rcvy); + if (iocbq->iocb_flag & LPFC_IO_OAS) { + bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); + if (phba->cfg_XLanePriority) { + bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); + bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, + (phba->cfg_XLanePriority << 1)); + } + } break; case CMD_GEN_REQUEST64_CR: /* For this command calculate the xmit length of the @@ -8182,6 +8463,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); + wqe->gen_req.max_response_payload_len = total_len - xmit_len; command_type = OTHER_COMMAND; break; case CMD_XMIT_ELS_RSP64_CX: @@ -8379,6 +8661,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, { struct lpfc_sglq *sglq; union lpfc_wqe wqe; + struct lpfc_queue *wq; struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; if (piocb->sli4_xritag == NO_XRI) { @@ -8386,7 +8669,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) sglq = NULL; else { - if (pring->txq_cnt) { + if (!list_empty(&pring->txq)) { if (!(flag & SLI_IOCB_RET_IOCB)) { __lpfc_sli_ringtx_put(phba, pring, piocb); @@ -8431,11 +8714,17 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, return IOCB_ERROR; if ((piocb->iocb_flag & LPFC_IO_FCP) || - (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { - if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], - &wqe)) + (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { + if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) { + wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx]; + } else { + wq = phba->sli4_hba.oas_wq; + } + if (lpfc_sli4_wq_put(wq, &wqe)) return IOCB_ERROR; } else { + if (unlikely(!phba->sli4_hba.els_wq)) + return IOCB_ERROR; if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) return IOCB_ERROR; } @@ -8521,12 +8810,20 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, if (phba->sli_rev == LPFC_SLI_REV4) { if (piocb->iocb_flag & LPFC_IO_FCP) { - if (unlikely(!phba->sli4_hba.fcp_wq)) - return IOCB_ERROR; - idx = lpfc_sli4_scmd_to_wqidx_distr(phba); - piocb->fcp_wqidx = idx; - ring_number = MAX_SLI3_CONFIGURED_RINGS + idx; - + if (!phba->cfg_fof || (!(piocb->iocb_flag & + LPFC_IO_OAS))) { + if (unlikely(!phba->sli4_hba.fcp_wq)) + return IOCB_ERROR; + idx = lpfc_sli4_scmd_to_wqidx_distr(phba); + piocb->fcp_wqidx = idx; + ring_number = MAX_SLI3_CONFIGURED_RINGS + idx; + } else { + if (unlikely(!phba->sli4_hba.oas_wq)) + return IOCB_ERROR; + idx = 0; + piocb->fcp_wqidx = 0; + ring_number = LPFC_FCP_OAS_RING; + 
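+			/*
+			 * Editor's note: OAS I/O bypasses the per-CPU WQ
+			 * distribution above; it is funneled through the
+			 * single dedicated OAS work queue, hence the fixed
+			 * index 0.
+			 */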
} pring = &phba->sli.ring[ring_number]; spin_lock_irqsave(&pring->ring_lock, iflags); rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, @@ -8700,7 +8997,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "3116 Port generated FCP XRI ABORT event on " "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", - ndlp->vport->vpi, ndlp->nlp_rpi, + ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], bf_get(lpfc_wcqe_xa_xri, axri), bf_get(lpfc_wcqe_xa_status, axri), axri->parameter); @@ -8948,6 +9245,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba) pring->sli.sli3.next_cmdidx = 0; pring->sli.sli3.local_getidx = 0; pring->sli.sli3.cmdidx = 0; + pring->flag = 0; INIT_LIST_HEAD(&pring->txq); INIT_LIST_HEAD(&pring->txcmplq); INIT_LIST_HEAD(&pring->iocb_continueq); @@ -9054,7 +9352,6 @@ lpfc_sli_host_down(struct lpfc_vport *vport) if (iocb->vport != vport) continue; list_move_tail(&iocb->list, &completions); - pring->txq_cnt--; } /* Next issue ABTS for everything on the txcmplq */ @@ -9123,8 +9420,6 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) * given to the FW yet. */ list_splice_init(&pring->txq, &completions); - pring->txq_cnt = 0; - } spin_unlock_irqrestore(&phba->hbalock, flags); @@ -9586,43 +9881,6 @@ abort_iotag_exit: } /** - * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring - * @phba: Pointer to HBA context object. - * @pring: Pointer to driver SLI ring object. - * - * This function aborts all iocbs in the given ring and frees all the iocb - * objects in txq. This function issues abort iocbs unconditionally for all - * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed - * to complete before the return of this function. The caller is not required - * to hold any locks. - **/ -static void -lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) -{ - LIST_HEAD(completions); - struct lpfc_iocbq *iocb, *next_iocb; - - if (pring->ringno == LPFC_ELS_RING) - lpfc_fabric_abort_hba(phba); - - spin_lock_irq(&phba->hbalock); - - /* Take off all the iocbs on txq for cancelling */ - list_splice_init(&pring->txq, &completions); - pring->txq_cnt = 0; - - /* Next issue ABTS for everything on the txcmplq */ - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) - lpfc_sli_abort_iotag_issue(phba, pring, iocb); - - spin_unlock_irq(&phba->hbalock); - - /* Cancel all the IOCBs from the completions list */ - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, - IOERR_SLI_ABORTED); -} - -/** * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. * @phba: pointer to lpfc HBA data structure. * @@ -9637,7 +9895,7 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; - lpfc_sli_iocb_ring_abort(phba, pring); + lpfc_sli_abort_iocb_ring(phba, pring); } } @@ -9759,7 +10017,7 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "3096 ABORT_XRI_CN completing on xri x%x " + "3096 ABORT_XRI_CN completing on rpi x%x " "original iotag x%x, abort cmd iotag x%x " "status 0x%x, reason 0x%x\n", cmdiocb->iocb.un.acxri.abortContextTag, @@ -9809,6 +10067,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abort_cmd) != 0) continue; + /* + * If the iocbq is already being aborted, don't take a second + * action, but do count it. 
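+	 * The LPFC_DRIVER_ABORTED flag tested below is set later in this
+	 * loop, just before the ABTS is issued (editor's note).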
+ */ + if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) + continue; + /* issue ABTS for this IOCB based on iotag */ abtsiocb = lpfc_sli_get_iocbq(phba); if (abtsiocb == NULL) { @@ -9816,6 +10081,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, continue; } + /* indicate the IO is being aborted by the driver. */ + iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; + cmd = &iocbq->iocb; abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; @@ -9825,7 +10093,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; abtsiocb->iocb.ulpLe = 1; abtsiocb->iocb.ulpClass = cmd->ulpClass; - abtsiocb->vport = phba->pport; + abtsiocb->vport = vport; /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; @@ -9852,6 +10120,124 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, } /** + * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN + * @vport: Pointer to virtual port. + * @pring: Pointer to driver SLI ring object. + * @tgt_id: SCSI ID of the target. + * @lun_id: LUN ID of the scsi device. + * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. + * + * This function sends an abort command for every SCSI command + * associated with the given virtual port pending on the ring + * filtered by lpfc_sli_validate_fcp_iocb function. + * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the + * FCP iocbs associated with the lun specified by the tgt_id and lun_id + * parameters. + * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the + * FCP iocbs associated with the SCSI target specified by the tgt_id parameter. + * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all + * FCP iocbs associated with the virtual port. + * This function returns the number of iocbs it aborted. + * This function is called with no locks held right after a taskmgmt + * command is sent. + **/ +int +lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, + uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *abtsiocbq; + struct lpfc_iocbq *iocbq; + IOCB_t *icmd; + int sum, i, ret_val; + unsigned long iflags; + struct lpfc_sli_ring *pring_s4; + uint32_t ring_number; + + spin_lock_irq(&phba->hbalock); + + /* all I/Os are in process of being flushed */ + if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { + spin_unlock_irq(&phba->hbalock); + return 0; + } + sum = 0; + + for (i = 1; i <= phba->sli.last_iotag; i++) { + iocbq = phba->sli.iocbq_lookup[i]; + + if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, + cmd) != 0) + continue; + + /* + * If the iocbq is already being aborted, don't take a second + * action, but do count it.
+ */ + if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) + continue; + + /* issue ABTS for this IOCB based on iotag */ + abtsiocbq = __lpfc_sli_get_iocbq(phba); + if (abtsiocbq == NULL) + continue; + + icmd = &iocbq->iocb; + abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; + abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; + if (phba->sli_rev == LPFC_SLI_REV4) + abtsiocbq->iocb.un.acxri.abortIoTag = + iocbq->sli4_xritag; + else + abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; + abtsiocbq->iocb.ulpLe = 1; + abtsiocbq->iocb.ulpClass = icmd->ulpClass; + abtsiocbq->vport = vport; + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx; + if (iocbq->iocb_flag & LPFC_IO_FCP) + abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; + + if (lpfc_is_link_up(phba)) + abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; + else + abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; + + /* Setup callback routine and issue the command. */ + abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; + + /* + * Indicate the IO is being aborted by the driver and set + * the caller's flag into the aborted IO. + */ + iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; + + if (phba->sli_rev == LPFC_SLI_REV4) { + ring_number = MAX_SLI3_CONFIGURED_RINGS + + iocbq->fcp_wqidx; + pring_s4 = &phba->sli.ring[ring_number]; + /* Note: both hbalock and ring_lock must be set here */ + spin_lock_irqsave(&pring_s4->ring_lock, iflags); + ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, + abtsiocbq, 0); + spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); + } else { + ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, + abtsiocbq, 0); + } + + + if (ret_val == IOCB_ERROR) + __lpfc_sli_release_iocbq(phba, abtsiocbq); + else + sum++; + } + spin_unlock_irq(&phba->hbalock); + return sum; +} + +/** * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler * @phba: Pointer to HBA context object. * @cmdiocbq: Pointer to command iocb. @@ -9878,6 +10264,24 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd; spin_lock_irqsave(&phba->hbalock, iflags); + if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { + + /* + * A time out has occurred for the iocb. If a time out + * completion handler has been supplied, call it. Otherwise, + * just free the iocbq. + */ + + spin_unlock_irqrestore(&phba->hbalock, iflags); + cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; + cmdiocbq->wait_iocb_cmpl = NULL; + if (cmdiocbq->iocb_cmpl) + (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); + else + lpfc_sli_release_iocbq(phba, cmdiocbq); + return; + } + cmdiocbq->iocb_flag |= LPFC_IO_WAKE; if (cmdiocbq->context2 && rspiocbq) memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, @@ -9933,10 +10337,16 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba, * @timeout: Timeout in number of seconds. * * This function issues the iocb to firmware and waits for the - * iocb to complete. If the iocb command is not - * completed within timeout seconds, it returns IOCB_TIMEDOUT. - * Caller should not free the iocb resources if this function - * returns IOCB_TIMEDOUT. + * iocb to complete. The iocb_cmpl field of the iocb shall be used + * to handle iocbs which time out. If the field is NULL, the + * function shall free the iocbq structure. If more clean up is + * needed, the caller is expected to provide a completion function + * that will provide the needed clean up.
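+ * For example (editor's sketch; my_tmo_cmpl and its buffer handling
+ * are hypothetical, not part of this change):
+ *
+ *   static void my_tmo_cmpl(struct lpfc_hba *phba,
+ *                           struct lpfc_iocbq *cmdiocb,
+ *                           struct lpfc_iocbq *rspiocb)
+ *   {
+ *           ... release any buffers tied to cmdiocb ...
+ *           lpfc_sli_release_iocbq(phba, cmdiocb);
+ *   }
+ *
+ *   piocb->iocb_cmpl = my_tmo_cmpl;
+ *   retval = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb,
+ *                                     prspiocbq, timeout);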
If the iocb command is + * not completed within timeout seconds, the function will either + * free the iocbq structure (if iocb_cmpl == NULL) or execute the + * completion function set in the iocb_cmpl field and then return + * a status of IOCB_TIMEDOUT. The caller should not free the iocb + * resources if this function returns IOCB_TIMEDOUT. * The function waits for the iocb completion using a * non-interruptible wait. * This function will sleep while waiting for iocb completion. @@ -9965,7 +10375,13 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, long timeleft, timeout_req = 0; int retval = IOCB_SUCCESS; uint32_t creg_val; + struct lpfc_iocbq *iocb; + int txq_cnt = 0; + int txcmplq_cnt = 0; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + unsigned long iflags; + bool iocb_completed = true; + /* * If the caller has provided a response iocbq buffer, then context2 * is NULL or it's an error. @@ -9976,9 +10392,10 @@ piocb->context2 = prspiocbq; } + piocb->wait_iocb_cmpl = piocb->iocb_cmpl; piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; piocb->context_un.wait_queue = &done_q; - piocb->iocb_flag &= ~LPFC_IO_WAKE; + piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); if (phba->cfg_poll & DISABLE_FCP_RING_INT) { if (lpfc_readl(phba->HCregaddr, &creg_val)) @@ -9991,14 +10408,30 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, SLI_IOCB_RET_IOCB); if (retval == IOCB_SUCCESS) { - timeout_req = timeout * HZ; + timeout_req = msecs_to_jiffies(timeout * 1000); timeleft = wait_event_timeout(done_q, lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), timeout_req); + spin_lock_irqsave(&phba->hbalock, iflags); + if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { + + /* + * IOCB timed out. Inform the wake iocb wait + * completion function and set local status + */ - if (piocb->iocb_flag & LPFC_IO_WAKE) { + iocb_completed = false; + piocb->iocb_flag |= LPFC_IO_WAKE_TMO; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + if (iocb_completed) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "0331 IOCB wake signaled\n"); + /* Note: we are not indicating if the IOCB has a success + * status or not - that's for the caller to check. + * IOCB_SUCCESS means just that the command was sent and + * completed, not that it completed successfully. + */ } else if (timeleft == 0) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0338 IOCB wait timeout error - no " @@ -10012,9 +10445,17 @@ retval = IOCB_TIMEDOUT; } } else if (retval == IOCB_BUSY) { - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", - phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt); + if (phba->cfg_log_verbose & LOG_SLI) { + list_for_each_entry(iocb, &pring->txq, list) { + txq_cnt++; + } + list_for_each_entry(iocb, &pring->txcmplq, list) { + txcmplq_cnt++; + } + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", + phba->iocb_cnt, txq_cnt, txcmplq_cnt); + } return retval; } else { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, @@ -10070,12 +10511,13 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, uint32_t timeout) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); + MAILBOX_t *mb = NULL; int retval; unsigned long flag; - /* The caller must leave context1 empty.
*/ + /* The caller might set context1 for extended buffer */ if (pmboxq->context1) - return MBX_NOT_FINISHED; + mb = (MAILBOX_t *)pmboxq->context1; pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; /* setup wake call as IOCB callback */ @@ -10088,22 +10530,25 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, if (retval == MBX_BUSY || retval == MBX_SUCCESS) { wait_event_interruptible_timeout(done_q, pmboxq->mbox_flag & LPFC_MBX_WAKE, - timeout * HZ); + msecs_to_jiffies(timeout * 1000)); spin_lock_irqsave(&phba->hbalock, flag); - pmboxq->context1 = NULL; + /* restore the possible extended buffer for free resource */ + pmboxq->context1 = (uint8_t *)mb; /* * if LPFC_MBX_WAKE flag is set the mailbox is completed * else do not free the resources. */ if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { retval = MBX_SUCCESS; - lpfc_sli4_swap_str(phba, pmboxq); } else { retval = MBX_TIMEOUT; pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } spin_unlock_irqrestore(&phba->hbalock, flag); + } else { + /* restore the possible extended buffer for free resource */ + pmboxq->context1 = (uint8_t *)mb; } return retval; @@ -10988,8 +11433,11 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbOut, struct lpfc_wcqe_complete *wcqe) { + int numBdes, i; unsigned long iflags; - uint32_t status; + uint32_t status, max_response; + struct lpfc_dmabuf *dmabuf; + struct ulp_bde64 *bpl, bde; size_t offset = offsetof(struct lpfc_iocbq, iocb); memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, @@ -11006,7 +11454,36 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; else { pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; - pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed; + switch (pIocbOut->iocb.ulpCommand) { + case CMD_ELS_REQUEST64_CR: + dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; + bpl = (struct ulp_bde64 *)dmabuf->virt; + bde.tus.w = le32_to_cpu(bpl[1].tus.w); + max_response = bde.tus.f.bdeSize; + break; + case CMD_GEN_REQUEST64_CR: + max_response = 0; + if (!pIocbOut->context3) + break; + numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ + sizeof(struct ulp_bde64); + dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; + bpl = (struct ulp_bde64 *)dmabuf->virt; + for (i = 0; i < numBdes; i++) { + bde.tus.w = le32_to_cpu(bpl[i].tus.w); + if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) + max_response += bde.tus.f.bdeSize; + } + break; + default: + max_response = wcqe->total_data_placed; + break; + } + if (max_response < wcqe->total_data_placed) + pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; + else + pIocbIn->iocb.un.genreq64.bdl.bdeSize = + wcqe->total_data_placed; } /* Convert BG errors for completion status */ @@ -11297,16 +11774,25 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_iocbq *irspiocbq; unsigned long iflags; struct lpfc_sli_ring *pring = cq->pring; + int txq_cnt = 0; + int txcmplq_cnt = 0; + int fcp_txcmplq_cnt = 0; /* Get an irspiocbq for later ELS response processing use */ irspiocbq = lpfc_sli_get_iocbq(phba); if (!irspiocbq) { + if (!list_empty(&pring->txq)) + txq_cnt++; + if (!list_empty(&pring->txcmplq)) + txcmplq_cnt++; + if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq)) + fcp_txcmplq_cnt++; lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", - pring->txq_cnt, phba->iocb_cnt, - phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt, - phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt); + 
txq_cnt, phba->iocb_cnt, + fcp_txcmplq_cnt, + txcmplq_cnt); return false; } @@ -11877,6 +12363,175 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); } + +/** + * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue + * entry + * @phba: Pointer to HBA context object. + * @eqe: Pointer to fast-path event queue entry. + * + * This routine processes an event queue entry from the Flash Optimized Fabric + * event queue. It will check the MajorCode and MinorCode to determine whether + * this is for a completion event on a completion queue; if not, an error shall + * be logged and the routine will just return. Otherwise, it will get to the + * corresponding completion queue and process all the entries on the completion + * queue, rearm the completion queue, and then return. + **/ +static void +lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) +{ + struct lpfc_queue *cq; + struct lpfc_cqe *cqe; + bool workposted = false; + uint16_t cqid; + int ecount = 0; + + if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "9147 Not a valid completion " + "event: majorcode=x%x, minorcode=x%x\n", + bf_get_le32(lpfc_eqe_major_code, eqe), + bf_get_le32(lpfc_eqe_minor_code, eqe)); + return; + } + + /* Get the reference to the corresponding CQ */ + cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); + + /* Next check for OAS */ + cq = phba->sli4_hba.oas_cq; + if (unlikely(!cq)) { + if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "9148 OAS completion queue " + "does not exist\n"); + return; + } + + if (unlikely(cqid != cq->queue_id)) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "9149 Miss-matched fast-path compl " + "queue id: eqcqid=%d, fcpcqid=%d\n", + cqid, cq->queue_id); + return; + } + + /* Process all the entries to the OAS CQ */ + while ((cqe = lpfc_sli4_cq_get(cq))) { + workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); + if (!(++ecount % cq->entry_repost)) + lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); + } + + /* Track the max number of CQEs processed in 1 EQ */ + if (ecount > cq->CQ_max_cqe) + cq->CQ_max_cqe = ecount; + + /* Catch the no cq entry condition */ + if (unlikely(ecount == 0)) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "9153 No entry from fast-path completion " + "queue fcpcqid=%d\n", cq->queue_id); + + /* In any case, flush and re-arm the CQ */ + lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); + + /* wake up worker thread if there is work to be done */ + if (workposted) + lpfc_worker_wake_up(phba); +} + +/** + * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device + * @irq: Interrupt number. + * @dev_id: The device context pointer. + * + * This function is directly called from the PCI layer as an interrupt + * service routine when device with SLI-4 interface spec is enabled with + * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric + * IOCB ring event in the HBA. However, when the device is enabled with either + * MSI or Pin-IRQ interrupt mode, this function is called as part of the + * device-level interrupt handler. When the PCI slot is in error recovery + * or the HBA is undergoing initialization, the interrupt handler will not + * process the interrupt. The Flash Optimized Fabric ring events are handled in + * the interrupt context. This function is called without any lock held. + * It gets the hbalock to access and update SLI data structures.
Note that + * the EQ-to-CQ mapping is one-to-one, such that the EQ index is + * equal to the CQ index. + * + * This function returns IRQ_HANDLED when the interrupt is handled, else it + * returns IRQ_NONE. + **/ +irqreturn_t +lpfc_sli4_fof_intr_handler(int irq, void *dev_id) +{ + struct lpfc_hba *phba; + struct lpfc_fcp_eq_hdl *fcp_eq_hdl; + struct lpfc_queue *eq; + struct lpfc_eqe *eqe; + unsigned long iflag; + int ecount = 0; + uint32_t eqidx; + + /* Get the driver's phba structure from the dev_id */ + fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; + phba = fcp_eq_hdl->phba; + eqidx = fcp_eq_hdl->idx; + + if (unlikely(!phba)) + return IRQ_NONE; + + /* Get to the EQ struct associated with this vector */ + eq = phba->sli4_hba.fof_eq; + if (unlikely(!eq)) + return IRQ_NONE; + + /* Check device state for handling interrupt */ + if (unlikely(lpfc_intr_state_check(phba))) { + eq->EQ_badstate++; + /* Check again for link_state with lock held */ + spin_lock_irqsave(&phba->hbalock, iflag); + if (phba->link_state < LPFC_LINK_DOWN) + /* Flush, clear interrupt, and rearm the EQ */ + lpfc_sli4_eq_flush(phba, eq); + spin_unlock_irqrestore(&phba->hbalock, iflag); + return IRQ_NONE; + } + + /* + * Process all the events on the FCP fast-path EQ + */ + while ((eqe = lpfc_sli4_eq_get(eq))) { + lpfc_sli4_fof_handle_eqe(phba, eqe); + if (!(++ecount % eq->entry_repost)) + lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM); + eq->EQ_processed++; + } + + /* Track the max number of EQEs processed in 1 intr */ + if (ecount > eq->EQ_max_eqe) + eq->EQ_max_eqe = ecount; + + + if (unlikely(ecount == 0)) { + eq->EQ_no_entry++; + + if (phba->intr_type == MSIX) + /* MSI-X treated interrupt served as no EQ share INT */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "9145 MSI-X interrupt with no EQE\n"); + else { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "9146 ISR interrupt with no EQE\n"); + /* Non MSI-X treated on interrupt as EQ share INT */ + return IRQ_NONE; + } + } + /* Always clear and re-arm the fast-path EQ */ + lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); + return IRQ_HANDLED; +} + /** * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device * @irq: Interrupt number. @@ -12032,6 +12687,13 @@ lpfc_sli4_intr_handler(int irq, void *dev_id) hba_handled |= true; } + if (phba->cfg_fof) { + hba_irq_rc = lpfc_sli4_fof_intr_handler(irq, + &phba->sli4_hba.fcp_eq_hdl[0]); + if (hba_irq_rc == IRQ_HANDLED) + hba_handled |= true; + } + return (hba_handled == true) ?
IRQ_HANDLED : IRQ_NONE; } /* lpfc_sli4_intr_handler */ @@ -12148,7 +12810,6 @@ static void __iomem * lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) { struct pci_dev *pdev; - unsigned long bar_map, bar_map_len; if (!phba->pcidev) return NULL; @@ -12157,25 +12818,10 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) switch (pci_barset) { case WQ_PCI_BAR_0_AND_1: - if (!phba->pci_bar0_memmap_p) { - bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0); - bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); - phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len); - } return phba->pci_bar0_memmap_p; case WQ_PCI_BAR_2_AND_3: - if (!phba->pci_bar2_memmap_p) { - bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2); - bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); - phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len); - } return phba->pci_bar2_memmap_p; case WQ_PCI_BAR_4_AND_5: - if (!phba->pci_bar4_memmap_p) { - bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4); - bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); - phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len); - } return phba->pci_bar4_memmap_p; default: break; @@ -12784,10 +13430,44 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, wq->page_count); bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, cq->queue_id); + + /* wqv is the earliest version supported, NOT the latest */ bf_set(lpfc_mbox_hdr_version, &shdr->request, phba->sli4_hba.pc_sli4_params.wqv); - if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) { + switch (phba->sli4_hba.pc_sli4_params.wqv) { + case LPFC_Q_CREATE_VERSION_0: + switch (wq->entry_size) { + default: + case 64: + /* Nothing to do, version 0 ONLY supports 64 byte */ + page = wq_create->u.request.page; + break; + case 128: + if (!(phba->sli4_hba.pc_sli4_params.wqsize & + LPFC_WQ_SZ128_SUPPORT)) { + status = -ERANGE; + goto out; + } + /* If we get here the HBA MUST also support V1 and + * we MUST use it + */ + bf_set(lpfc_mbox_hdr_version, &shdr->request, + LPFC_Q_CREATE_VERSION_1); + + bf_set(lpfc_mbx_wq_create_wqe_count, + &wq_create->u.request_1, wq->entry_count); + bf_set(lpfc_mbx_wq_create_wqe_size, + &wq_create->u.request_1, + LPFC_WQ_WQE_SIZE_128); + bf_set(lpfc_mbx_wq_create_page_size, + &wq_create->u.request_1, + (PAGE_SIZE/SLI4_PAGE_SIZE)); + page = wq_create->u.request_1.page; + break; + } + break; + case LPFC_Q_CREATE_VERSION_1: bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, wq->entry_count); switch (wq->entry_size) { @@ -12798,6 +13478,11 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, LPFC_WQ_WQE_SIZE_64); break; case 128: + if (!(phba->sli4_hba.pc_sli4_params.wqsize & + LPFC_WQ_SZ128_SUPPORT)) { + status = -ERANGE; + goto out; + } bf_set(lpfc_mbx_wq_create_wqe_size, &wq_create->u.request_1, LPFC_WQ_WQE_SIZE_128); @@ -12806,9 +13491,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, (PAGE_SIZE/SLI4_PAGE_SIZE)); page = wq_create->u.request_1.page; - } else { - page = wq_create->u.request.page; + break; + default: + status = -ERANGE; + goto out; } + list_for_each_entry(dmabuf, &wq->page_list, list) { memset(dmabuf->virt, 0, hw_page_size); page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); @@ -12870,8 +13558,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, } wq->db_regaddr = bar_memmap_p + db_offset; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3264 WQ[%d]: barset:x%x, 
offset:x%x\n", - wq->queue_id, pci_barset, db_offset); + "3264 WQ[%d]: barset:x%x, offset:x%x, " + "format:x%x\n", wq->queue_id, pci_barset, + db_offset, wq->db_format); } else { wq->db_format = LPFC_DB_LIST_FORMAT; wq->db_regaddr = phba->sli4_hba.WQDBregaddr; @@ -13091,8 +13780,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, } hrq->db_regaddr = bar_memmap_p + db_offset; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n", - hrq->queue_id, pci_barset, db_offset); + "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " + "format:x%x\n", hrq->queue_id, pci_barset, + db_offset, hrq->db_format); } else { hrq->db_format = LPFC_DB_RING_FORMAT; hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; @@ -13942,13 +14632,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) } lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2538 Received frame rctl:%s type:%s " - "Frame Data:%08x %08x %08x %08x %08x %08x\n", - rctl_names[fc_hdr->fh_r_ctl], - type_names[fc_hdr->fh_type], + "2538 Received frame rctl:%s (x%x), type:%s (x%x), " + "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", + rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, + type_names[fc_hdr->fh_type], fc_hdr->fh_type, be32_to_cpu(header[0]), be32_to_cpu(header[1]), be32_to_cpu(header[2]), be32_to_cpu(header[3]), - be32_to_cpu(header[4]), be32_to_cpu(header[5])); + be32_to_cpu(header[4]), be32_to_cpu(header[5]), + be32_to_cpu(header[6])); return 0; drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, @@ -14626,14 +15317,20 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->phba->vpi_ids[vport->vpi]; /* put the first buffer into the first IOCBq */ + tot_len = bf_get(lpfc_rcqe_length, + &seq_dmabuf->cq_event.cqe.rcqe_cmpl); + first_iocbq->context2 = &seq_dmabuf->dbuf; first_iocbq->context3 = NULL; first_iocbq->iocb.ulpBdeCount = 1; - first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = + if (tot_len > LPFC_DATA_BUF_SIZE) + first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = LPFC_DATA_BUF_SIZE; + else + first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; + first_iocbq->iocb.un.rcvels.remoteID = sid; - tot_len = bf_get(lpfc_rcqe_length, - &seq_dmabuf->cq_event.cqe.rcqe_cmpl); + first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; } iocbq = first_iocbq; @@ -14649,14 +15346,17 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) if (!iocbq->context3) { iocbq->context3 = d_buf; iocbq->iocb.ulpBdeCount++; - pbde = (struct ulp_bde64 *) - &iocbq->iocb.unsli3.sli3Words[4]; - pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; - /* We need to get the size out of the right CQE */ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); len = bf_get(lpfc_rcqe_length, &hbq_buf->cq_event.cqe.rcqe_cmpl); + pbde = (struct ulp_bde64 *) + &iocbq->iocb.unsli3.sli3Words[4]; + if (len > LPFC_DATA_BUF_SIZE) + pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; + else + pbde->tus.f.bdeSize = len; + iocbq->iocb.unsli3.rcvsli3.acc_len += len; tot_len += len; } else { @@ -14671,16 +15371,19 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) lpfc_in_buf_free(vport->phba, d_buf); continue; } + /* We need to get the size out of the right CQE */ + hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); + len = bf_get(lpfc_rcqe_length, + &hbq_buf->cq_event.cqe.rcqe_cmpl); iocbq->context2 = d_buf; iocbq->context3 = NULL; iocbq->iocb.ulpBdeCount = 1; - iocbq->iocb.un.cont64[0].tus.f.bdeSize = + if (len > LPFC_DATA_BUF_SIZE) + 
iocbq->iocb.un.cont64[0].tus.f.bdeSize = LPFC_DATA_BUF_SIZE; + else + iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; - /* We need to get the size out of the right CQE */ - hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); - len = bf_get(lpfc_rcqe_length, - &hbq_buf->cq_event.cqe.rcqe_cmpl); tot_len += len; iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; @@ -14962,6 +15665,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) uint16_t max_rpi, rpi_limit; uint16_t rpi_remaining, lrpi = 0; struct lpfc_rpi_hdr *rpi_hdr; + unsigned long iflag; max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; rpi_limit = phba->sli4_hba.next_rpi; @@ -14970,7 +15674,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) * Fetch the next logical rpi. Because this index is logical, * the driver starts at 0 each time. */ - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflag); rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); if (rpi >= rpi_limit) rpi = LPFC_RPI_ALLOC_ERROR; @@ -14986,7 +15690,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) */ if ((rpi == LPFC_RPI_ALLOC_ERROR) && (phba->sli4_hba.rpi_count >= max_rpi)) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflag); return rpi; } @@ -14995,7 +15699,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) * extents. */ if (!phba->sli4_hba.rpi_hdrs_in_use) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflag); return rpi; } @@ -15006,7 +15710,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) * how many are supported max by the device. */ rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflag); if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); if (!rpi_hdr) { @@ -15481,11 +16185,18 @@ lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) LPFC_SLI4_FCF_TBL_INDX_MAX); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "3060 Last IDX %d\n", last_index); - if (list_empty(&phba->fcf.fcf_pri_list)) { + + /* Verify the priority list has 2 or more entries */ + spin_lock_irq(&phba->hbalock); + if (list_empty(&phba->fcf.fcf_pri_list) || + list_is_singular(&phba->fcf.fcf_pri_list)) { + spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "3061 Last IDX %d\n", last_index); return 0; /* Empty rr list */ } + spin_unlock_irq(&phba->hbalock); + next_fcf_pri = 0; /* * Clear the rr_bmask and set all of the bits that are at this @@ -15659,7 +16370,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) { - struct lpfc_fcf_pri *fcf_pri; + struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2762 FCF (x%x) reached driver's book " @@ -15669,7 +16380,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) } /* Clear the eligible FCF record index bmask */ spin_lock_irq(&phba->hbalock); - list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { + list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, + list) { if (fcf_pri->fcf_rec.fcf_index == fcf_index) { list_del_init(&fcf_pri->list); break; @@ -16239,35 +16951,41 @@ lpfc_drain_txq(struct lpfc_hba *phba) { LIST_HEAD(completions); struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; - struct lpfc_iocbq *piocbq = 0; + struct lpfc_iocbq *piocbq = NULL; unsigned long iflags = 0; char *fail_msg = 
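The lpfc_sli4_alloc_rpi() conversion above swaps spin_lock_irq()/spin_unlock_irq() for the irqsave/irqrestore pair: instead of unconditionally re-enabling interrupts on unlock, the saved flags restore whatever interrupt state the caller had, so the allocator becomes safe to call from contexts that already hold interrupts off. A minimal sketch of the idiom; struct my_pool is illustrative, while the locking and bitmap calls are the real kernel APIs:

#include <linux/bitops.h>
#include <linux/spinlock.h>

struct my_pool {
	spinlock_t lock;
	unsigned long *bmask;	/* one bit per allocatable id */
	unsigned long limit;
};

/* Allocate the next free id; -1 if exhausted. Callable from any context. */
static int my_alloc_id(struct my_pool *pool)
{
	unsigned long flags;
	unsigned long id;

	spin_lock_irqsave(&pool->lock, flags);
	id = find_next_zero_bit(pool->bmask, pool->limit, 0);
	if (id < pool->limit)
		set_bit(id, pool->bmask);
	spin_unlock_irqrestore(&pool->lock, flags);	/* restores caller's state */

	return id < pool->limit ? (int)id : -1;
}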
NULL; struct lpfc_sglq *sglq; union lpfc_wqe wqe; + int txq_cnt = 0; - spin_lock_irqsave(&phba->hbalock, iflags); - if (pring->txq_cnt > pring->txq_max) - pring->txq_max = pring->txq_cnt; + spin_lock_irqsave(&pring->ring_lock, iflags); + list_for_each_entry(piocbq, &pring->txq, list) { + txq_cnt++; + } - spin_unlock_irqrestore(&phba->hbalock, iflags); + if (txq_cnt > pring->txq_max) + pring->txq_max = txq_cnt; - while (pring->txq_cnt) { - spin_lock_irqsave(&phba->hbalock, iflags); + spin_unlock_irqrestore(&pring->ring_lock, iflags); + + while (!list_empty(&pring->txq)) { + spin_lock_irqsave(&pring->ring_lock, iflags); piocbq = lpfc_sli_ringtx_get(phba, pring); if (!piocbq) { - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irqrestore(&pring->ring_lock, iflags); lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "2823 txq empty and txq_cnt is %d\n ", - pring->txq_cnt); + txq_cnt); break; } sglq = __lpfc_sli_get_sglq(phba, piocbq); if (!sglq) { __lpfc_sli_ringtx_put(phba, pring, piocbq); - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irqrestore(&pring->ring_lock, iflags); break; } + txq_cnt--; /* The xri and iocb resources secured, * attempt to issue request @@ -16292,12 +17010,12 @@ lpfc_drain_txq(struct lpfc_hba *phba) piocbq->iotag, piocbq->sli4_xritag); list_add_tail(&piocbq->list, &completions); } - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irqrestore(&pring->ring_lock, iflags); } /* Cancel all the IOCBs that cannot be issued */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); - return pring->txq_cnt; + return txq_cnt; } diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 9d2e0c6fe33..edb48832c39 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2007 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
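lpfc_drain_txq() above stops trusting a cached pring->txq_cnt and instead walks the transmit queue under the new per-ring lock, keeping a local count; contention on the coarse hbalock is traded for the narrower ring_lock. The counting idiom on its own, with struct fields mirroring the lpfc structures in the diff but as an illustrative sketch rather than the driver's code:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_iocbq {
	struct list_head list;
};

struct my_ring {
	spinlock_t ring_lock;
	struct list_head txq;
};

static int my_txq_count(struct my_ring *pring)
{
	struct my_iocbq *piocbq;
	unsigned long iflags;
	int txq_cnt = 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list)
		txq_cnt++;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	return txq_cnt;
}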
* * www.emulex.com * * * @@ -58,9 +58,10 @@ struct lpfc_iocbq { IOCB_t iocb; /* IOCB cmd */ uint8_t retry; /* retry counter for IOCB cmd - if needed */ - uint16_t iocb_flag; + uint32_t iocb_flag; #define LPFC_IO_LIBDFC 1 /* libdfc iocb */ -#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ +#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */ +#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */ #define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ #define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ #define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */ @@ -72,11 +73,13 @@ struct lpfc_iocbq { #define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */ #define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */ #define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */ +#define LPFC_IO_CMD_OUTSTANDING 0x2000 /* timeout handler abort window */ #define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ #define LPFC_FIP_ELS_ID_SHIFT 14 - uint8_t rsvd2; +#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */ + uint32_t drvrTimeout; /* driver timeout in seconds */ uint32_t fcp_wqidx; /* index to FCP work queue */ struct lpfc_vport *vport;/* virtual port pointer */ @@ -93,6 +96,8 @@ struct lpfc_iocbq { void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); + void (*wait_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *); void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); }; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index be02b59ea27..7f50aa04d66 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2011 Emulex. All rights reserved. * + * Copyright (C) 2009-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
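One detail worth noting in the lpfc_sli.h hunk above: iocb_flag grows from uint16_t to uint32_t because the new LPFC_IO_OAS flag is 0x10000, i.e. bit 16, which a 16-bit field cannot hold. A stand-alone illustration of the truncation the change avoids:

#include <stdint.h>
#include <stdio.h>

#define LPFC_IO_OAS 0x10000	/* OAS FCP IO, from the hunk above */

int main(void)
{
	uint16_t old_flag = 0;
	uint32_t new_flag = 0;

	old_flag |= LPFC_IO_OAS;	/* truncated: bit 16 doesn't fit */
	new_flag |= LPFC_IO_OAS;

	printf("uint16_t iocb_flag: %#x\n", (unsigned)old_flag);	/* 0 */
	printf("uint32_t iocb_flag: %#x\n", (unsigned)new_flag);	/* 0x10000 */
	return 0;
}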
* * www.emulex.com * * * @@ -39,6 +39,10 @@ #define LPFC_FCP_IO_CHAN_MIN 1 #define LPFC_FCP_IO_CHAN_MAX 16 +/* Number of channels used for Flash Optimized Fabric (FOF) operations */ + +#define LPFC_FOF_IO_CHAN_NUM 1 + /* * Provide the default FCF Record attributes used by the driver * when nonFIP mode is configured and there is no other default @@ -117,6 +121,7 @@ union sli4_qe { struct lpfc_rcqe_complete *rcqe_complete; struct lpfc_mqe *mqe; union lpfc_wqe *wqe; + union lpfc_wqe128 *wqe128; struct lpfc_rqe *rqe; }; @@ -325,12 +330,14 @@ struct lpfc_bmbx { #define LPFC_EQE_SIZE_16B 16 #define LPFC_CQE_SIZE 16 #define LPFC_WQE_SIZE 64 +#define LPFC_WQE128_SIZE 128 #define LPFC_MQE_SIZE 256 #define LPFC_RQE_SIZE 8 #define LPFC_EQE_DEF_COUNT 1024 #define LPFC_CQE_DEF_COUNT 1024 #define LPFC_WQE_DEF_COUNT 256 +#define LPFC_WQE128_DEF_COUNT 128 #define LPFC_MQE_DEF_COUNT 16 #define LPFC_RQE_DEF_COUNT 512 @@ -346,11 +353,6 @@ struct lpfc_bmbx { #define SLI4_CT_VFI 2 #define SLI4_CT_FCFI 3 -#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000 -#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0X2000 -#define LPFC_SLI4_MIN_BUF_SIZE 0x400 -#define LPFC_SLI4_MAX_BUF_SIZE 0x20000 - /* * SLI4 specific data structures */ @@ -401,6 +403,7 @@ struct lpfc_pc_sli4_params { uint32_t if_page_sz; uint32_t rq_db_window; uint32_t loopbk_scope; + uint32_t oas_supported; uint32_t eq_pages_max; uint32_t eqe_size; uint32_t cq_pages_max; @@ -421,6 +424,9 @@ struct lpfc_pc_sli4_params { uint8_t mqv; uint8_t wqv; uint8_t rqv; + uint8_t wqsize; +#define LPFC_WQ_SZ64_SUPPORT 1 +#define LPFC_WQ_SZ128_SUPPORT 2 }; struct lpfc_iov { @@ -438,8 +444,20 @@ struct lpfc_sli4_lnk_info { uint8_t lnk_no; }; +#define LPFC_SLI4_HANDLER_CNT (LPFC_FCP_IO_CHAN_MAX+ \ + LPFC_FOF_IO_CHAN_NUM) #define LPFC_SLI4_HANDLER_NAME_SZ 16 +/* Used for IRQ vector to CPU mapping */ +struct lpfc_vector_map_info { + uint16_t phys_id; + uint16_t core_id; + uint16_t irq; + uint16_t channel_id; + struct cpumask maskbits; +}; +#define LPFC_VECTOR_MAP_EMPTY 0xffff + /* SLI4 HBA data structure entries */ struct lpfc_sli4_hba { void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for @@ -496,7 +514,7 @@ struct lpfc_sli4_hba { struct lpfc_register sli_intf; struct lpfc_pc_sli4_params pc_sli4_params; struct msix_entry *msix_entries; - uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ]; + uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ]; struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ /* Pointers to the constructed SLI4 queues */ @@ -512,10 +530,21 @@ struct lpfc_sli4_hba { struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ - uint8_t fw_func_mode; /* FW function protocol mode */ + uint32_t fw_func_mode; /* FW function protocol mode */ uint32_t ulp0_mode; /* ULP0 protocol mode */ uint32_t ulp1_mode; /* ULP1 protocol mode */ + struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */ + + /* Optimized Access Storage specific queues/structures */ + + struct lpfc_queue *oas_cq; /* OAS completion queue */ + struct lpfc_queue *oas_wq; /* OAS Work queue */ + struct lpfc_sli_ring *oas_ring; + uint64_t oas_next_lun; + uint8_t oas_next_tgt_wwpn[8]; + uint8_t oas_next_vpt_wwpn[8]; + /* Setup information for various queue parameters */ int eq_esize; int eq_ecount; @@ -573,6 +602,12 @@ struct lpfc_sli4_hba { struct lpfc_iov iov; spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_sgl_list_lock; /* list of aborted els 
IOs */ + + /* CPU to vector mapping information */ + struct lpfc_vector_map_info *cpu_map; + uint16_t num_online_cpu; + uint16_t num_present_cpu; + uint16_t curr_disp_cpu; }; enum lpfc_sge_type { @@ -657,6 +692,7 @@ void lpfc_sli4_queue_unset(struct lpfc_hba *); int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); +void lpfc_sli4_free_xri(struct lpfc_hba *, int); int lpfc_sli4_post_async_mbox(struct lpfc_hba *); int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index f3b7795a296..41675c1193e 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2012 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.37" +#define LPFC_DRIVER_VERSION "10.2.8001.0." #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ @@ -30,4 +30,4 @@ #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ LPFC_DRIVER_VERSION -#define LPFC_COPYRIGHT "Copyright(c) 2004-2009 Emulex. All rights reserved." +#define LPFC_COPYRIGHT "Copyright(c) 2004-2014 Emulex. All rights reserved." diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 0fe188e6600..a87ee33f4f2 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2013 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport, } } -static int +int lpfc_alloc_vpi(struct lpfc_hba *phba) { unsigned long vpi; @@ -387,6 +387,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) /* Create binary sysfs attribute for vport */ lpfc_alloc_sysfs_attr(vport); + /* Set the DFT_LUN_Q_DEPTH accordingly */ + vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth; + *(struct lpfc_vport **)fc_vport->dd_data = vport; vport->fc_vport = fc_vport; @@ -568,6 +571,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; long timeout; + bool ns_ndlp_referenced = false; if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, @@ -628,6 +632,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_debugfs_terminate(vport); + /* + * The call to fc_remove_host might release the NameServer ndlp. Since + * we might need to use the ndlp to send the DA_ID CT command, + * increment the reference for the NameServer ndlp to prevent it from + * being released. 
+ */ + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + lpfc_nlp_get(ndlp); + ns_ndlp_referenced = true; + } + /* Remove FC host and then SCSI host with the vport */ fc_remove_host(lpfc_shost_from_vport(vport)); scsi_remove_host(lpfc_shost_from_vport(vport)); @@ -734,6 +750,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_discovery_wait(vport); skip_logo: + + /* + * If the NameServer ndlp has been incremented to allow the DA_ID CT + * command to be sent, decrement the ndlp now. + */ + if (ns_ndlp_referenced) { + ndlp = lpfc_findnode_did(vport, NameServer_DID); + lpfc_nlp_put(ndlp); + } + lpfc_cleanup(vport); lpfc_sli_host_down(vport); diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h index 90828340ace..6b2c94eb813 100644 --- a/drivers/scsi/lpfc/lpfc_vport.h +++ b/drivers/scsi/lpfc/lpfc_vport.h @@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *); int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *); void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **); +int lpfc_alloc_vpi(struct lpfc_hba *phba); /* * queuecommand VPORT-specific return codes. Specified in the host byte code. diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 24828b54773..6a039eb1cbc 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -25,10 +25,6 @@ * 1+ (800) 334-5454 */ -/* - * $Log: mac_NCR5380.c,v $ - */ - #include <linux/types.h> #include <linux/stddef.h> #include <linux/ctype.h> @@ -58,12 +54,6 @@ #include "NCR5380.h" -#if 0 -#define NDEBUG (NDEBUG_INTR | NDEBUG_PSEUDO_DMA | NDEBUG_ARBITRATION | NDEBUG_SELECTION | NDEBUG_RESELECTION) -#else -#define NDEBUG (NDEBUG_ABORT) -#endif - #define RESET_BOOT #define DRIVER_SETUP @@ -260,6 +250,8 @@ int __init macscsi_detect(struct scsi_host_template * tpnt) /* Once we support multiple 5380s (e.g. 
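The vport-delete fix above takes an extra reference on the NameServer ndlp before fc_remove_host(), which may otherwise drop the node's last reference, and releases it once the DA_ID CT path no longer needs the node. The same hold-across-teardown discipline, sketched generically with the kernel's kref API rather than lpfc's lpfc_nlp_get()/lpfc_nlp_put(); struct my_node and the my_*() helpers are illustrative:

#include <linux/kref.h>
#include <linux/types.h>

struct my_node {
	struct kref kref;
};

static void my_node_release(struct kref *kref);	/* frees the node */
static struct my_node *my_find_node(void);	/* may return NULL */
static void my_teardown(void);			/* may drop the last ref */

static void my_delete(void)
{
	struct my_node *node;
	bool held = false;

	node = my_find_node();
	if (node) {
		kref_get(&node->kref);	/* keep it alive across teardown */
		held = true;
	}

	my_teardown();

	if (held)
		kref_put(&node->kref, my_node_release);	/* balance the get */
}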
DuoDock) we'll do something different here */ instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); + if (instance == NULL) + return 0; if (macintosh_config->ident == MAC_MODEL_IIFX) { mac_scsi_regp = via1+0x8000; @@ -561,7 +553,8 @@ static int macscsi_pwrite (struct Scsi_Host *instance, static struct scsi_host_template driver_template = { .proc_name = "Mac5380", - .proc_info = macscsi_proc_info, + .show_info = macscsi_show_info, + .write_info = macscsi_write_info, .name = "Macintosh NCR5380 SCSI", .detect = macscsi_detect, .release = macscsi_release, diff --git a/drivers/scsi/mac_scsi.h b/drivers/scsi/mac_scsi.h index d26e331c6c1..06969b06e54 100644 --- a/drivers/scsi/mac_scsi.h +++ b/drivers/scsi/mac_scsi.h @@ -22,10 +22,6 @@ * 1+ (800) 334-5454 */ -/* - * $Log: cumana_NCR5380.h,v $ - */ - #ifndef MAC_NCR5380_H #define MAC_NCR5380_H @@ -51,8 +47,6 @@ #include <scsi/scsicam.h> -#ifndef HOSTS_C - #define NCR5380_implementation_fields \ int port, ctrl @@ -72,12 +66,9 @@ #define NCR5380_queue_command macscsi_queue_command #define NCR5380_abort macscsi_abort #define NCR5380_bus_reset macscsi_bus_reset -#define NCR5380_proc_info macscsi_proc_info - -#define BOARD_NORMAL 0 -#define BOARD_NCR53C400 1 +#define NCR5380_show_info macscsi_show_info +#define NCR5380_write_info macscsi_write_info -#endif /* ndef HOSTS_C */ #endif /* ndef ASM */ #endif /* MAC_NCR5380_H */ diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 9504ec0ec68..b7770516f4c 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -39,6 +39,7 @@ #include <linux/completion.h> #include <linux/delay.h> #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/reboot.h> #include <linux/module.h> #include <linux/list.h> @@ -530,13 +531,6 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy) int target = 0; int ldrv_num = 0; /* logical drive number */ - - /* - * filter the internal and ioctl commands - */ - if((cmd->cmnd[0] == MEGA_INTERNAL_CMD)) - return (scb_t *)cmd->host_scribble; - /* * We know what channels our logical drives are on - mega_find_card() */ @@ -1438,19 +1432,22 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) cmdid = completed[i]; - if( cmdid == CMDID_INT_CMDS ) { /* internal command */ + /* + * Only free SCBs for the commands coming down from the + * mid-layer, not for which were issued internally + * + * For internal command, restore the status returned by the + * firmware so that user can interpret it. + */ + if (cmdid == CMDID_INT_CMDS) { scb = &adapter->int_scb; - cmd = scb->cmd; - mbox = (mbox_t *)scb->raw_mbox; - /* - * Internal command interface do not fire the extended - * passthru or 64-bit passthru - */ - pthru = scb->pthru; + list_del_init(&scb->list); + scb->state = SCB_FREE; - } - else { + adapter->int_status = status; + complete(&adapter->int_waitq); + } else { scb = &adapter->scb_list[cmdid]; /* @@ -1639,25 +1636,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) cmd->result |= (DID_BAD_TARGET << 16)|status; } - /* - * Only free SCBs for the commands coming down from the - * mid-layer, not for which were issued internally - * - * For internal command, restore the status returned by the - * firmware so that user can interpret it. 
- */ - if( cmdid == CMDID_INT_CMDS ) { /* internal command */ - cmd->result = status; - - /* - * Remove the internal command from the pending list - */ - list_del_init(&scb->list); - scb->state = SCB_FREE; - } - else { - mega_free_scb(adapter, scb); - } + mega_free_scb(adapter, scb); /* Add Scsi_Command to end of completed queue */ list_add_tail(SCSI_LIST(cmd), &adapter->completed_list); @@ -2025,7 +2004,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor) static inline int make_local_pdev(adapter_t *adapter, struct pci_dev **pdev) { - *pdev = alloc_pci_dev(); + *pdev = pci_alloc_dev(NULL); if( *pdev == NULL ) return -1; @@ -2069,385 +2048,201 @@ mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev) #ifdef CONFIG_PROC_FS /* Following code handles /proc fs */ -#define CREATE_READ_PROC(string, func) create_proc_read_entry(string, \ - S_IRUSR | S_IFREG, \ - controller_proc_dir_entry, \ - func, adapter) - -/** - * mega_create_proc_entry() - * @index - index in soft state array - * @parent - parent node for this /proc entry - * - * Creates /proc entries for our controllers. - */ -static void -mega_create_proc_entry(int index, struct proc_dir_entry *parent) -{ - struct proc_dir_entry *controller_proc_dir_entry = NULL; - u8 string[64] = { 0 }; - adapter_t *adapter = hba_soft_state[index]; - - sprintf(string, "hba%d", adapter->host->host_no); - - controller_proc_dir_entry = - adapter->controller_proc_dir_entry = proc_mkdir(string, parent); - - if(!controller_proc_dir_entry) { - printk(KERN_WARNING "\nmegaraid: proc_mkdir failed\n"); - return; - } - adapter->proc_read = CREATE_READ_PROC("config", proc_read_config); - adapter->proc_stat = CREATE_READ_PROC("stat", proc_read_stat); - adapter->proc_mbox = CREATE_READ_PROC("mailbox", proc_read_mbox); -#if MEGA_HAVE_ENH_PROC - adapter->proc_rr = CREATE_READ_PROC("rebuild-rate", proc_rebuild_rate); - adapter->proc_battery = CREATE_READ_PROC("battery-status", - proc_battery); - - /* - * Display each physical drive on its channel - */ - adapter->proc_pdrvstat[0] = CREATE_READ_PROC("diskdrives-ch0", - proc_pdrv_ch0); - adapter->proc_pdrvstat[1] = CREATE_READ_PROC("diskdrives-ch1", - proc_pdrv_ch1); - adapter->proc_pdrvstat[2] = CREATE_READ_PROC("diskdrives-ch2", - proc_pdrv_ch2); - adapter->proc_pdrvstat[3] = CREATE_READ_PROC("diskdrives-ch3", - proc_pdrv_ch3); - - /* - * Display a set of up to 10 logical drive through each of following - * /proc entries - */ - adapter->proc_rdrvstat[0] = CREATE_READ_PROC("raiddrives-0-9", - proc_rdrv_10); - adapter->proc_rdrvstat[1] = CREATE_READ_PROC("raiddrives-10-19", - proc_rdrv_20); - adapter->proc_rdrvstat[2] = CREATE_READ_PROC("raiddrives-20-29", - proc_rdrv_30); - adapter->proc_rdrvstat[3] = CREATE_READ_PROC("raiddrives-30-39", - proc_rdrv_40); -#endif -} - - /** - * proc_read_config() - * @page - buffer to write the data in - * @start - where the actual data has been written in page - * @offset - same meaning as the read system call - * @count - same meaning as the read system call - * @eof - set if no more data needs to be returned - * @data - pointer to our soft state + * proc_show_config() + * @m - Synthetic file construction data + * @v - File iterator * * Display configuration information about the controller. 
*/ static int -proc_read_config(char *page, char **start, off_t offset, int count, int *eof, - void *data) +proc_show_config(struct seq_file *m, void *v) { - adapter_t *adapter = (adapter_t *)data; - int len = 0; - - len += sprintf(page+len, "%s", MEGARAID_VERSION); + adapter_t *adapter = m->private; + seq_puts(m, MEGARAID_VERSION); if(adapter->product_info.product_name[0]) - len += sprintf(page+len, "%s\n", - adapter->product_info.product_name); + seq_printf(m, "%s\n", adapter->product_info.product_name); - len += sprintf(page+len, "Controller Type: "); + seq_puts(m, "Controller Type: "); - if( adapter->flag & BOARD_MEMMAP ) { - len += sprintf(page+len, - "438/466/467/471/493/518/520/531/532\n"); - } - else { - len += sprintf(page+len, - "418/428/434\n"); - } + if( adapter->flag & BOARD_MEMMAP ) + seq_puts(m, "438/466/467/471/493/518/520/531/532\n"); + else + seq_puts(m, "418/428/434\n"); - if(adapter->flag & BOARD_40LD) { - len += sprintf(page+len, - "Controller Supports 40 Logical Drives\n"); - } + if(adapter->flag & BOARD_40LD) + seq_puts(m, "Controller Supports 40 Logical Drives\n"); - if(adapter->flag & BOARD_64BIT) { - len += sprintf(page+len, - "Controller capable of 64-bit memory addressing\n"); - } - if( adapter->has_64bit_addr ) { - len += sprintf(page+len, - "Controller using 64-bit memory addressing\n"); - } - else { - len += sprintf(page+len, - "Controller is not using 64-bit memory addressing\n"); - } - - len += sprintf(page+len, "Base = %08lx, Irq = %d, ", adapter->base, - adapter->host->irq); - - len += sprintf(page+len, "Logical Drives = %d, Channels = %d\n", - adapter->numldrv, adapter->product_info.nchannels); - - len += sprintf(page+len, "Version =%s:%s, DRAM = %dMb\n", - adapter->fw_version, adapter->bios_version, - adapter->product_info.dram_size); - - len += sprintf(page+len, - "Controller Queue Depth = %d, Driver Queue Depth = %d\n", - adapter->product_info.max_commands, adapter->max_cmds); - - len += sprintf(page+len, "support_ext_cdb = %d\n", - adapter->support_ext_cdb); - len += sprintf(page+len, "support_random_del = %d\n", - adapter->support_random_del); - len += sprintf(page+len, "boot_ldrv_enabled = %d\n", - adapter->boot_ldrv_enabled); - len += sprintf(page+len, "boot_ldrv = %d\n", - adapter->boot_ldrv); - len += sprintf(page+len, "boot_pdrv_enabled = %d\n", - adapter->boot_pdrv_enabled); - len += sprintf(page+len, "boot_pdrv_ch = %d\n", - adapter->boot_pdrv_ch); - len += sprintf(page+len, "boot_pdrv_tgt = %d\n", - adapter->boot_pdrv_tgt); - len += sprintf(page+len, "quiescent = %d\n", - atomic_read(&adapter->quiescent)); - len += sprintf(page+len, "has_cluster = %d\n", - adapter->has_cluster); - - len += sprintf(page+len, "\nModule Parameters:\n"); - len += sprintf(page+len, "max_cmd_per_lun = %d\n", - max_cmd_per_lun); - len += sprintf(page+len, "max_sectors_per_io = %d\n", - max_sectors_per_io); - - *eof = 1; - - return len; + if(adapter->flag & BOARD_64BIT) + seq_puts(m, "Controller capable of 64-bit memory addressing\n"); + if( adapter->has_64bit_addr ) + seq_puts(m, "Controller using 64-bit memory addressing\n"); + else + seq_puts(m, "Controller is not using 64-bit memory addressing\n"); + + seq_printf(m, "Base = %08lx, Irq = %d, ", + adapter->base, adapter->host->irq); + + seq_printf(m, "Logical Drives = %d, Channels = %d\n", + adapter->numldrv, adapter->product_info.nchannels); + + seq_printf(m, "Version =%s:%s, DRAM = %dMb\n", + adapter->fw_version, adapter->bios_version, + adapter->product_info.dram_size); + + seq_printf(m, "Controller Queue Depth 
= %d, Driver Queue Depth = %d\n", + adapter->product_info.max_commands, adapter->max_cmds); + + seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb); + seq_printf(m, "support_random_del = %d\n", adapter->support_random_del); + seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled); + seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv); + seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled); + seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch); + seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt); + seq_printf(m, "quiescent = %d\n", + atomic_read(&adapter->quiescent)); + seq_printf(m, "has_cluster = %d\n", adapter->has_cluster); + + seq_puts(m, "\nModule Parameters:\n"); + seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun); + seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io); + return 0; } - - /** - * proc_read_stat() - * @page - buffer to write the data in - * @start - where the actual data has been written in page - * @offset - same meaning as the read system call - * @count - same meaning as the read system call - * @eof - set if no more data needs to be returned - * @data - pointer to our soft state + * proc_show_stat() + * @m - Synthetic file construction data + * @v - File iterator * - * Diaplay statistical information about the I/O activity. + * Display statistical information about the I/O activity. */ static int -proc_read_stat(char *page, char **start, off_t offset, int count, int *eof, - void *data) +proc_show_stat(struct seq_file *m, void *v) { - adapter_t *adapter; - int len; + adapter_t *adapter = m->private; +#if MEGA_HAVE_STATS int i; +#endif - i = 0; /* avoid compilation warnings */ - len = 0; - adapter = (adapter_t *)data; - - len = sprintf(page, "Statistical Information for this controller\n"); - len += sprintf(page+len, "pend_cmds = %d\n", - atomic_read(&adapter->pend_cmds)); + seq_puts(m, "Statistical Information for this controller\n"); + seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds)); #if MEGA_HAVE_STATS for(i = 0; i < adapter->numldrv; i++) { - len += sprintf(page+len, "Logical Drive %d:\n", i); - - len += sprintf(page+len, - "\tReads Issued = %lu, Writes Issued = %lu\n", - adapter->nreads[i], adapter->nwrites[i]); - - len += sprintf(page+len, - "\tSectors Read = %lu, Sectors Written = %lu\n", - adapter->nreadblocks[i], adapter->nwriteblocks[i]); - - len += sprintf(page+len, - "\tRead errors = %lu, Write errors = %lu\n\n", - adapter->rd_errors[i], adapter->wr_errors[i]); + seq_printf(m, "Logical Drive %d:\n", i); + seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n", + adapter->nreads[i], adapter->nwrites[i]); + seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n", + adapter->nreadblocks[i], adapter->nwriteblocks[i]); + seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n", + adapter->rd_errors[i], adapter->wr_errors[i]); } #else - len += sprintf(page+len, - "IO and error counters not compiled in driver.\n"); + seq_puts(m, "IO and error counters not compiled in driver.\n"); #endif - - *eof = 1; - - return len; + return 0; } /** - * proc_read_mbox() - * @page - buffer to write the data in - * @start - where the actual data has been written in page - * @offset - same meaning as the read system call - * @count - same meaning as the read system call - * @eof - set if no more data needs to be returned - * @data - pointer to our soft state + * proc_show_mbox() + * @m - Synthetic file construction data + * @v - File iterator * * Display mailbox 
information for the last command issued. This information * is good for debugging. */ static int -proc_read_mbox(char *page, char **start, off_t offset, int count, int *eof, - void *data) +proc_show_mbox(struct seq_file *m, void *v) { - - adapter_t *adapter = (adapter_t *)data; + adapter_t *adapter = m->private; volatile mbox_t *mbox = adapter->mbox; - int len = 0; - - len = sprintf(page, "Contents of Mail Box Structure\n"); - len += sprintf(page+len, " Fw Command = 0x%02x\n", - mbox->m_out.cmd); - len += sprintf(page+len, " Cmd Sequence = 0x%02x\n", - mbox->m_out.cmdid); - len += sprintf(page+len, " No of Sectors= %04d\n", - mbox->m_out.numsectors); - len += sprintf(page+len, " LBA = 0x%02x\n", - mbox->m_out.lba); - len += sprintf(page+len, " DTA = 0x%08x\n", - mbox->m_out.xferaddr); - len += sprintf(page+len, " Logical Drive= 0x%02x\n", - mbox->m_out.logdrv); - len += sprintf(page+len, " No of SG Elmt= 0x%02x\n", - mbox->m_out.numsgelements); - len += sprintf(page+len, " Busy = %01x\n", - mbox->m_in.busy); - len += sprintf(page+len, " Status = 0x%02x\n", - mbox->m_in.status); - - *eof = 1; - - return len; + + seq_puts(m, "Contents of Mail Box Structure\n"); + seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd); + seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid); + seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors); + seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba); + seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr); + seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv); + seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements); + seq_printf(m, " Busy = %01x\n", mbox->m_in.busy); + seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status); + return 0; } /** - * proc_rebuild_rate() - * @page - buffer to write the data in - * @start - where the actual data has been written in page - * @offset - same meaning as the read system call - * @count - same meaning as the read system call - * @eof - set if no more data needs to be returned - * @data - pointer to our soft state + * proc_show_rebuild_rate() + * @m - Synthetic file construction data + * @v - File iterator * * Display current rebuild rate */ static int -proc_rebuild_rate(char *page, char **start, off_t offset, int count, int *eof, - void *data) +proc_show_rebuild_rate(struct seq_file *m, void *v) { - adapter_t *adapter = (adapter_t *)data; + adapter_t *adapter = m->private; dma_addr_t dma_handle; caddr_t inquiry; struct pci_dev *pdev; - int len = 0; - if( make_local_pdev(adapter, &pdev) != 0 ) { - *eof = 1; - return len; - } + if( make_local_pdev(adapter, &pdev) != 0 ) + return 0; - if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) { - free_local_pdev(pdev); - *eof = 1; - return len; - } + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) + goto free_pdev; if( mega_adapinq(adapter, dma_handle) != 0 ) { - - len = sprintf(page, "Adapter inquiry failed.\n"); - + seq_puts(m, "Adapter inquiry failed.\n"); printk(KERN_WARNING "megaraid: inquiry failed.\n"); - - mega_free_inquiry(inquiry, dma_handle, pdev); - - free_local_pdev(pdev); - - *eof = 1; - - return len; + goto free_inquiry; } - if( adapter->flag & BOARD_40LD ) { - len = sprintf(page, "Rebuild Rate: [%d%%]\n", - ((mega_inquiry3 *)inquiry)->rebuild_rate); - } - else { - len = sprintf(page, "Rebuild Rate: [%d%%]\n", + if( adapter->flag & BOARD_40LD ) + seq_printf(m, "Rebuild Rate: [%d%%]\n", + ((mega_inquiry3 *)inquiry)->rebuild_rate); + else + seq_printf(m, "Rebuild Rate: [%d%%]\n", 
((mraid_ext_inquiry *) - inquiry)->raid_inq.adapter_info.rebuild_rate); - } - + inquiry)->raid_inq.adapter_info.rebuild_rate); +free_inquiry: mega_free_inquiry(inquiry, dma_handle, pdev); - +free_pdev: free_local_pdev(pdev); - - *eof = 1; - - return len; + return 0; } /** - * proc_battery() - * @page - buffer to write the data in - * @start - where the actual data has been written in page - * @offset - same meaning as the read system call - * @count - same meaning as the read system call - * @eof - set if no more data needs to be returned - * @data - pointer to our soft state + * proc_show_battery() + * @m - Synthetic file construction data + * @v - File iterator * * Display information about the battery module on the controller. */ static int -proc_battery(char *page, char **start, off_t offset, int count, int *eof, - void *data) +proc_show_battery(struct seq_file *m, void *v) { - adapter_t *adapter = (adapter_t *)data; + adapter_t *adapter = m->private; dma_addr_t dma_handle; caddr_t inquiry; struct pci_dev *pdev; - u8 battery_status = 0; - char str[256]; - int len = 0; + u8 battery_status; - if( make_local_pdev(adapter, &pdev) != 0 ) { - *eof = 1; - return len; - } + if( make_local_pdev(adapter, &pdev) != 0 ) + return 0; - if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) { - free_local_pdev(pdev); - *eof = 1; - return len; - } + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) + goto free_pdev; if( mega_adapinq(adapter, dma_handle) != 0 ) { - - len = sprintf(page, "Adapter inquiry failed.\n"); - + seq_printf(m, "Adapter inquiry failed.\n"); printk(KERN_WARNING "megaraid: inquiry failed.\n"); - - mega_free_inquiry(inquiry, dma_handle, pdev); - - free_local_pdev(pdev); - - *eof = 1; - - return len; + goto free_inquiry; } if( adapter->flag & BOARD_40LD ) { @@ -2461,146 +2256,80 @@ proc_battery(char *page, char **start, off_t offset, int count, int *eof, /* * Decode the battery status */ - sprintf(str, "Battery Status:[%d]", battery_status); + seq_printf(m, "Battery Status:[%d]", battery_status); if(battery_status == MEGA_BATT_CHARGE_DONE) - strcat(str, " Charge Done"); + seq_puts(m, " Charge Done"); if(battery_status & MEGA_BATT_MODULE_MISSING) - strcat(str, " Module Missing"); + seq_puts(m, " Module Missing"); if(battery_status & MEGA_BATT_LOW_VOLTAGE) - strcat(str, " Low Voltage"); + seq_puts(m, " Low Voltage"); if(battery_status & MEGA_BATT_TEMP_HIGH) - strcat(str, " Temperature High"); + seq_puts(m, " Temperature High"); if(battery_status & MEGA_BATT_PACK_MISSING) - strcat(str, " Pack Missing"); + seq_puts(m, " Pack Missing"); if(battery_status & MEGA_BATT_CHARGE_INPROG) - strcat(str, " Charge In-progress"); + seq_puts(m, " Charge In-progress"); if(battery_status & MEGA_BATT_CHARGE_FAIL) - strcat(str, " Charge Fail"); + seq_puts(m, " Charge Fail"); if(battery_status & MEGA_BATT_CYCLES_EXCEEDED) - strcat(str, " Cycles Exceeded"); - - len = sprintf(page, "%s\n", str); + seq_puts(m, " Cycles Exceeded"); + seq_putc(m, '\n'); +free_inquiry: mega_free_inquiry(inquiry, dma_handle, pdev); - +free_pdev: free_local_pdev(pdev); - - *eof = 1; - - return len; -} - - -/** - * proc_pdrv_ch0() - * @page - buffer to write the data in - * @start - where the actual data has been written in page - * @offset - same meaning as the read system call - * @count - same meaning as the read system call - * @eof - set if no more data needs to be returned - * @data - pointer to our soft state - * - * Display information about the physical drives on physical channel 0. 
- */ -static int -proc_pdrv_ch0(char *page, char **start, off_t offset, int count, int *eof, - void *data) -{ - adapter_t *adapter = (adapter_t *)data; - - *eof = 1; - - return (proc_pdrv(adapter, page, 0)); -} - - -/** - * proc_pdrv_ch1() - * @page - buffer to write the data in - * @start - where the actual data has been written in page - * @offset - same meaning as the read system call - * @count - same meaning as the read system call - * @eof - set if no more data needs to be returned - * @data - pointer to our soft state - * - * Display information about the physical drives on physical channel 1. - */ -static int -proc_pdrv_ch1(char *page, char **start, off_t offset, int count, int *eof, - void *data) -{ - adapter_t *adapter = (adapter_t *)data; - - *eof = 1; - - return (proc_pdrv(adapter, page, 1)); + return 0; } -/** - * proc_pdrv_ch2() - * @page - buffer to write the data in - * @start - where the actual data has been written in page - * @offset - same meaning as the read system call - * @count - same meaning as the read system call - * @eof - set if no more data needs to be returned - * @data - pointer to our soft state - * - * Display information about the physical drives on physical channel 2. +/* + * Display scsi inquiry */ -static int -proc_pdrv_ch2(char *page, char **start, off_t offset, int count, int *eof, - void *data) +static void +mega_print_inquiry(struct seq_file *m, char *scsi_inq) { - adapter_t *adapter = (adapter_t *)data; - - *eof = 1; - - return (proc_pdrv(adapter, page, 2)); -} + int i; + seq_puts(m, " Vendor: "); + seq_write(m, scsi_inq + 8, 8); + seq_puts(m, " Model: "); + seq_write(m, scsi_inq + 16, 16); + seq_puts(m, " Rev: "); + seq_write(m, scsi_inq + 32, 4); + seq_putc(m, '\n'); -/** - * proc_pdrv_ch3() - * @page - buffer to write the data in - * @start - where the actual data has been written in page - * @offset - same meaning as the read system call - * @count - same meaning as the read system call - * @eof - set if no more data needs to be returned - * @data - pointer to our soft state - * - * Display information about the physical drives on physical channel 3. - */ -static int -proc_pdrv_ch3(char *page, char **start, off_t offset, int count, int *eof, - void *data) -{ - adapter_t *adapter = (adapter_t *)data; + i = scsi_inq[0] & 0x1f; + seq_printf(m, " Type: %s ", scsi_device_type(i)); - *eof = 1; + seq_printf(m, " ANSI SCSI revision: %02x", + scsi_inq[2] & 0x07); - return (proc_pdrv(adapter, page, 3)); + if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 ) + seq_puts(m, " CCS\n"); + else + seq_putc(m, '\n'); } - /** - * proc_pdrv() + * proc_show_pdrv() + * @m - Synthetic file construction data * @page - buffer to write the data in * @adapter - pointer to our soft state * * Display information about the physical drives. 
*/ static int -proc_pdrv(adapter_t *adapter, char *page, int channel) +proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel) { dma_addr_t dma_handle; char *scsi_inq; @@ -2611,32 +2340,24 @@ proc_pdrv(adapter_t *adapter, char *page, int channel) u8 state; int tgt; int max_channels; - int len = 0; - char str[80]; int i; - if( make_local_pdev(adapter, &pdev) != 0 ) { - return len; - } + if( make_local_pdev(adapter, &pdev) != 0 ) + return 0; - if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) { + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) goto free_pdev; - } if( mega_adapinq(adapter, dma_handle) != 0 ) { - len = sprintf(page, "Adapter inquiry failed.\n"); - + seq_puts(m, "Adapter inquiry failed.\n"); printk(KERN_WARNING "megaraid: inquiry failed.\n"); - goto free_inquiry; } scsi_inq = pci_alloc_consistent(pdev, 256, &scsi_inq_dma_handle); - if( scsi_inq == NULL ) { - len = sprintf(page, "memory not available for scsi inq.\n"); - + seq_puts(m, "memory not available for scsi inq.\n"); goto free_inquiry; } @@ -2659,39 +2380,31 @@ proc_pdrv(adapter_t *adapter, char *page, int channel) i = channel*16 + tgt; state = *(pdrv_state + i); - switch( state & 0x0F ) { - case PDRV_ONLINE: - sprintf(str, - "Channel:%2d Id:%2d State: Online", - channel, tgt); + seq_printf(m, "Channel:%2d Id:%2d State: Online", + channel, tgt); break; case PDRV_FAILED: - sprintf(str, - "Channel:%2d Id:%2d State: Failed", - channel, tgt); + seq_printf(m, "Channel:%2d Id:%2d State: Failed", + channel, tgt); break; case PDRV_RBLD: - sprintf(str, - "Channel:%2d Id:%2d State: Rebuild", - channel, tgt); + seq_printf(m, "Channel:%2d Id:%2d State: Rebuild", + channel, tgt); break; case PDRV_HOTSPARE: - sprintf(str, - "Channel:%2d Id:%2d State: Hot spare", - channel, tgt); + seq_printf(m, "Channel:%2d Id:%2d State: Hot spare", + channel, tgt); break; default: - sprintf(str, - "Channel:%2d Id:%2d State: Un-configured", - channel, tgt); + seq_printf(m, "Channel:%2d Id:%2d State: Un-configured", + channel, tgt); break; - } /* @@ -2710,11 +2423,8 @@ proc_pdrv(adapter_t *adapter, char *page, int channel) * Check for overflow. 
We print less than 240 * characters for inquiry */ - if( (len + 240) >= PAGE_SIZE ) break; - - len += sprintf(page+len, "%s.\n", str); - - len += mega_print_inquiry(page+len, scsi_inq); + seq_puts(m, ".\n"); + mega_print_inquiry(m, scsi_inq); } free_pci: @@ -2723,150 +2433,68 @@ free_inquiry: mega_free_inquiry(inquiry, dma_handle, pdev); free_pdev: free_local_pdev(pdev); - - return len; -} - - -/* - * Display scsi inquiry - */ -static int -mega_print_inquiry(char *page, char *scsi_inq) -{ - int len = 0; - int i; - - len = sprintf(page, " Vendor: "); - for( i = 8; i < 16; i++ ) { - len += sprintf(page+len, "%c", scsi_inq[i]); - } - - len += sprintf(page+len, " Model: "); - - for( i = 16; i < 32; i++ ) { - len += sprintf(page+len, "%c", scsi_inq[i]); - } - - len += sprintf(page+len, " Rev: "); - - for( i = 32; i < 36; i++ ) { - len += sprintf(page+len, "%c", scsi_inq[i]); - } - - len += sprintf(page+len, "\n"); - - i = scsi_inq[0] & 0x1f; - - len += sprintf(page+len, " Type: %s ", scsi_device_type(i)); - - len += sprintf(page+len, - " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07); - - if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 ) - len += sprintf(page+len, " CCS\n"); - else - len += sprintf(page+len, "\n"); - - return len; + return 0; } - /** - * proc_rdrv_10() - * @page - buffer to write the data in - * @start - where the actual data has been written in page - * @offset - same meaning as the read system call - * @count - same meaning as the read system call - * @eof - set if no more data needs to be returned - * @data - pointer to our soft state + * proc_show_pdrv_ch0() + * @m - Synthetic file construction data + * @v - File iterator * - * Display real time information about the logical drives 0 through 9. + * Display information about the physical drives on physical channel 0. */ static int -proc_rdrv_10(char *page, char **start, off_t offset, int count, int *eof, - void *data) +proc_show_pdrv_ch0(struct seq_file *m, void *v) { - adapter_t *adapter = (adapter_t *)data; - - *eof = 1; - - return (proc_rdrv(adapter, page, 0, 9)); + return proc_show_pdrv(m, m->private, 0); } /** - * proc_rdrv_20() - * @page - buffer to write the data in - * @start - where the actual data has been written in page - * @offset - same meaning as the read system call - * @count - same meaning as the read system call - * @eof - set if no more data needs to be returned - * @data - pointer to our soft state + * proc_show_pdrv_ch1() + * @m - Synthetic file construction data + * @v - File iterator * - * Display real time information about the logical drives 0 through 9. + * Display information about the physical drives on physical channel 1. */ static int -proc_rdrv_20(char *page, char **start, off_t offset, int count, int *eof, - void *data) +proc_show_pdrv_ch1(struct seq_file *m, void *v) { - adapter_t *adapter = (adapter_t *)data; - - *eof = 1; - - return (proc_rdrv(adapter, page, 10, 19)); + return proc_show_pdrv(m, m->private, 1); } /** - * proc_rdrv_30() - * @page - buffer to write the data in - * @start - where the actual data has been written in page - * @offset - same meaning as the read system call - * @count - same meaning as the read system call - * @eof - set if no more data needs to be returned - * @data - pointer to our soft state + * proc_show_pdrv_ch2() + * @m - Synthetic file construction data + * @v - File iterator * - * Display real time information about the logical drives 0 through 9. + * Display information about the physical drives on physical channel 2. 
*/ static int -proc_rdrv_30(char *page, char **start, off_t offset, int count, int *eof, - void *data) +proc_show_pdrv_ch2(struct seq_file *m, void *v) { - adapter_t *adapter = (adapter_t *)data; - - *eof = 1; - - return (proc_rdrv(adapter, page, 20, 29)); + return proc_show_pdrv(m, m->private, 2); } /** - * proc_rdrv_40() - * @page - buffer to write the data in - * @start - where the actual data has been written in page - * @offset - same meaning as the read system call - * @count - same meaning as the read system call - * @eof - set if no more data needs to be returned - * @data - pointer to our soft state + * proc_show_pdrv_ch3() + * @m - Synthetic file construction data + * @v - File iterator * - * Display real time information about the logical drives 0 through 9. + * Display information about the physical drives on physical channel 3. */ static int -proc_rdrv_40(char *page, char **start, off_t offset, int count, int *eof, - void *data) +proc_show_pdrv_ch3(struct seq_file *m, void *v) { - adapter_t *adapter = (adapter_t *)data; - - *eof = 1; - - return (proc_rdrv(adapter, page, 30, 39)); + return proc_show_pdrv(m, m->private, 3); } /** - * proc_rdrv() - * @page - buffer to write the data in + * proc_show_rdrv() + * @m - Synthetic file construction data * @adapter - pointer to our soft state * @start - starting logical drive to display * @end - ending logical drive to display @@ -2875,7 +2503,7 @@ proc_rdrv_40(char *page, char **start, off_t offset, int count, int *eof, * /proc/scsi/scsi interface */ static int -proc_rdrv(adapter_t *adapter, char *page, int start, int end ) +proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end ) { dma_addr_t dma_handle; logdrv_param *lparam; @@ -2887,29 +2515,18 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end ) u8 *rdrv_state; int num_ldrv; u32 array_sz; - int len = 0; int i; - if( make_local_pdev(adapter, &pdev) != 0 ) { - return len; - } + if( make_local_pdev(adapter, &pdev) != 0 ) + return 0; - if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) { - free_local_pdev(pdev); - return len; - } + if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL ) + goto free_pdev; if( mega_adapinq(adapter, dma_handle) != 0 ) { - - len = sprintf(page, "Adapter inquiry failed.\n"); - + seq_puts(m, "Adapter inquiry failed.\n"); printk(KERN_WARNING "megaraid: inquiry failed.\n"); - - mega_free_inquiry(inquiry, dma_handle, pdev); - - free_local_pdev(pdev); - - return len; + goto free_inquiry; } memset(&mc, 0, sizeof(megacmd_t)); @@ -2935,13 +2552,8 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end ) &disk_array_dma_handle); if( disk_array == NULL ) { - len = sprintf(page, "memory not available.\n"); - - mega_free_inquiry(inquiry, dma_handle, pdev); - - free_local_pdev(pdev); - - return len; + seq_puts(m, "memory not available.\n"); + goto free_inquiry; } mc.xferaddr = (u32)disk_array_dma_handle; @@ -2951,17 +2563,8 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end ) mc.opcode = OP_DCMD_READ_CONFIG; if( mega_internal_command(adapter, &mc, NULL) ) { - - len = sprintf(page, "40LD read config failed.\n"); - - mega_free_inquiry(inquiry, dma_handle, pdev); - - pci_free_consistent(pdev, array_sz, disk_array, - disk_array_dma_handle); - - free_local_pdev(pdev); - - return len; + seq_puts(m, "40LD read config failed.\n"); + goto free_pci; } } @@ -2969,24 +2572,10 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end ) mc.cmd = NEW_READ_CONFIG_8LD; if( 
mega_internal_command(adapter, &mc, NULL) ) { - mc.cmd = READ_CONFIG_8LD; - - if( mega_internal_command(adapter, &mc, - NULL) ){ - - len = sprintf(page, - "8LD read config failed.\n"); - - mega_free_inquiry(inquiry, dma_handle, pdev); - - pci_free_consistent(pdev, array_sz, - disk_array, - disk_array_dma_handle); - - free_local_pdev(pdev); - - return len; + if( mega_internal_command(adapter, &mc, NULL) ) { + seq_puts(m, "8LD read config failed.\n"); + goto free_pci; } } } @@ -3006,29 +2595,23 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end ) * Check for overflow. We print less than 240 characters for * information about each logical drive. */ - if( (len + 240) >= PAGE_SIZE ) break; - - len += sprintf(page+len, "Logical drive:%2d:, ", i); + seq_printf(m, "Logical drive:%2d:, ", i); switch( rdrv_state[i] & 0x0F ) { case RDRV_OFFLINE: - len += sprintf(page+len, "state: offline"); + seq_puts(m, "state: offline"); break; - case RDRV_DEGRADED: - len += sprintf(page+len, "state: degraded"); + seq_puts(m, "state: degraded"); break; - case RDRV_OPTIMAL: - len += sprintf(page+len, "state: optimal"); + seq_puts(m, "state: optimal"); break; - case RDRV_DELETED: - len += sprintf(page+len, "state: deleted"); + seq_puts(m, "state: deleted"); break; - default: - len += sprintf(page+len, "state: unknown"); + seq_puts(m, "state: unknown"); break; } @@ -3036,84 +2619,203 @@ proc_rdrv(adapter_t *adapter, char *page, int start, int end ) * Check if check consistency or initialization is going on * for this logical drive. */ - if( (rdrv_state[i] & 0xF0) == 0x20 ) { - len += sprintf(page+len, - ", check-consistency in progress"); - } - else if( (rdrv_state[i] & 0xF0) == 0x10 ) { - len += sprintf(page+len, - ", initialization in progress"); - } + if( (rdrv_state[i] & 0xF0) == 0x20 ) + seq_puts(m, ", check-consistency in progress"); + else if( (rdrv_state[i] & 0xF0) == 0x10 ) + seq_puts(m, ", initialization in progress"); - len += sprintf(page+len, "\n"); - - len += sprintf(page+len, "Span depth:%3d, ", - lparam->span_depth); + seq_putc(m, '\n'); - len += sprintf(page+len, "RAID level:%3d, ", - lparam->level); - - len += sprintf(page+len, "Stripe size:%3d, ", - lparam->stripe_sz ? lparam->stripe_sz/2: 128); - - len += sprintf(page+len, "Row size:%3d\n", - lparam->row_size); - - - len += sprintf(page+len, "Read Policy: "); + seq_printf(m, "Span depth:%3d, ", lparam->span_depth); + seq_printf(m, "RAID level:%3d, ", lparam->level); + seq_printf(m, "Stripe size:%3d, ", + lparam->stripe_sz ? 
lparam->stripe_sz/2: 128); + seq_printf(m, "Row size:%3d\n", lparam->row_size); + seq_puts(m, "Read Policy: "); switch(lparam->read_ahead) { - case NO_READ_AHEAD: - len += sprintf(page+len, "No read ahead, "); + seq_puts(m, "No read ahead, "); break; - case READ_AHEAD: - len += sprintf(page+len, "Read ahead, "); + seq_puts(m, "Read ahead, "); break; - case ADAP_READ_AHEAD: - len += sprintf(page+len, "Adaptive, "); + seq_puts(m, "Adaptive, "); break; } - len += sprintf(page+len, "Write Policy: "); - + seq_puts(m, "Write Policy: "); switch(lparam->write_mode) { - case WRMODE_WRITE_THRU: - len += sprintf(page+len, "Write thru, "); + seq_puts(m, "Write thru, "); break; - case WRMODE_WRITE_BACK: - len += sprintf(page+len, "Write back, "); + seq_puts(m, "Write back, "); break; } - len += sprintf(page+len, "Cache Policy: "); - + seq_puts(m, "Cache Policy: "); switch(lparam->direct_io) { - case CACHED_IO: - len += sprintf(page+len, "Cached IO\n\n"); + seq_puts(m, "Cached IO\n\n"); break; - case DIRECT_IO: - len += sprintf(page+len, "Direct IO\n\n"); + seq_puts(m, "Direct IO\n\n"); break; } } - mega_free_inquiry(inquiry, dma_handle, pdev); - +free_pci: pci_free_consistent(pdev, array_sz, disk_array, disk_array_dma_handle); - +free_inquiry: + mega_free_inquiry(inquiry, dma_handle, pdev); +free_pdev: free_local_pdev(pdev); + return 0; +} + +/** + * proc_show_rdrv_10() + * @m - Synthetic file construction data + * @v - File iterator + * + * Display real time information about the logical drives 0 through 9. + */ +static int +proc_show_rdrv_10(struct seq_file *m, void *v) +{ + return proc_show_rdrv(m, m->private, 0, 9); +} + + +/** + * proc_show_rdrv_20() + * @m - Synthetic file construction data + * @v - File iterator + * + * Display real time information about the logical drives 0 through 9. + */ +static int +proc_show_rdrv_20(struct seq_file *m, void *v) +{ + return proc_show_rdrv(m, m->private, 10, 19); +} + + +/** + * proc_show_rdrv_30() + * @m - Synthetic file construction data + * @v - File iterator + * + * Display real time information about the logical drives 0 through 9. + */ +static int +proc_show_rdrv_30(struct seq_file *m, void *v) +{ + return proc_show_rdrv(m, m->private, 20, 29); +} + + +/** + * proc_show_rdrv_40() + * @m - Synthetic file construction data + * @v - File iterator + * + * Display real time information about the logical drives 0 through 9. + */ +static int +proc_show_rdrv_40(struct seq_file *m, void *v) +{ + return proc_show_rdrv(m, m->private, 30, 39); +} + + +/* + * seq_file wrappers for procfile show routines. + */ +static int mega_proc_open(struct inode *inode, struct file *file) +{ + adapter_t *adapter = proc_get_parent_data(inode); + int (*show)(struct seq_file *, void *) = PDE_DATA(inode); + + return single_open(file, show, adapter); +} + +static const struct file_operations mega_proc_fops = { + .open = mega_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * Table of proc files we need to create. 
+ */ +struct mega_proc_file { + const char *name; + unsigned short ptr_offset; + int (*show) (struct seq_file *m, void *v); +}; + +static const struct mega_proc_file mega_proc_files[] = { + { "config", offsetof(adapter_t, proc_read), proc_show_config }, + { "stat", offsetof(adapter_t, proc_stat), proc_show_stat }, + { "mailbox", offsetof(adapter_t, proc_mbox), proc_show_mbox }, +#if MEGA_HAVE_ENH_PROC + { "rebuild-rate", offsetof(adapter_t, proc_rr), proc_show_rebuild_rate }, + { "battery-status", offsetof(adapter_t, proc_battery), proc_show_battery }, + { "diskdrives-ch0", offsetof(adapter_t, proc_pdrvstat[0]), proc_show_pdrv_ch0 }, + { "diskdrives-ch1", offsetof(adapter_t, proc_pdrvstat[1]), proc_show_pdrv_ch1 }, + { "diskdrives-ch2", offsetof(adapter_t, proc_pdrvstat[2]), proc_show_pdrv_ch2 }, + { "diskdrives-ch3", offsetof(adapter_t, proc_pdrvstat[3]), proc_show_pdrv_ch3 }, + { "raiddrives-0-9", offsetof(adapter_t, proc_rdrvstat[0]), proc_show_rdrv_10 }, + { "raiddrives-10-19", offsetof(adapter_t, proc_rdrvstat[1]), proc_show_rdrv_20 }, + { "raiddrives-20-29", offsetof(adapter_t, proc_rdrvstat[2]), proc_show_rdrv_30 }, + { "raiddrives-30-39", offsetof(adapter_t, proc_rdrvstat[3]), proc_show_rdrv_40 }, +#endif + { NULL } +}; + +/** + * mega_create_proc_entry() + * @index - index in soft state array + * @parent - parent node for this /proc entry + * + * Creates /proc entries for our controllers. + */ +static void +mega_create_proc_entry(int index, struct proc_dir_entry *parent) +{ + const struct mega_proc_file *f; + adapter_t *adapter = hba_soft_state[index]; + struct proc_dir_entry *dir, *de, **ppde; + u8 string[16]; - return len; + sprintf(string, "hba%d", adapter->host->host_no); + + dir = adapter->controller_proc_dir_entry = + proc_mkdir_data(string, 0, parent, adapter); + if(!dir) { + printk(KERN_WARNING "\nmegaraid: proc_mkdir failed\n"); + return; + } + + for (f = mega_proc_files; f->name; f++) { + de = proc_create_data(f->name, S_IRUSR, dir, &mega_proc_fops, + f->show); + if (!de) { + printk(KERN_WARNING "\nmegaraid: proc_create failed\n"); + return; + } + + ppde = (void *)adapter + f->ptr_offset; + *ppde = de; + } } + #else static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent) { @@ -4409,23 +4111,15 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt, * The last argument is the address of the passthru structure if the command * to be fired is a passthru command * - * lockscope specifies whether the caller has already acquired the lock. Of - * course, the caller must know which lock we are talking about. - * * Note: parameter 'pthru' is null for non-passthru commands. */ static int mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) { - Scsi_Cmnd *scmd; - struct scsi_device *sdev; + unsigned long flags; scb_t *scb; int rval; - scmd = scsi_allocate_command(GFP_KERNEL); - if (!scmd) - return -ENOMEM; - /* * The internal commands share one command id and hence are * serialized. 
This is so because we want to reserve maximum number of
@@ -4436,73 +4130,45 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
scb = &adapter->int_scb;
memset(scb, 0, sizeof(scb_t));
- sdev = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
- scmd->device = sdev;
-
- memset(adapter->int_cdb, 0, sizeof(adapter->int_cdb));
- scmd->cmnd = adapter->int_cdb;
- scmd->device->host = adapter->host;
- scmd->host_scribble = (void *)scb;
- scmd->cmnd[0] = MEGA_INTERNAL_CMD;
-
- scb->state |= SCB_ACTIVE;
- scb->cmd = scmd;
+ scb->idx = CMDID_INT_CMDS;
+ scb->state |= SCB_ACTIVE | SCB_PENDQ;
memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
/*
* Is it a passthru command
*/
- if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { -
+ if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
scb->pthru = pthru;
- }
-
- scb->idx = CMDID_INT_CMDS;
- megaraid_queue_lck(scmd, mega_internal_done);
+ spin_lock_irqsave(&adapter->lock, flags);
+ list_add_tail(&scb->list, &adapter->pending_list);
+ /*
+ * Check if the HBA is in quiescent state, e.g., during a
+ * delete logical drive operation. If it is, don't run
+ * the pending_list.
+ */
+ if (atomic_read(&adapter->quiescent) == 0)
+ mega_runpendq(adapter);
+ spin_unlock_irqrestore(&adapter->lock, flags);
wait_for_completion(&adapter->int_waitq);
- rval = scmd->result;
- mc->status = scmd->result;
- kfree(sdev);
+ mc->status = rval = adapter->int_status;
/*
* Print a debug message for all failed commands. Applications can use
* this information.
*/
- if( scmd->result && trace_level ) {
+ if (rval && trace_level) {
printk("megaraid: cmd [%x, %x, %x] status:[%x]\n",
- mc->cmd, mc->opcode, mc->subopcode, scmd->result);
+ mc->cmd, mc->opcode, mc->subopcode, rval);
}
mutex_unlock(&adapter->int_mtx);
-
- scsi_free_command(GFP_KERNEL, scmd);
-
return rval;
}
-
-/**
- * mega_internal_done()
- * @scmd - internal scsi command
- *
- * Callback routine for internal commands.
- */ -static void -mega_internal_done(Scsi_Cmnd *scmd) -{ - adapter_t *adapter; - - adapter = (adapter_t *)scmd->device->host->hostdata; - - complete(&adapter->int_waitq); - -} - - static struct scsi_host_template megaraid_template = { .module = THIS_MODULE, .name = "MegaRAID", @@ -4520,6 +4186,7 @@ static struct scsi_host_template megaraid_template = { .eh_device_reset_handler = megaraid_reset, .eh_bus_reset_handler = megaraid_reset, .eh_host_reset_handler = megaraid_reset, + .no_write_same = 1, }; static int diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index 4fb2adf6b80..508d65e5a51 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h @@ -853,10 +853,10 @@ typedef struct { u8 sglen; /* f/w supported scatter-gather list length */ - unsigned char int_cdb[MAX_COMMAND_SIZE]; scb_t int_scb; struct mutex int_mtx; /* To synchronize the internal commands */ + int int_status; /* status of internal cmd */ struct completion int_waitq; /* wait queue for internal cmds */ @@ -987,24 +987,7 @@ static int mega_init_scb (adapter_t *); static int mega_is_bios_enabled (adapter_t *); #ifdef CONFIG_PROC_FS -static int mega_print_inquiry(char *, char *); static void mega_create_proc_entry(int, struct proc_dir_entry *); -static int proc_read_config(char *, char **, off_t, int, int *, void *); -static int proc_read_stat(char *, char **, off_t, int, int *, void *); -static int proc_read_mbox(char *, char **, off_t, int, int *, void *); -static int proc_rebuild_rate(char *, char **, off_t, int, int *, void *); -static int proc_battery(char *, char **, off_t, int, int *, void *); -static int proc_pdrv_ch0(char *, char **, off_t, int, int *, void *); -static int proc_pdrv_ch1(char *, char **, off_t, int, int *, void *); -static int proc_pdrv_ch2(char *, char **, off_t, int, int *, void *); -static int proc_pdrv_ch3(char *, char **, off_t, int, int *, void *); -static int proc_pdrv(adapter_t *, char *, int); -static int proc_rdrv_10(char *, char **, off_t, int, int *, void *); -static int proc_rdrv_20(char *, char **, off_t, int, int *, void *); -static int proc_rdrv_30(char *, char **, off_t, int, int *, void *); -static int proc_rdrv_40(char *, char **, off_t, int, int *, void *); -static int proc_rdrv(adapter_t *, char *, int, int); - static int mega_adapinq(adapter_t *, dma_addr_t); static int mega_internal_dev_inquiry(adapter_t *, u8, u8, dma_addr_t); #endif @@ -1021,7 +1004,6 @@ static int mega_del_logdrv(adapter_t *, int); static int mega_do_del_logdrv(adapter_t *, int); static void mega_get_max_sgl(adapter_t *); static int mega_internal_command(adapter_t *, megacmd_t *, mega_passthru *); -static void mega_internal_done(Scsi_Cmnd *); static int mega_support_cluster(adapter_t *); #endif diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index e6a1e0b38a1..e2237a97cb9 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -367,6 +367,7 @@ static struct scsi_host_template megaraid_template_g = { .eh_host_reset_handler = megaraid_reset_handler, .change_queue_depth = megaraid_change_queue_depth, .use_clustering = ENABLE_CLUSTERING, + .no_write_same = 1, .sdev_attrs = megaraid_sdev_attrs, .shost_attrs = megaraid_shost_attrs, }; @@ -534,7 +535,6 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; out_cmm_unreg: - pci_set_drvdata(pdev, NULL); megaraid_cmm_unregister(adapter); out_fini_mbox: megaraid_fini_mbox(adapter); @@ -549,7 +549,7 @@ out_probe_one: /** * 
megaraid_detach_one - release framework resources and call LLD release routine - * @pdev : handle for our PCI cofiguration space + * @pdev : handle for our PCI configuration space * * This routine is called during driver unload. We free all the allocated * resources and call the corresponding LLD so that it can also release all @@ -594,11 +594,6 @@ megaraid_detach_one(struct pci_dev *pdev) // detach from the IO sub-system megaraid_io_detach(adapter); - // reset the device state in the PCI structure. We check this - // condition when we enter here. If the device state is NULL, - // that would mean the device has already been removed - pci_set_drvdata(pdev, NULL); - // Unregister from common management module // // FIXME: this must return success or failure for conditions if there @@ -979,7 +974,7 @@ megaraid_fini_mbox(adapter_t *adapter) * @adapter : soft state of the raid controller * * Allocate and align the shared mailbox. This maibox is used to issue - * all the commands. For IO based controllers, the mailbox is also regsitered + * all the commands. For IO based controllers, the mailbox is also registered * with the FW. Allocate memory for all commands as well. * This is our big allocator. */ @@ -2027,7 +2022,7 @@ megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb, * @scb : scsi control block * @scp : scsi command from the mid-layer * - * Prepare a command for the scsi physical devices. This rountine prepares + * Prepare a command for the scsi physical devices. This routine prepares * commands for devices which can take extended CDBs (>10 bytes). */ static void @@ -2586,7 +2581,7 @@ megaraid_abort_handler(struct scsi_cmnd *scp) } /** - * megaraid_reset_handler - device reset hadler for mailbox based driver + * megaraid_reset_handler - device reset handler for mailbox based driver * @scp : reference command * * Reset handler for the mailbox based controller. First try to find out if @@ -3446,7 +3441,7 @@ megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb) * megaraid_mbox_setup_device_map - manage device ids * @adapter : Driver's soft state * - * Manange the device ids to have an appropriate mapping between the kernel + * Manage the device ids to have an appropriate mapping between the kernel * scsi addresses and megaraid scsi and logical drive addresses. We export * scsi devices on their actual addresses, whereas the logical drives are * exported on a virtual scsi channel. 
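The device-map comment above (megaraid_mbox_setup_device_map) describes the addressing split: physical devices keep their real (channel, target) SCSI addresses, while logical drives are grouped on one virtual channel past the physical ones. A minimal standalone sketch of that convention, with NPHYS_CHANNELS and MAX_TARGET as assumed placeholder constants rather than values taken from the driver:

#include <stdbool.h>
#include <stdio.h>

#define NPHYS_CHANNELS	2	/* assumed count of real SCSI channels */
#define MAX_TARGET	16	/* assumed targets per channel */

/* Logical drives sit on the first channel past the physical ones. */
static bool is_logical_drive(int channel, int target)
{
	return channel == NPHYS_CHANNELS && target < MAX_TARGET;
}

int main(void)
{
	/* (NPHYS_CHANNELS, 5) lands on the virtual channel: a logical drive. */
	printf("%d\n", is_logical_drive(NPHYS_CHANNELS, 5));
	/* (0, 5) stays a physical device on a real channel. */
	printf("%d\n", is_logical_drive(0, 5));
	return 0;
}

Keeping logical drives on a dedicated virtual channel means firmware-side renumbering of arrays never collides with the physical topology the SCSI midlayer has already scanned.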
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index 25506c77738..a70692779a1 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -486,6 +486,8 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc) pthru32->dataxferaddr = kioc->buf_paddr; if (kioc->data_dir & UIOC_WR) { + if (pthru32->dataxferlen > kioc->xferlen) + return -EINVAL; if (copy_from_user(kioc->buf_vaddr, kioc->user_data, pthru32->dataxferlen)) { return (-EFAULT); @@ -896,7 +898,7 @@ hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo) /** * mraid_mm_register_adp - Registration routine for low level drivers - * @lld_adp : Adapter objejct + * @lld_adp : Adapter object */ int mraid_mm_register_adp(mraid_mmadp_t *lld_adp) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 408d2548a74..32166c2c785 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -33,9 +33,9 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "06.506.00.00-rc1" -#define MEGASAS_RELDATE "Feb. 9, 2013" -#define MEGASAS_EXT_VERSION "Sat. Feb. 9 17:00:00 PDT 2013" +#define MEGASAS_VERSION "06.803.01.00-rc1" +#define MEGASAS_RELDATE "Mar. 10, 2014" +#define MEGASAS_EXT_VERSION "Mon. Mar. 10 17:00:00 PDT 2014" /* * Device IDs @@ -48,7 +48,35 @@ #define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073 #define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071 #define PCI_DEVICE_ID_LSI_FUSION 0x005b +#define PCI_DEVICE_ID_LSI_PLASMA 0x002f #define PCI_DEVICE_ID_LSI_INVADER 0x005d +#define PCI_DEVICE_ID_LSI_FURY 0x005f + +/* + * Intel HBA SSDIDs + */ +#define MEGARAID_INTEL_RS3DC080_SSDID 0x9360 +#define MEGARAID_INTEL_RS3DC040_SSDID 0x9362 +#define MEGARAID_INTEL_RS3SC008_SSDID 0x9380 +#define MEGARAID_INTEL_RS3MC044_SSDID 0x9381 +#define MEGARAID_INTEL_RS3WC080_SSDID 0x9341 +#define MEGARAID_INTEL_RS3WC040_SSDID 0x9343 + +/* + * Intel HBA branding + */ +#define MEGARAID_INTEL_RS3DC080_BRANDING \ + "Intel(R) RAID Controller RS3DC080" +#define MEGARAID_INTEL_RS3DC040_BRANDING \ + "Intel(R) RAID Controller RS3DC040" +#define MEGARAID_INTEL_RS3SC008_BRANDING \ + "Intel(R) RAID Controller RS3SC008" +#define MEGARAID_INTEL_RS3MC044_BRANDING \ + "Intel(R) RAID Controller RS3MC044" +#define MEGARAID_INTEL_RS3WC080_BRANDING \ + "Intel(R) RAID Controller RS3WC080" +#define MEGARAID_INTEL_RS3WC040_BRANDING \ + "Intel(R) RAID Controller RS3WC040" /* * ===================================== @@ -143,6 +171,7 @@ #define MR_DCMD_CTRL_GET_INFO 0x01010000 #define MR_DCMD_LD_GET_LIST 0x03010000 +#define MR_DCMD_LD_LIST_QUERY 0x03010100 #define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 #define MR_FLUSH_CTRL_CACHE 0x01 @@ -163,6 +192,12 @@ #define MR_DCMD_PD_LIST_QUERY 0x02010100 /* + * Global functions + */ +extern u8 MR_ValidateMapInfo(struct megasas_instance *instance); + + +/* * MFI command completion codes */ enum MFI_STAT { @@ -312,6 +347,15 @@ enum MR_PD_QUERY_TYPE { MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, }; +enum MR_LD_QUERY_TYPE { + MR_LD_QUERY_TYPE_ALL = 0, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1, + MR_LD_QUERY_TYPE_USED_TGT_IDS = 2, + MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3, + MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4, +}; + + #define MR_EVT_CFG_CLEARED 0x0004 #define MR_EVT_LD_STATE_CHANGE 0x0051 #define MR_EVT_PD_INSERTED 0x005b @@ -402,6 +446,14 @@ struct MR_LD_LIST { } ldList[MAX_LOGICAL_DRIVES]; } __packed; +struct MR_LD_TARGETID_LIST { + u32 size; + u32 count; + u8 pad[3]; + u8 targetId[MAX_LOGICAL_DRIVES]; 
+}; + + /* * SAS controller properties */ @@ -441,21 +493,39 @@ struct megasas_ctrl_prop { * a bit in the following structure. */ struct { - u32 copyBackDisabled : 1; - u32 SMARTerEnabled : 1; - u32 prCorrectUnconfiguredAreas : 1; - u32 useFdeOnly : 1; - u32 disableNCQ : 1; - u32 SSDSMARTerEnabled : 1; - u32 SSDPatrolReadEnabled : 1; - u32 enableSpinDownUnconfigured : 1; - u32 autoEnhancedImport : 1; - u32 enableSecretKeyControl : 1; - u32 disableOnlineCtrlReset : 1; - u32 allowBootWithPinnedCache : 1; - u32 disableSpinDownHS : 1; - u32 enableJBOD : 1; - u32 reserved :18; +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:18; + u32 enableJBOD:1; + u32 disableSpinDownHS:1; + u32 allowBootWithPinnedCache:1; + u32 disableOnlineCtrlReset:1; + u32 enableSecretKeyControl:1; + u32 autoEnhancedImport:1; + u32 enableSpinDownUnconfigured:1; + u32 SSDPatrolReadEnabled:1; + u32 SSDSMARTerEnabled:1; + u32 disableNCQ:1; + u32 useFdeOnly:1; + u32 prCorrectUnconfiguredAreas:1; + u32 SMARTerEnabled:1; + u32 copyBackDisabled:1; +#else + u32 copyBackDisabled:1; + u32 SMARTerEnabled:1; + u32 prCorrectUnconfiguredAreas:1; + u32 useFdeOnly:1; + u32 disableNCQ:1; + u32 SSDSMARTerEnabled:1; + u32 SSDPatrolReadEnabled:1; + u32 enableSpinDownUnconfigured:1; + u32 autoEnhancedImport:1; + u32 enableSecretKeyControl:1; + u32 disableOnlineCtrlReset:1; + u32 allowBootWithPinnedCache:1; + u32 disableSpinDownHS:1; + u32 enableJBOD:1; + u32 reserved:18; +#endif } OnOffProperties; u8 autoSnapVDSpace; u8 viewSpace; @@ -490,7 +560,8 @@ struct megasas_ctrl_info { u8 PCIE:1; u8 iSCSI:1; u8 SAS_3G:1; - u8 reserved_0:4; + u8 SRIOV:1; + u8 reserved_0:3; u8 reserved_1[6]; u8 port_count; u64 port_addr[8]; @@ -702,8 +773,167 @@ struct megasas_ctrl_info { */ char package_version[0x60]; - u8 pad[0x800 - 0x6a0]; + /* + * If adapterOperations.supportMoreThan8Phys is set, + * and deviceInterface.portCount is greater than 8, + * SAS Addrs for first 8 ports shall be populated in + * deviceInterface.portAddr, and the rest shall be + * populated in deviceInterfacePortAddr2. 
+ */ + u64 deviceInterfacePortAddr2[8]; /*6a0h */ + u8 reserved3[128]; /*6e0h */ + + struct { /*760h */ + u16 minPdRaidLevel_0:4; + u16 maxPdRaidLevel_0:12; + + u16 minPdRaidLevel_1:4; + u16 maxPdRaidLevel_1:12; + + u16 minPdRaidLevel_5:4; + u16 maxPdRaidLevel_5:12; + + u16 minPdRaidLevel_1E:4; + u16 maxPdRaidLevel_1E:12; + + u16 minPdRaidLevel_6:4; + u16 maxPdRaidLevel_6:12; + + u16 minPdRaidLevel_10:4; + u16 maxPdRaidLevel_10:12; + + u16 minPdRaidLevel_50:4; + u16 maxPdRaidLevel_50:12; + + u16 minPdRaidLevel_60:4; + u16 maxPdRaidLevel_60:12; + + u16 minPdRaidLevel_1E_RLQ0:4; + u16 maxPdRaidLevel_1E_RLQ0:12; + + u16 minPdRaidLevel_1E0_RLQ0:4; + u16 maxPdRaidLevel_1E0_RLQ0:12; + + u16 reserved[6]; + } pdsForRaidLevels; + + u16 maxPds; /*780h */ + u16 maxDedHSPs; /*782h */ + u16 maxGlobalHSPs; /*784h */ + u16 ddfSize; /*786h */ + u8 maxLdsPerArray; /*788h */ + u8 partitionsInDDF; /*789h */ + u8 lockKeyBinding; /*78ah */ + u8 maxPITsPerLd; /*78bh */ + u8 maxViewsPerLd; /*78ch */ + u8 maxTargetId; /*78dh */ + u16 maxBvlVdSize; /*78eh */ + + u16 maxConfigurableSSCSize; /*790h */ + u16 currentSSCsize; /*792h */ + + char expanderFwVersion[12]; /*794h */ + + u16 PFKTrialTimeRemaining; /*7A0h */ + + u16 cacheMemorySize; /*7A2h */ + + struct { /*7A4h */ +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:5; + u32 activePassive:2; + u32 supportConfigAutoBalance:1; + u32 mpio:1; + u32 supportDataLDonSSCArray:1; + u32 supportPointInTimeProgress:1; + u32 supportUnevenSpans:1; + u32 dedicatedHotSparesLimited:1; + u32 headlessMode:1; + u32 supportEmulatedDrives:1; + u32 supportResetNow:1; + u32 realTimeScheduler:1; + u32 supportSSDPatrolRead:1; + u32 supportPerfTuning:1; + u32 disableOnlinePFKChange:1; + u32 supportJBOD:1; + u32 supportBootTimePFKChange:1; + u32 supportSetLinkSpeed:1; + u32 supportEmergencySpares:1; + u32 supportSuspendResumeBGops:1; + u32 blockSSDWriteCacheChange:1; + u32 supportShieldState:1; + u32 supportLdBBMInfo:1; + u32 supportLdPIType3:1; + u32 supportLdPIType2:1; + u32 supportLdPIType1:1; + u32 supportPIcontroller:1; +#else + u32 supportPIcontroller:1; + u32 supportLdPIType1:1; + u32 supportLdPIType2:1; + u32 supportLdPIType3:1; + u32 supportLdBBMInfo:1; + u32 supportShieldState:1; + u32 blockSSDWriteCacheChange:1; + u32 supportSuspendResumeBGops:1; + u32 supportEmergencySpares:1; + u32 supportSetLinkSpeed:1; + u32 supportBootTimePFKChange:1; + u32 supportJBOD:1; + u32 disableOnlinePFKChange:1; + u32 supportPerfTuning:1; + u32 supportSSDPatrolRead:1; + u32 realTimeScheduler:1; + + u32 supportResetNow:1; + u32 supportEmulatedDrives:1; + u32 headlessMode:1; + u32 dedicatedHotSparesLimited:1; + + + u32 supportUnevenSpans:1; + u32 supportPointInTimeProgress:1; + u32 supportDataLDonSSCArray:1; + u32 mpio:1; + u32 supportConfigAutoBalance:1; + u32 activePassive:2; + u32 reserved:5; +#endif + } adapterOperations2; + + u8 driverVersion[32]; /*7A8h */ + u8 maxDAPdCountSpinup60; /*7C8h */ + u8 temperatureROC; /*7C9h */ + u8 temperatureCtrl; /*7CAh */ + u8 reserved4; /*7CBh */ + u16 maxConfigurablePds; /*7CCh */ + + + u8 reserved5[2]; /*0x7CDh */ + + /* + * HA cluster information + */ + struct { + u32 peerIsPresent:1; + u32 peerIsIncompatible:1; + u32 hwIncompatible:1; + u32 fwVersionMismatch:1; + u32 ctrlPropIncompatible:1; + u32 premiumFeatureMismatch:1; + u32 reserved:26; + } cluster; + + char clusterId[16]; /*7D4h */ + struct { + u8 maxVFsSupported; /*0x7E4*/ + u8 numVFsEnabled; /*0x7E5*/ + u8 requestorId; /*0x7E6 0:PF, 1:VF1, 2:VF2*/ + u8 reserved; /*0x7E7*/ + } iov; + + u8 
pad[0x800-0x7E8]; /*0x7E8 pad to 2k */ } __packed; /* @@ -712,7 +942,7 @@ struct megasas_ctrl_info { * =============================== */ #define MEGASAS_MAX_PD_CHANNELS 2 -#define MEGASAS_MAX_LD_CHANNELS 2 +#define MEGASAS_MAX_LD_CHANNELS 1 #define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \ MEGASAS_MAX_LD_CHANNELS) #define MEGASAS_MAX_DEV_PER_CHANNEL 128 @@ -759,7 +989,7 @@ struct megasas_ctrl_info { #define MEGASAS_INT_CMDS 32 #define MEGASAS_SKINNY_INT_CMDS 5 -#define MEGASAS_MAX_MSIX_QUEUES 16 +#define MEGASAS_MAX_MSIX_QUEUES 128 /* * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit * SGLs based on the size of dma_addr_t @@ -774,7 +1004,9 @@ struct megasas_ctrl_info { #define MFI_OB_INTR_STATUS_MASK 0x00000002 #define MFI_POLL_TIMEOUT_SECS 60 - +#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF (5 * HZ) +#define MEGASAS_OCR_SETTLE_TIME_VF (1000 * 30) +#define MEGASAS_ROUTINE_WAIT_TIME_VF 300 #define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000 #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001 #define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004) @@ -784,6 +1016,11 @@ struct megasas_ctrl_info { #define MFI_1068_PCSR_OFFSET 0x84 #define MFI_1068_FW_HANDSHAKE_OFFSET 0x64 #define MFI_1068_FW_READY 0xDDDD0000 + +#define MR_MAX_REPLY_QUEUES_OFFSET 0X0000001F +#define MR_MAX_REPLY_QUEUES_EXT_OFFSET 0X003FC000 +#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14 +#define MR_MAX_MSIX_REG_ARRAY 16 /* * register set for both 1068 and 1078 controllers * structure extended for 1078 registers @@ -893,6 +1130,21 @@ union megasas_sgl_frame { } __attribute__ ((packed)); +typedef union _MFI_CAPABILITIES { + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved:30; + u32 support_additional_msix:1; + u32 support_fp_remote_lun:1; +#else + u32 support_fp_remote_lun:1; + u32 support_additional_msix:1; + u32 reserved:30; +#endif + } mfi_capabilities; + u32 reg; +} MFI_CAPABILITIES; + struct megasas_init_frame { u8 cmd; /*00h */ @@ -900,7 +1152,7 @@ struct megasas_init_frame { u8 cmd_status; /*02h */ u8 reserved_1; /*03h */ - u32 reserved_2; /*04h */ + MFI_CAPABILITIES driver_operations; /*04h*/ u32 context; /*08h */ u32 pad_0; /*0Ch */ @@ -1115,9 +1367,15 @@ struct megasas_cmd; union megasas_evt_class_locale { struct { +#ifndef __BIG_ENDIAN_BITFIELD u16 locale; u8 reserved; s8 class; +#else + s8 class; + u8 reserved; + u16 locale; +#endif } __attribute__ ((packed)) members; u32 word; @@ -1291,14 +1549,20 @@ struct megasas_instance { dma_addr_t producer_h; u32 *consumer; dma_addr_t consumer_h; + struct MR_LD_VF_AFFILIATION *vf_affiliation; + dma_addr_t vf_affiliation_h; + struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111; + dma_addr_t vf_affiliation_111_h; + struct MR_CTRL_HB_HOST_MEM *hb_host_mem; + dma_addr_t hb_host_mem_h; u32 *reply_queue; dma_addr_t reply_queue_h; - unsigned long base_addr; struct megasas_register_set __iomem *reg_set; - + u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; + struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD]; u8 ld_ids[MEGASAS_MAX_LD_IDS]; s8 init_id; @@ -1348,6 +1612,7 @@ struct megasas_instance { u8 flag_ieee; u8 issuepend_done; u8 disableOnlineCtrlReset; + u8 UnevenSpanSupport; u8 adprecovery; unsigned long last_time; u32 mfiStatus; @@ -1365,7 +1630,72 @@ struct megasas_instance { unsigned long bar; long reset_flags; struct mutex reset_mutex; + struct timer_list sriov_heartbeat_timer; + char skip_heartbeat_timer_del; + u8 requestorId; + u64 initiator_sas_address; + u64 
ld_sas_address[64]; + char PlasmaFW111; + char mpio; int throttlequeuedepth; + u8 mask_interrupts; + u8 is_imr; +}; +struct MR_LD_VF_MAP { + u32 size; + union MR_LD_REF ref; + u8 ldVfCount; + u8 reserved[6]; + u8 policy[1]; +}; + +struct MR_LD_VF_AFFILIATION { + u32 size; + u8 ldCount; + u8 vfCount; + u8 thisVf; + u8 reserved[9]; + struct MR_LD_VF_MAP map[1]; +}; + +/* Plasma 1.11 FW backward compatibility structures */ +#define IOV_111_OFFSET 0x7CE +#define MAX_VIRTUAL_FUNCTIONS 8 + +struct IOV_111 { + u8 maxVFsSupported; + u8 numVFsEnabled; + u8 requestorId; + u8 reserved[5]; +}; + +struct MR_LD_VF_MAP_111 { + u8 targetId; + u8 reserved[3]; + u8 policy[MAX_VIRTUAL_FUNCTIONS]; +}; + +struct MR_LD_VF_AFFILIATION_111 { + u8 vdCount; + u8 vfCount; + u8 thisVf; + u8 reserved[5]; + struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES]; +}; + +struct MR_CTRL_HB_HOST_MEM { + struct { + u32 fwCounter; /* Firmware heart beat counter */ + struct { + u32 debugmode:1; /* 1=Firmware is in debug mode. + Heart beat will not be updated. */ + u32 reserved:31; + } debug; + u32 reserved_fw[6]; + u32 driverCounter; /* Driver heart beat counter. 0x20 */ + u32 reserved_driver[7]; + } HB; + u8 pad[0x400-0x40]; }; enum { @@ -1374,6 +1704,7 @@ enum { MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS = 2, MEGASAS_ADPRESET_SM_OPERATIONAL = 3, MEGASAS_HW_CRITICAL_ERROR = 4, + MEGASAS_ADPRESET_SM_POLLING = 5, MEGASAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD, }; @@ -1381,8 +1712,8 @@ struct megasas_instance_template { void (*fire_cmd)(struct megasas_instance *, dma_addr_t, \ u32, struct megasas_register_set __iomem *); - void (*enable_intr)(struct megasas_register_set __iomem *) ; - void (*disable_intr)(struct megasas_register_set __iomem *); + void (*enable_intr)(struct megasas_instance *); + void (*disable_intr)(struct megasas_instance *); int (*clear_intr)(struct megasas_register_set __iomem *); @@ -1488,7 +1819,16 @@ struct megasas_mgmt_info { int max_index; }; -#define msi_control_reg(base) (base + PCI_MSI_FLAGS) -#define PCI_MSIX_FLAGS_ENABLE (1 << 15) +u8 +MR_BuildRaidContext(struct megasas_instance *instance, + struct IO_REQUEST_INFO *io_info, + struct RAID_CONTEXT *pRAID_Context, + struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN); +u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); +struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); +u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map); +u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map); +u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map); +u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); #endif /*LSI_MEGARAID_SAS_H */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 9d53540207e..112799b131a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -18,7 +18,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * FILE: megaraid_sas_base.c - * Version : v06.506.00.00-rc1 + * Version : 06.803.01.00-rc1 * * Authors: LSI Corporation * Sreenivas Bagalkote @@ -75,6 +75,10 @@ static unsigned int msix_vectors; module_param(msix_vectors, int, S_IRUGO); MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW"); +static int allow_vf_ioctls; +module_param(allow_vf_ioctls, int, S_IRUGO); +MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. 
Default: 0"); + static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH; module_param(throttlequeuedepth, int, S_IRUGO); MODULE_PARM_DESC(throttlequeuedepth, @@ -92,6 +96,8 @@ MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); static int megasas_get_pd_list(struct megasas_instance *instance); +static int megasas_ld_list_query(struct megasas_instance *instance, + u8 query_type); static int megasas_issue_init_mfi(struct megasas_instance *instance); static int megasas_register_aen(struct megasas_instance *instance, u32 seq_num, u32 class_locale_word); @@ -120,15 +126,19 @@ static struct pci_device_id megasas_pci_table[] = { /* xscale IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, /* Fusion */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)}, + /* Plasma */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)}, /* Invader */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)}, + /* Fury */ {} }; MODULE_DEVICE_TABLE(pci, megasas_pci_table); static int megasas_mgmt_majorno; -static struct megasas_mgmt_info megasas_mgmt_info; +struct megasas_mgmt_info megasas_mgmt_info; static struct fasync_struct *megasas_async_queue; static DEFINE_MUTEX(megasas_async_queue_mutex); @@ -167,12 +177,15 @@ megasas_get_map_info(struct megasas_instance *instance); int megasas_sync_map_info(struct megasas_instance *instance); int -wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd); +wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, + int seconds); void megasas_reset_reply_desc(struct megasas_instance *instance); -u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map, - struct LD_LOAD_BALANCE_INFO *lbInfo); -int megasas_reset_fusion(struct Scsi_Host *shost); +int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout); void megasas_fusion_ocr_wq(struct work_struct *work); +static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, + int initial); +int megasas_check_mpio_paths(struct megasas_instance *instance, + struct scsi_cmnd *scmd); void megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) @@ -222,7 +235,9 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) cmd->scmd = NULL; cmd->frame_count = 0; if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && + (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) && (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && + (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) && (reset_devices)) cmd->frame->hdr.cmd = MFI_CMD_INVALID; list_add_tail(&cmd->list, &instance->cmd_pool); @@ -241,8 +256,10 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) * @regs: MFI register set */ static inline void -megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs) +megasas_enable_intr_xscale(struct megasas_instance *instance) { + struct megasas_register_set __iomem *regs; + regs = instance->reg_set; writel(0, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ @@ -254,9 +271,11 @@ megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs) * @regs: MFI register set */ static inline void -megasas_disable_intr_xscale(struct megasas_register_set __iomem * regs) +megasas_disable_intr_xscale(struct megasas_instance *instance) { + struct megasas_register_set __iomem *regs; u32 mask = 0x1f; + regs = instance->reg_set; writel(mask, ®s->outbound_intr_mask); /* 
Dummy readl to force pci flush */
readl(&regs->outbound_intr_mask);
@@ -369,13 +388,11 @@ static int
megasas_check_reset_xscale(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
- u32 consumer;
- consumer = *instance->consumer;
if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
- (*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) {
+ (le32_to_cpu(*instance->consumer) ==
+ MEGASAS_ADPRESET_INPROG_SIGN))
return 1;
- }
return 0;
}
@@ -410,8 +427,10 @@ static struct megasas_instance_template megasas_instance_template_xscale = {
* @regs: MFI register set
*/
static inline void
-megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
+megasas_enable_intr_ppc(struct megasas_instance *instance)
{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
writel(~0x80000000, &(regs)->outbound_intr_mask);
@@ -425,9 +444,11 @@ megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
* @regs: MFI register set
*/
static inline void
-megasas_disable_intr_ppc(struct megasas_register_set __iomem * regs)
+megasas_disable_intr_ppc(struct megasas_instance *instance)
{
+ struct megasas_register_set __iomem *regs;
u32 mask = 0xFFFFFFFF;
+ regs = instance->reg_set;
writel(mask, &regs->outbound_intr_mask);
/* Dummy readl to force pci flush */
readl(&regs->outbound_intr_mask);
@@ -528,8 +549,10 @@ static struct megasas_instance_template megasas_instance_template_ppc = {
* @regs: MFI register set
*/
static inline void
-megasas_enable_intr_skinny(struct megasas_register_set __iomem *regs)
+megasas_enable_intr_skinny(struct megasas_instance *instance)
{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
@@ -543,9 +566,11 @@ megasas_enable_intr_skinny(struct megasas_register_set __iomem *regs)
* @regs: MFI register set
*/
static inline void
-megasas_disable_intr_skinny(struct megasas_register_set __iomem *regs)
+megasas_disable_intr_skinny(struct megasas_instance *instance)
{
+ struct megasas_register_set __iomem *regs;
u32 mask = 0xFFFFFFFF;
+ regs = instance->reg_set;
writel(mask, &regs->outbound_intr_mask);
/* Dummy readl to force pci flush */
readl(&regs->outbound_intr_mask);
@@ -583,7 +608,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
/*
* Check if it is our interrupt
*/
- if ((megasas_read_fw_status_reg_gen2(regs) & MFI_STATE_MASK) ==
+ if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
MFI_STATE_FAULT) {
mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
} else
@@ -616,9 +641,10 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
{
unsigned long flags;
spin_lock_irqsave(&instance->hba_lock, flags);
- writel(0, &(regs)->inbound_high_queue_port);
- writel((frame_phys_addr | (frame_count<<1))|1,
- &(regs)->inbound_low_queue_port);
+ writel(upper_32_bits(frame_phys_addr),
+ &(regs)->inbound_high_queue_port);
+ writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
+ &(regs)->inbound_low_queue_port);
spin_unlock_irqrestore(&instance->hba_lock, flags);
}
@@ -663,8 +689,10 @@ static struct megasas_instance_template megasas_instance_template_skinny = {
* @regs: MFI register set
*/
static inline void
-megasas_enable_intr_gen2(struct megasas_register_set __iomem *regs)
+megasas_enable_intr_gen2(struct megasas_instance *instance)
{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
/* write ~0x00000005 (4 & 1) to the intr mask*/
@@ -679,9 +707,11 @@ megasas_enable_intr_gen2(struct megasas_register_set __iomem *regs)
* @regs: MFI register set
*/
static inline void
-megasas_disable_intr_gen2(struct megasas_register_set __iomem *regs)
+megasas_disable_intr_gen2(struct megasas_instance *instance)
{
+ struct megasas_register_set __iomem *regs;
u32 mask = 0xFFFFFFFF;
+ regs = instance->reg_set;
writel(mask, &regs->outbound_intr_mask);
/* Dummy readl to force pci flush */
readl(&regs->outbound_intr_mask);
@@ -711,7 +741,7 @@ megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
*/
status = readl(&regs->outbound_intr_status);
- if (status & MFI_GEN2_ENABLE_INTERRUPT_MASK) {
+ if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
}
if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
@@ -859,11 +889,12 @@ extern struct megasas_instance_template megasas_instance_template_fusion;
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
+ int seconds;
struct megasas_header *frame_hdr = &cmd->frame->hdr;
- frame_hdr->cmd_status = 0xFF;
- frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+ frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
+ frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
/*
* Issue the frame using inbound queue port
*/
@@ -873,13 +904,18 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
/*
* Wait for cmd_status to change
*/
- return wait_and_poll(instance, cmd);
+ if (instance->requestorId)
+ seconds = MEGASAS_ROUTINE_WAIT_TIME_VF;
+ else
+ seconds = MFI_POLL_TIMEOUT_SECS;
+ return wait_and_poll(instance, cmd, seconds);
}
/**
* megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
* @instance: Adapter soft state
* @cmd: Command to be issued
+ * @timeout: Timeout in seconds
*
* This function waits on an event for the command to be returned from ISR.
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
*/
static int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
- struct megasas_cmd *cmd)
+ struct megasas_cmd *cmd, int timeout)
{
+ int ret = 0;
cmd->cmd_status = ENODATA;
instance->instancet->issue_dcmd(instance, cmd);
-
- wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA);
+ if (timeout) {
+ ret = wait_event_timeout(instance->int_cmd_wait_q,
+ cmd->cmd_status != ENODATA, timeout * HZ);
+ if (!ret)
+ return 1;
+ } else
+ wait_event(instance->int_cmd_wait_q,
+ cmd->cmd_status != ENODATA);
return 0;
}
@@ -902,18 +945,20 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
* megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
* @instance: Adapter soft state
* @cmd_to_abort: Previously issued cmd to be aborted
+ * @timeout: Timeout in seconds
*
- * MFI firmware can abort previously issued AEN command (automatic event
+ notification). The megasas_issue_blocked_abort_cmd() issues such abort
+ cmd and waits for return status.
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
*/
static int
megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
- struct megasas_cmd *cmd_to_abort)
+ struct megasas_cmd *cmd_to_abort, int timeout)
{
struct megasas_cmd *cmd;
struct megasas_abort_frame *abort_fr;
+ int ret = 0;
cmd = megasas_get_cmd(instance);
@@ -927,20 +972,30 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
*/
abort_fr->cmd = MFI_CMD_ABORT;
abort_fr->cmd_status = 0xFF;
- abort_fr->flags = 0;
- abort_fr->abort_context = cmd_to_abort->index;
- abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
- abort_fr->abort_mfi_phys_addr_hi = 0;
+ abort_fr->flags = cpu_to_le16(0);
+ abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
+ abort_fr->abort_mfi_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
+ abort_fr->abort_mfi_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
cmd->sync_cmd = 1;
cmd->cmd_status = 0xFF;
instance->instancet->issue_dcmd(instance, cmd);
- /*
- * Wait for this cmd to complete
- */
- wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF);
+ if (timeout) {
+ ret = wait_event_timeout(instance->abort_cmd_wait_q,
+ cmd->cmd_status != ENODATA, timeout * HZ);
+ if (!ret) {
+ dev_err(&instance->pdev->dev, "Command timed out "
+ "from %s\n", __func__);
+ return 1;
+ }
+ } else
+ wait_event(instance->abort_cmd_wait_q,
+ cmd->cmd_status != ENODATA);
+
cmd->sync_cmd = 0;
megasas_return_cmd(instance, cmd);
@@ -969,8 +1024,8 @@ megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
- mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
- mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
+ mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
}
}
return sge_count;
@@ -998,8 +1053,8 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
- mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
- mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
+ mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
}
}
return sge_count;
@@ -1026,10 +1081,11 @@ megasas_make_sgl_skinny(struct megasas_instance *instance,
if (sge_count) {
scsi_for_each_sg(scp, os_sgl, sge_count, i) {
- mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
+ mfi_sgl->sge_skinny[i].length =
+ cpu_to_le32(sg_dma_len(os_sgl));
mfi_sgl->sge_skinny[i].phys_addr =
- sg_dma_address(os_sgl);
- mfi_sgl->sge_skinny[i].flag = 0;
+ cpu_to_le64(sg_dma_address(os_sgl));
+ mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
}
}
return sge_count;
@@ -1138,8 +1194,8 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
pthru->cdb_len = scp->cmd_len;
pthru->timeout = 0;
pthru->pad_0 = 0;
- pthru->flags = flags;
- pthru->data_xfer_len = scsi_bufflen(scp);
+ pthru->flags = cpu_to_le16(flags);
+ pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
@@ -1151,18 +1207,18 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
if ((scp->request->timeout / HZ) > 0xFFFF)
pthru->timeout = 0xFFFF;
else
- pthru->timeout = scp->request->timeout / HZ;
+ pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
}
/*
* Construct SGL
*/
if (instance->flag_ieee == 1) { -
pthru->flags |= MFI_FRAME_SGL64; + pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); pthru->sge_count = megasas_make_sgl_skinny(instance, scp, &pthru->sgl); } else if (IS_DMA64) { - pthru->flags |= MFI_FRAME_SGL64; + pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); pthru->sge_count = megasas_make_sgl64(instance, scp, &pthru->sgl); } else @@ -1179,8 +1235,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, * Sense info specific */ pthru->sense_len = SCSI_SENSE_BUFFERSIZE; - pthru->sense_buf_phys_addr_hi = 0; - pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; + pthru->sense_buf_phys_addr_hi = + cpu_to_le32(upper_32_bits(cmd->sense_phys_addr)); + pthru->sense_buf_phys_addr_lo = + cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); /* * Compute the total number of frames this command consumes. FW uses @@ -1231,7 +1289,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, ldio->timeout = 0; ldio->reserved_0 = 0; ldio->pad_0 = 0; - ldio->flags = flags; + ldio->flags = cpu_to_le16(flags); ldio->start_lba_hi = 0; ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0; @@ -1239,52 +1297,59 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, * 6-byte READ(0x08) or WRITE(0x0A) cdb */ if (scp->cmd_len == 6) { - ldio->lba_count = (u32) scp->cmnd[4]; - ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) | - ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; + ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]); + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) | + ((u32) scp->cmnd[2] << 8) | + (u32) scp->cmnd[3]); - ldio->start_lba_lo &= 0x1FFFFF; + ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF); } /* * 10-byte READ(0x28) or WRITE(0x2A) cdb */ else if (scp->cmd_len == 10) { - ldio->lba_count = (u32) scp->cmnd[8] | - ((u32) scp->cmnd[7] << 8); - ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | - ((u32) scp->cmnd[3] << 16) | - ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] | + ((u32) scp->cmnd[7] << 8)); + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | + (u32) scp->cmnd[5]); } /* * 12-byte READ(0xA8) or WRITE(0xAA) cdb */ else if (scp->cmd_len == 12) { - ldio->lba_count = ((u32) scp->cmnd[6] << 24) | - ((u32) scp->cmnd[7] << 16) | - ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; + ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) | + ((u32) scp->cmnd[7] << 16) | + ((u32) scp->cmnd[8] << 8) | + (u32) scp->cmnd[9]); - ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | - ((u32) scp->cmnd[3] << 16) | - ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | + (u32) scp->cmnd[5]); } /* * 16-byte READ(0x88) or WRITE(0x8A) cdb */ else if (scp->cmd_len == 16) { - ldio->lba_count = ((u32) scp->cmnd[10] << 24) | - ((u32) scp->cmnd[11] << 16) | - ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; + ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) | + ((u32) scp->cmnd[11] << 16) | + ((u32) scp->cmnd[12] << 8) | + (u32) scp->cmnd[13]); - ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) | - ((u32) scp->cmnd[7] << 16) | - ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; + ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) | + ((u32) scp->cmnd[7] << 16) | + ((u32) scp->cmnd[8] << 8) | + (u32) scp->cmnd[9]); - ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) | - ((u32) 
scp->cmnd[3] << 16) | - ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | + (u32) scp->cmnd[5]); } @@ -1292,11 +1357,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, * Construct SGL */ if (instance->flag_ieee) { - ldio->flags |= MFI_FRAME_SGL64; + ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); ldio->sge_count = megasas_make_sgl_skinny(instance, scp, &ldio->sgl); } else if (IS_DMA64) { - ldio->flags |= MFI_FRAME_SGL64; + ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); } else ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); @@ -1312,7 +1377,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, */ ldio->sense_len = SCSI_SENSE_BUFFERSIZE; ldio->sense_buf_phys_addr_hi = 0; - ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr; + ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr); /* * Compute the total number of frames this command consumes. FW uses @@ -1383,20 +1448,32 @@ megasas_dump_pending_frames(struct megasas_instance *instance) ldio = (struct megasas_io_frame *)cmd->frame; mfi_sgl = &ldio->sgl; sgcount = ldio->sge_count; - printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount); + printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x," + " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", + instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id, + le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi), + le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount); } else { pthru = (struct megasas_pthru_frame *) cmd->frame; mfi_sgl = &pthru->sgl; sgcount = pthru->sge_count; - printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount); + printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, " + "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", + instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id, + pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len), + le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount); } if(megasas_dbg_lvl & MEGASAS_DBG_LVL){ for (n = 0; n < sgcount; n++){ if (IS_DMA64) - printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ; + printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ", + le32_to_cpu(mfi_sgl->sge64[n].length), + le64_to_cpu(mfi_sgl->sge64[n].phys_addr)); else - printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ; + printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ", + le32_to_cpu(mfi_sgl->sge32[n].length), + le32_to_cpu(mfi_sgl->sge32[n].phys_addr)); } } printk(KERN_ERR "\n"); @@ -1471,6 +1548,28 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct 
scsi_cmnd return SCSI_MLQUEUE_HOST_BUSY; spin_lock_irqsave(&instance->hba_lock, flags); + + /* Check for an mpio path and adjust behavior */ + if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) { + if (megasas_check_mpio_paths(instance, scmd) == + (DID_RESET << 16)) { + spin_unlock_irqrestore(&instance->hba_lock, flags); + return SCSI_MLQUEUE_HOST_BUSY; + } else { + spin_unlock_irqrestore(&instance->hba_lock, flags); + scmd->result = DID_NO_CONNECT << 16; + done(scmd); + return 0; + } + } + + if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { + spin_unlock_irqrestore(&instance->hba_lock, flags); + scmd->result = DID_NO_CONNECT << 16; + done(scmd); + return 0; + } + if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { spin_unlock_irqrestore(&instance->hba_lock, flags); return SCSI_MLQUEUE_HOST_BUSY; @@ -1591,8 +1690,14 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance) if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || - (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) { + (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { writel(MFI_STOP_ADP, &instance->reg_set->doorbell); + /* Flush */ + readl(&instance->reg_set->doorbell); + if (instance->mpio && instance->requestorId) + memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); } else { writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell); } @@ -1615,10 +1720,7 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance) spin_lock_irqsave(instance->host->host_lock, flags); instance->flag &= ~MEGASAS_FW_BUSY; - if ((instance->pdev->device == - PCI_DEVICE_ID_LSI_SAS0073SKINNY) || - (instance->pdev->device == - PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { + if (instance->is_imr) { instance->host->can_queue = instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; } else @@ -1651,11 +1753,11 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) spin_lock_irqsave(&instance->completion_lock, flags); - producer = *instance->producer; - consumer = *instance->consumer; + producer = le32_to_cpu(*instance->producer); + consumer = le32_to_cpu(*instance->consumer); while (consumer != producer) { - context = instance->reply_queue[consumer]; + context = le32_to_cpu(instance->reply_queue[consumer]); if (context >= instance->max_fw_cmds) { printk(KERN_ERR "Unexpected context value %x\n", context); @@ -1672,7 +1774,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) } } - *instance->consumer = producer; + *instance->consumer = cpu_to_le32(producer); spin_unlock_irqrestore(&instance->completion_lock, flags); @@ -1682,6 +1784,25 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) megasas_check_and_restore_queue_depth(instance); } +/** + * megasas_start_timer - Initializes a timer object + * @instance: Adapter soft state + * @timer: timer object to be initialized + * @fn: timer function + * @interval: time interval between timer function call + * + */ +void megasas_start_timer(struct megasas_instance *instance, + struct timer_list *timer, + void *fn, unsigned long interval) +{ + init_timer(timer); + timer->expires = jiffies + interval; + timer->data = (unsigned long)instance; + timer->function = fn; + add_timer(timer); +} + static void megasas_internal_reset_defer_cmds(struct megasas_instance *instance); @@ -1693,9 +1814,9 
@@ void megasas_do_ocr(struct megasas_instance *instance) if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { - *instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN; + *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); } - instance->instancet->disable_intr(instance->reg_set); + instance->instancet->disable_intr(instance); instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; instance->issuepend_done = 0; @@ -1704,6 +1825,295 @@ void megasas_do_ocr(struct megasas_instance *instance) process_fw_state_change_wq(&instance->work_init); } +/* This function will get the current SR-IOV LD/VF affiliation */ +static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, + int initial) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; + struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; + struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; + dma_addr_t new_affiliation_h; + dma_addr_t new_affiliation_111_h; + int ld, retval = 0; + u8 thisVf; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + printk(KERN_DEBUG "megasas: megasas_get_ld_vf_" + "affiliation: Failed to get cmd for scsi%d.\n", + instance->host->host_no); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + if (!instance->vf_affiliation && !instance->vf_affiliation_111) { + printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF " + "affiliation for scsi%d.\n", instance->host->host_no); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + + if (initial) + if (instance->PlasmaFW111) + memset(instance->vf_affiliation_111, 0, + sizeof(struct MR_LD_VF_AFFILIATION_111)); + else + memset(instance->vf_affiliation, 0, + (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION)); + else { + if (instance->PlasmaFW111) + new_affiliation_111 = + pci_alloc_consistent(instance->pdev, + sizeof(struct MR_LD_VF_AFFILIATION_111), + &new_affiliation_111_h); + else + new_affiliation = + pci_alloc_consistent(instance->pdev, + (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION), + &new_affiliation_h); + if (!new_affiliation && !new_affiliation_111) { + printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate " + "memory for new affiliation for scsi%d.\n", + instance->host->host_no); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + if (instance->PlasmaFW111) + memset(new_affiliation_111, 0, + sizeof(struct MR_LD_VF_AFFILIATION_111)); + else + memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION)); + } + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_BOTH; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + if (instance->PlasmaFW111) { + dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111); + dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111; + } else { + dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION); + dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS; + } + + if (initial) { + if (instance->PlasmaFW111) + dcmd->sgl.sge32[0].phys_addr = + instance->vf_affiliation_111_h; + else + dcmd->sgl.sge32[0].phys_addr = + instance->vf_affiliation_h; + } else { + if (instance->PlasmaFW111) + dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h; + else + dcmd->sgl.sge32[0].phys_addr = new_affiliation_h; + } + if 
(instance->PlasmaFW111) + dcmd->sgl.sge32[0].length = + sizeof(struct MR_LD_VF_AFFILIATION_111); + else + dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION); + + printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for " + "scsi%d\n", instance->host->host_no); + + megasas_issue_blocked_cmd(instance, cmd, 0); + + if (dcmd->cmd_status) { + printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD" + " failed with status 0x%x for scsi%d.\n", + dcmd->cmd_status, instance->host->host_no); + retval = 1; /* Do a scan if we couldn't get affiliation */ + goto out; + } + + if (!initial) { + if (instance->PlasmaFW111) { + if (!new_affiliation_111->vdCount) { + printk(KERN_WARNING "megasas: SR-IOV: Got new " + "LD/VF affiliation for passive path " + "for scsi%d.\n", + instance->host->host_no); + retval = 1; + goto out; + } + thisVf = new_affiliation_111->thisVf; + for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) + if (instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) { + printk(KERN_WARNING "megasas: SR-IOV: " + "Got new LD/VF affiliation " + "for scsi%d.\n", + instance->host->host_no); + memcpy(instance->vf_affiliation_111, + new_affiliation_111, + sizeof(struct MR_LD_VF_AFFILIATION_111)); + retval = 1; + goto out; + } + } else { + if (!new_affiliation->ldCount) { + printk(KERN_WARNING "megasas: SR-IOV: Got new " + "LD/VF affiliation for passive " + "path for scsi%d.\n", + instance->host->host_no); + retval = 1; + goto out; + } + newmap = new_affiliation->map; + savedmap = instance->vf_affiliation->map; + thisVf = new_affiliation->thisVf; + for (ld = 0 ; ld < new_affiliation->ldCount; ld++) { + if (savedmap->policy[thisVf] != + newmap->policy[thisVf]) { + printk(KERN_WARNING "megasas: SR-IOV: " + "Got new LD/VF affiliation " + "for scsi%d.\n", + instance->host->host_no); + memcpy(instance->vf_affiliation, + new_affiliation, + new_affiliation->size); + retval = 1; + goto out; + } + savedmap = (struct MR_LD_VF_MAP *) + ((unsigned char *)savedmap + + savedmap->size); + newmap = (struct MR_LD_VF_MAP *) + ((unsigned char *)newmap + + newmap->size); + } + } + } +out: + if (new_affiliation) { + if (instance->PlasmaFW111) + pci_free_consistent(instance->pdev, + sizeof(struct MR_LD_VF_AFFILIATION_111), + new_affiliation_111, + new_affiliation_111_h); + else + pci_free_consistent(instance->pdev, + (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION), + new_affiliation, new_affiliation_h); + } + megasas_return_cmd(instance, cmd); + + return retval; +} + +/* This function will tell FW to start the SR-IOV heartbeat */ +int megasas_sriov_start_heartbeat(struct megasas_instance *instance, + int initial) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + int retval = 0; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + printk(KERN_DEBUG "megasas: megasas_sriov_start_heartbeat: " + "Failed to get cmd for scsi%d.\n", + instance->host->host_no); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + if (initial) { + instance->hb_host_mem = + pci_alloc_consistent(instance->pdev, + sizeof(struct MR_CTRL_HB_HOST_MEM), + &instance->hb_host_mem_h); + if (!instance->hb_host_mem) { + printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate" + " memory for heartbeat host memory for " + "scsi%d.\n", instance->host->host_no); + retval = -ENOMEM; + goto out; + } + memset(instance->hb_host_mem, 0, + sizeof(struct MR_CTRL_HB_HOST_MEM)); + } + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + 
dcmd->mbox.s[0] = sizeof(struct MR_CTRL_HB_HOST_MEM); + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_BOTH; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = sizeof(struct MR_CTRL_HB_HOST_MEM); + dcmd->opcode = MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC; + dcmd->sgl.sge32[0].phys_addr = instance->hb_host_mem_h; + dcmd->sgl.sge32[0].length = sizeof(struct MR_CTRL_HB_HOST_MEM); + + printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n", + instance->host->host_no); + + if (!megasas_issue_polled(instance, cmd)) { + retval = 0; + } else { + printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST" + "_MEM_ALLOC DCMD timed out for scsi%d\n", + instance->host->host_no); + retval = 1; + goto out; + } + + + if (dcmd->cmd_status) { + printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST" + "_MEM_ALLOC DCMD failed with status 0x%x for scsi%d\n", + dcmd->cmd_status, + instance->host->host_no); + retval = 1; + goto out; + } + +out: + megasas_return_cmd(instance, cmd); + + return retval; +} + +/* Handler for SR-IOV heartbeat */ +void megasas_sriov_heartbeat_handler(unsigned long instance_addr) +{ + struct megasas_instance *instance = + (struct megasas_instance *)instance_addr; + + if (instance->hb_host_mem->HB.fwCounter != + instance->hb_host_mem->HB.driverCounter) { + instance->hb_host_mem->HB.driverCounter = + instance->hb_host_mem->HB.fwCounter; + mod_timer(&instance->sriov_heartbeat_timer, + jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); + } else { + printk(KERN_WARNING "megasas: SR-IOV: Heartbeat never " + "completed for scsi%d\n", instance->host->host_no); + schedule_work(&instance->work_init); + } +} + /** * megasas_wait_for_outstanding - Wait for all outstanding cmds * @instance: Adapter soft state @@ -1966,8 +2376,10 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) * First wait for all commands to complete */ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || - (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) - ret = megasas_reset_fusion(scmd->device->host); + (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) + ret = megasas_reset_fusion(scmd->device->host, 1); else ret = megasas_generic_reset(scmd); @@ -2099,6 +2511,7 @@ static struct scsi_host_template megasas_template = { .bios_param = megasas_bios_param, .use_clustering = ENABLE_CLUSTERING, .change_queue_depth = megasas_change_queue_depth, + .no_write_same = 1, }; /** @@ -2162,6 +2575,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, struct megasas_header *hdr = &cmd->frame->hdr; unsigned long flags; struct fusion_context *fusion = instance->ctrl_context; + u32 opcode; /* flag for the retry reset */ cmd->retry_for_fw_reset = 0; @@ -2263,9 +2677,11 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, case MFI_CMD_SMP: case MFI_CMD_STP: case MFI_CMD_DCMD: + opcode = le32_to_cpu(cmd->frame->dcmd.opcode); /* Check for LD map update */ - if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && - (cmd->frame->dcmd.mbox.b[1] == 1)) { + if ((opcode == MR_DCMD_LD_MAP_GET_INFO) + && (cmd->frame->dcmd.mbox.b[1] == 1)) { + fusion->fast_path_io = 0; spin_lock_irqsave(instance->host->host_lock, flags); if (cmd->frame->hdr.cmd_status != 0) { if (cmd->frame->hdr.cmd_status != @@ -2283,9 +2699,13 @@ megasas_complete_cmd(struct megasas_instance 
*instance, struct megasas_cmd *cmd, } else instance->map_id++; megasas_return_cmd(instance, cmd); - if (MR_ValidateMapInfo( - fusion->ld_map[(instance->map_id & 1)], - fusion->load_balance_info)) + + /* + * Set fast path IO to ZERO. + * Validate Map will set proper value. + * Meanwhile all IOs will go as LD IO. + */ + if (MR_ValidateMapInfo(instance)) fusion->fast_path_io = 1; else fusion->fast_path_io = 0; @@ -2294,8 +2714,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, flags); break; } - if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || - cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { + if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || + opcode == MR_DCMD_CTRL_EVENT_GET) { spin_lock_irqsave(&poll_aen_lock, flags); megasas_poll_wait_aen = 0; spin_unlock_irqrestore(&poll_aen_lock, flags); @@ -2304,7 +2724,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, /* * See if got an event notification */ - if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) + if (opcode == MR_DCMD_CTRL_EVENT_WAIT) megasas_service_aen(instance, cmd); else megasas_complete_int_cmd(instance, cmd); @@ -2477,7 +2897,7 @@ process_fw_state_change_wq(struct work_struct *work) printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault" "state, restarting it...\n"); - instance->instancet->disable_intr(instance->reg_set); + instance->instancet->disable_intr(instance); atomic_set(&instance->fw_outstanding, 0); atomic_set(&instance->fw_reset_no_pci_access, 1); @@ -2518,7 +2938,7 @@ process_fw_state_change_wq(struct work_struct *work) spin_lock_irqsave(&instance->hba_lock, flags); instance->adprecovery = MEGASAS_HBA_OPERATIONAL; spin_unlock_irqrestore(&instance->hba_lock, flags); - instance->instancet->enable_intr(instance->reg_set); + instance->instancet->enable_intr(instance); megasas_issue_pending_cmds_again(instance); instance->issuepend_done = 1; @@ -2577,11 +2997,11 @@ megasas_deplete_reply_queue(struct megasas_instance *instance, PCI_DEVICE_ID_LSI_VERDE_ZCR)) { *instance->consumer = - MEGASAS_ADPRESET_INPROG_SIGN; + cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); } - instance->instancet->disable_intr(instance->reg_set); + instance->instancet->disable_intr(instance); instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; instance->issuepend_done = 0; @@ -2641,7 +3061,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) u32 cur_state; u32 abs_state, curr_abs_state; - fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; + abs_state = instance->instancet->read_fw_status_reg(instance->reg_set); + fw_state = abs_state & MFI_STATE_MASK; if (fw_state != MFI_STATE_READY) printk(KERN_INFO "megasas: Waiting for FW to come to ready" @@ -2649,9 +3070,6 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) while (fw_state != MFI_STATE_READY) { - abs_state = - instance->instancet->read_fw_status_reg(instance->reg_set); - switch (fw_state) { case MFI_STATE_FAULT: @@ -2672,9 +3090,13 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || (instance->pdev->device == - PCI_DEVICE_ID_LSI_FUSION) || + PCI_DEVICE_ID_LSI_FUSION) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_PLASMA) || (instance->pdev->device == - PCI_DEVICE_ID_LSI_INVADER)) { + PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_FURY)) { writel( MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 
&instance->reg_set->doorbell); @@ -2696,7 +3118,11 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || (instance->pdev->device == - PCI_DEVICE_ID_LSI_INVADER)) { + PCI_DEVICE_ID_LSI_PLASMA) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_FURY)) { writel(MFI_INIT_HOTPLUG, &instance->reg_set->doorbell); } else @@ -2711,7 +3137,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) /* * Bring it to READY state; assuming max wait 10 secs */ - instance->instancet->disable_intr(instance->reg_set); + instance->instancet->disable_intr(instance); if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == @@ -2719,13 +3145,21 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || (instance->pdev->device - == PCI_DEVICE_ID_LSI_INVADER)) { + == PCI_DEVICE_ID_LSI_PLASMA) || + (instance->pdev->device + == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device + == PCI_DEVICE_ID_LSI_FURY)) { writel(MFI_RESET_FLAGS, &instance->reg_set->doorbell); if ((instance->pdev->device == - PCI_DEVICE_ID_LSI_FUSION) || - (instance->pdev->device == - PCI_DEVICE_ID_LSI_INVADER)) { + PCI_DEVICE_ID_LSI_FUSION) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_PLASMA) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_FURY)) { for (i = 0; i < (10 * 1000); i += 20) { if (readl( &instance-> @@ -2787,10 +3221,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) * The cur_state should not last for more than max_wait secs */ for (i = 0; i < (max_wait * 1000); i++) { - fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & - MFI_STATE_MASK ; - curr_abs_state = - instance->instancet->read_fw_status_reg(instance->reg_set); + curr_abs_state = instance->instancet-> + read_fw_status_reg(instance->reg_set); if (abs_state == curr_abs_state) { msleep(1); @@ -2806,6 +3238,9 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) "in %d secs\n", fw_state, max_wait); return -ENODEV; } + + abs_state = curr_abs_state; + fw_state = curr_abs_state & MFI_STATE_MASK; } printk(KERN_INFO "megasas: FW now in Ready state\n"); @@ -2946,10 +3381,12 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) } memset(cmd->frame, 0, total_sz); - cmd->frame->io.context = cmd->index; + cmd->frame->io.context = cpu_to_le32(cmd->index); cmd->frame->io.pad_0 = 0; if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && + (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) && (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && + (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) && (reset_devices)) cmd->frame->hdr.cmd = MFI_CMD_INVALID; } @@ -3105,13 +3542,13 @@ megasas_get_pd_list(struct megasas_instance *instance) dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); - dcmd->opcode = MR_DCMD_PD_LIST_QUERY; - dcmd->sgl.sge32[0].phys_addr = ci_h; - dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); + dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); + dcmd->opcode = 
cpu_to_le32(MR_DCMD_PD_LIST_QUERY); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); if (!megasas_issue_polled(instance, cmd)) { ret = 0; @@ -3126,22 +3563,24 @@ megasas_get_pd_list(struct megasas_instance *instance) pd_addr = ci->addr; if ( ret == 0 && - (ci->count < + (le32_to_cpu(ci->count) < (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) { - memset(instance->pd_list, 0, + memset(instance->local_pd_list, 0, MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); - for (pd_index = 0; pd_index < ci->count; pd_index++) { + for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { - instance->pd_list[pd_addr->deviceId].tid = - pd_addr->deviceId; - instance->pd_list[pd_addr->deviceId].driveType = + instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid = + le16_to_cpu(pd_addr->deviceId); + instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType = pd_addr->scsiDevType; - instance->pd_list[pd_addr->deviceId].driveState = + instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState = MR_PD_STATE_SYSTEM; pd_addr++; } + memcpy(instance->pd_list, instance->local_pd_list, + sizeof(instance->pd_list)); } pci_free_consistent(instance->pdev, @@ -3169,6 +3608,7 @@ megasas_get_ld_list(struct megasas_instance *instance) struct megasas_dcmd_frame *dcmd; struct MR_LD_LIST *ci; dma_addr_t ci_h = 0; + u32 ld_count; cmd = megasas_get_cmd(instance); @@ -3195,12 +3635,12 @@ megasas_get_ld_list(struct megasas_instance *instance) dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; - dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); - dcmd->opcode = MR_DCMD_LD_GET_LIST; - dcmd->sgl.sge32[0].phys_addr = ci_h; - dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST)); dcmd->pad_0 = 0; if (!megasas_issue_polled(instance, cmd)) { @@ -3209,12 +3649,14 @@ megasas_get_ld_list(struct megasas_instance *instance) ret = -1; } + ld_count = le32_to_cpu(ci->ldCount); + /* the following function will get the instance PD LIST */ - if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) { + if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) { memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); - for (ld_index = 0; ld_index < ci->ldCount; ld_index++) { + for (ld_index = 0; ld_index < ld_count; ld_index++) { if (ci->ldList[ld_index].state != 0) { ids = ci->ldList[ld_index].ref.targetId; instance->ld_ids[ids] = @@ -3233,6 +3675,87 @@ megasas_get_ld_list(struct megasas_instance *instance) } /** + * megasas_ld_list_query - Returns FW's ld_list structure + * @instance: Adapter soft state + * @query_type: ld_list query type + * + * Issues an internal command (DCMD) to get the FW's logical drive (LD) + * list structure. This information is mainly used to find out which LDs + * the FW currently exposes to the host.
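+ * + * A non-zero return means the query DCMD is unsupported or failed; + * callers then fall back to the legacy DCMD, as done elsewhere in + * this patch: + * + * if (megasas_ld_list_query(instance, + * MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) + * megasas_get_ld_list(instance);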
+ */ +static int +megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) +{ + int ret = 0, ld_index = 0, ids = 0; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct MR_LD_TARGETID_LIST *ci; + dma_addr_t ci_h = 0; + u32 tgtid_count; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + printk(KERN_WARNING + "megasas:(megasas_ld_list_query): Failed to get cmd\n"); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + ci = pci_alloc_consistent(instance->pdev, + sizeof(struct MR_LD_TARGETID_LIST), &ci_h); + + if (!ci) { + printk(KERN_WARNING + "megasas: Failed to alloc mem for ld_list_query\n"); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + + memset(ci, 0, sizeof(*ci)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->mbox.b[0] = query_type; + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); + dcmd->timeout = 0; + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); + dcmd->pad_0 = 0; + + if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) { + ret = 0; + } else { + /* On failure, call older LD list DCMD */ + ret = 1; + } + + tgtid_count = le32_to_cpu(ci->count); + + if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) { + memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); + for (ld_index = 0; ld_index < tgtid_count; ld_index++) { + ids = ci->targetId[ld_index]; + instance->ld_ids[ids] = ci->targetId[ld_index]; + } + + } + + pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), + ci, ci_h); + + megasas_return_cmd(instance, cmd); + + return ret; +} + +/** * megasas_get_controller_info - Returns FW's controller structure * @instance: Adapter soft state * @ctrl_info: Controller information structure @@ -3275,13 +3798,13 @@ megasas_get_ctrl_info(struct megasas_instance *instance, dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info); - dcmd->opcode = MR_DCMD_CTRL_GET_INFO; - dcmd->sgl.sge32[0].phys_addr = ci_h; - dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info); + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info)); if (!megasas_issue_polled(instance, cmd)) { ret = 0; @@ -3337,22 +3860,25 @@ megasas_issue_init_mfi(struct megasas_instance *instance) memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); init_frame->context = context; - initq_info->reply_queue_entries = instance->max_fw_cmds + 1; - initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h; + initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); + initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); - initq_info->producer_index_phys_addr_lo = instance->producer_h; - initq_info->consumer_index_phys_addr_lo = instance->consumer_h; + initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); + initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); init_frame->cmd = 
MFI_CMD_INIT; init_frame->cmd_status = 0xFF; - init_frame->queue_info_new_phys_addr_lo = initq_info_h; + init_frame->queue_info_new_phys_addr_lo = + cpu_to_le32(lower_32_bits(initq_info_h)); + init_frame->queue_info_new_phys_addr_hi = + cpu_to_le32(upper_32_bits(initq_info_h)); - init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info); + init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); /* * disable the intr before firing the init frame to FW */ - instance->instancet->disable_intr(instance->reg_set); + instance->instancet->disable_intr(instance); /* * Issue the init frame in polled mode @@ -3459,23 +3985,25 @@ static int megasas_init_fw(struct megasas_instance *instance) { u32 max_sectors_1; u32 max_sectors_2; - u32 tmp_sectors, msix_enable; + u32 tmp_sectors, msix_enable, scratch_pad_2; + resource_size_t base_addr; struct megasas_register_set __iomem *reg_set; struct megasas_ctrl_info *ctrl_info; unsigned long bar_list; - int i; + int i, loop, fw_msix_count = 0; + struct IOV_111 *iovPtr; /* Find first memory bar */ bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); - instance->base_addr = pci_resource_start(instance->pdev, instance->bar); if (pci_request_selected_regions(instance->pdev, instance->bar, "megasas: LSI")) { printk(KERN_DEBUG "megasas: IO memory region busy!\n"); return -EBUSY; } - instance->reg_set = ioremap_nocache(instance->base_addr, 8192); + base_addr = pci_resource_start(instance->pdev, instance->bar); + instance->reg_set = ioremap_nocache(base_addr, 8192); if (!instance->reg_set) { printk(KERN_DEBUG "megasas: Failed to map IO mem\n"); @@ -3486,7 +4014,9 @@ static int megasas_init_fw(struct megasas_instance *instance) switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: + case PCI_DEVICE_ID_LSI_PLASMA: case PCI_DEVICE_ID_LSI_INVADER: + case PCI_DEVICE_ID_LSI_FURY: instance->instancet = &megasas_instance_template_fusion; break; case PCI_DEVICE_ID_LSI_SAS1078R: @@ -3508,26 +4038,66 @@ static int megasas_init_fw(struct megasas_instance *instance) break; } + if (megasas_transition_to_ready(instance, 0)) { + atomic_set(&instance->fw_reset_no_pci_access, 1); + instance->instancet->adp_reset + (instance, instance->reg_set); + atomic_set(&instance->fw_reset_no_pci_access, 0); + dev_info(&instance->pdev->dev, + "megasas: FW restarted successfully from %s!\n", + __func__); + + /* wait about 30 seconds before retrying */ + ssleep(30); + + if (megasas_transition_to_ready(instance, 0)) + goto fail_ready_state; + } + /* - * We expect the FW state to be READY + * MSI-X host index 0 is common for all adapters. + * It is used for all MPT based Adapters. 
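+ * The per-queue reply post host index addresses are cached at init: + * index 0 always sits at MPI2_REPLY_POST_HOST_INDEX_OFFSET, while on + * Invader/Fury indexes 1-15 are read below from + * MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET at 0x10-byte strides.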
*/ - if (megasas_transition_to_ready(instance, 0)) - goto fail_ready_state; + instance->reply_post_host_index_addr[0] = + (u32 *)((u8 *)instance->reg_set + + MPI2_REPLY_POST_HOST_INDEX_OFFSET); /* Check if MSI-X is supported while in ready state */ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & 0x4000000) >> 0x1a; if (msix_enable && !msix_disable) { + scratch_pad_2 = readl + (&instance->reg_set->outbound_scratch_pad_2); /* Check max MSI-X vectors */ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || - (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) { - instance->msix_vectors = (readl(&instance->reg_set-> - outbound_scratch_pad_2 - ) & 0x1F) + 1; + (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) { + instance->msix_vectors = (scratch_pad_2 + & MR_MAX_REPLY_QUEUES_OFFSET) + 1; + fw_msix_count = instance->msix_vectors; if (msix_vectors) instance->msix_vectors = min(msix_vectors, instance->msix_vectors); + } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) + || (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { + /* Invader/Fury supports more than 8 MSI-X */ + instance->msix_vectors = ((scratch_pad_2 + & MR_MAX_REPLY_QUEUES_EXT_OFFSET) + >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; + fw_msix_count = instance->msix_vectors; + /* Save 1-15 reply post index address to local memory + * Index 0 is already saved from reg offset + * MPI2_REPLY_POST_HOST_INDEX_OFFSET + */ + for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { + instance->reply_post_host_index_addr[loop] = + (u32 *)((u8 *)instance->reg_set + + MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + + (loop * 0x10)); + } + if (msix_vectors) + instance->msix_vectors = min(msix_vectors, + instance->msix_vectors); } else instance->msix_vectors = 1; /* Don't bother allocating more MSI-X vectors than cpus */ @@ -3547,6 +4117,12 @@ static int megasas_init_fw(struct megasas_instance *instance) } } else instance->msix_vectors = 0; + + dev_info(&instance->pdev->dev, "[scsi%d]: FW supports" + "<%d> MSIX vector,Online CPUs: <%d>," + "Current MSIX <%d>\n", instance->host->host_no, + fw_msix_count, (unsigned int)num_online_cpus(), + instance->msix_vectors); } /* Get operational params, sge flags, send init cmd to controller */ @@ -3561,10 +4137,15 @@ static int megasas_init_fw(struct megasas_instance *instance) memset(instance->pd_list, 0 , (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); - megasas_get_pd_list(instance); + if (megasas_get_pd_list(instance) < 0) { + printk(KERN_ERR "megasas: failed to get PD list\n"); + goto fail_init_adapter; + } memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); - megasas_get_ld_list(instance); + if (megasas_ld_list_query(instance, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) + megasas_get_ld_list(instance); ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); @@ -3581,14 +4162,56 @@ static int megasas_init_fw(struct megasas_instance *instance) if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) { max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * - ctrl_info->max_strips_per_io; - max_sectors_2 = ctrl_info->max_request_size; + le16_to_cpu(ctrl_info->max_strips_per_io); + max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); + + /*Check whether controller is iMR or MR */ + if (ctrl_info->memory_size) { + instance->is_imr = 0; + dev_info(&instance->pdev->dev, "Controller type: MR," + "Memory size is: %dMB\n", + le16_to_cpu(ctrl_info->memory_size)); + } else { + instance->is_imr = 1; + 
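/* iMR: no on-board controller memory (memory_size == 0); the + * throttlequeuedepth validation later in this function keys off + * is_imr rather than the Skinny device IDs. */ + 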
dev_info(&instance->pdev->dev, + "Controller type: iMR\n"); + } + /* OnOffProperties are converted into CPU arch */ + le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); instance->disableOnlineCtrlReset = ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; - } + /* adapterOperations2 are converted into CPU arch */ + le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); + instance->mpio = ctrl_info->adapterOperations2.mpio; + instance->UnevenSpanSupport = + ctrl_info->adapterOperations2.supportUnevenSpans; + if (instance->UnevenSpanSupport) { + struct fusion_context *fusion = instance->ctrl_context; + dev_info(&instance->pdev->dev, "FW supports: " + "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport); + if (MR_ValidateMapInfo(instance)) + fusion->fast_path_io = 1; + else + fusion->fast_path_io = 0; + } + if (ctrl_info->host_interface.SRIOV) { + if (!ctrl_info->adapterOperations2.activePassive) + instance->PlasmaFW111 = 1; + + if (!instance->PlasmaFW111) + instance->requestorId = + ctrl_info->iov.requestorId; + else { + iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET); + instance->requestorId = iovPtr->requestorId; + } + printk(KERN_WARNING "megaraid_sas: I am VF " + "requestorId %d\n", instance->requestorId); + } + } instance->max_sectors_per_req = instance->max_num_sge * PAGE_SIZE / 512; if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) @@ -3597,8 +4220,7 @@ static int megasas_init_fw(struct megasas_instance *instance) kfree(ctrl_info); /* Check for valid throttlequeuedepth module parameter */ - if (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY || - instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) { + if (instance->is_imr) { if (throttlequeuedepth > (instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS)) instance->throttlequeuedepth = @@ -3621,6 +4243,17 @@ static int megasas_init_fw(struct megasas_instance *instance) tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); + /* Launch SR-IOV heartbeat timer */ + if (instance->requestorId) { + if (!megasas_sriov_start_heartbeat(instance, 1)) + megasas_start_timer(instance, + &instance->sriov_heartbeat_timer, + megasas_sriov_heartbeat_handler, + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); + else + instance->skip_heartbeat_timer_del = 1; + } + return 0; fail_init_adapter: @@ -3695,20 +4328,27 @@ megasas_get_seq_num(struct megasas_instance *instance, dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info); - dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; - dcmd->sgl.sge32[0].phys_addr = el_info_h; - dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info); - - megasas_issue_blocked_cmd(instance, cmd); - - /* - * Copy the data back into callers buffer - */ - memcpy(eli, el_info, sizeof(struct megasas_evt_log_info)); + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info)); + + if (megasas_issue_blocked_cmd(instance, cmd, 30)) + dev_err(&instance->pdev->dev, "Command timed out " + "from %s\n", __func__); + else { + /* + * Copy the data back into caller's buffer + */ + eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num); + 
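/* (The remaining fields are byte-swapped the same way: el_info is + * the FW's little-endian DMA buffer, eli is the caller's CPU-order + * copy.) */ + 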
eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num); + eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num); + eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num); + eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num); + } pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), el_info, el_info_h); @@ -3755,6 +4395,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, if (instance->aen_cmd) { prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; + prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale); /* * A class whose enum value is smaller is inclusive of all @@ -3783,7 +4424,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, instance->aen_cmd->abort_aen = 1; ret_val = megasas_issue_blocked_abort_cmd(instance, instance-> - aen_cmd); + aen_cmd, 30); if (ret_val) { printk(KERN_DEBUG "megasas: Failed to abort " @@ -3810,16 +4451,16 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); + dcmd->mbox.w[0] = cpu_to_le32(seq_num); instance->last_seq_num = seq_num; - dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); - dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; - dcmd->mbox.w[0] = seq_num; - dcmd->mbox.w[1] = curr_aen.word; - dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h; - dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail); + dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail)); if (instance->aen_cmd != NULL) { megasas_return_cmd(instance, cmd); @@ -3865,8 +4506,9 @@ static int megasas_start_aen(struct megasas_instance *instance) class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; - return megasas_register_aen(instance, eli.newest_seq_num + 1, - class_locale.word); + return megasas_register_aen(instance, + eli.newest_seq_num + 1, + class_locale.word); } /** @@ -3882,8 +4524,7 @@ static int megasas_io_attach(struct megasas_instance *instance) */ host->irq = instance->pdev->irq; host->unique_id = instance->unique_id; - if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || - (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { + if (instance->is_imr) { host->can_queue = instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; } else @@ -3925,7 +4566,9 @@ static int megasas_io_attach(struct megasas_instance *instance) /* Fusion only supports host reset */ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || - (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) { + (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { host->hostt->eh_device_reset_handler = NULL; host->hostt->eh_bus_reset_handler = NULL; } @@ -3961,6 +4604,20 @@ megasas_set_dma_mask(struct pci_dev *pdev) if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) goto fail_set_dma_mask; } + /* + * Ensure that all data structures are allocated in 32-bit + * memory. 
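+ * The driver's MFI SGLs use 32-bit sge32 descriptors and + * pci_alloc_consistent is assumed to return 32-bit addresses, so the + * coherent mask must stay at 32 bits even when the streaming mask is + * 64-bit; if that fails, fall back to all-32-bit masks below.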
+ */ + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { + /* Try 32bit DMA mask and 32 bit Consistent dma mask */ + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) + && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + dev_info(&pdev->dev, "set 32bit DMA mask" + "and 32 bit consistent mask\n"); + else + goto fail_set_dma_mask; + } + return 0; fail_set_dma_mask: @@ -3975,7 +4632,7 @@ fail_set_dma_mask: static int megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { - int rval, pos, i, j; + int rval, pos, i, j, cpu; struct Scsi_Host *host; struct megasas_instance *instance; u16 control = 0; @@ -3984,12 +4641,12 @@ static int megasas_probe_one(struct pci_dev *pdev, if (reset_devices) { pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); if (pos) { - pci_read_config_word(pdev, msi_control_reg(pos), + pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &control); if (control & PCI_MSIX_FLAGS_ENABLE) { dev_info(&pdev->dev, "resetting MSI-X\n"); pci_write_config_word(pdev, - msi_control_reg(pos), + pos + PCI_MSIX_FLAGS, control & ~PCI_MSIX_FLAGS_ENABLE); } @@ -4035,7 +4692,9 @@ static int megasas_probe_one(struct pci_dev *pdev, switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: + case PCI_DEVICE_ID_LSI_PLASMA: case PCI_DEVICE_ID_LSI_INVADER: + case PCI_DEVICE_ID_LSI_FURY: { struct fusion_context *fusion; @@ -4076,6 +4735,7 @@ static int megasas_probe_one(struct pci_dev *pdev, instance->ev = NULL; instance->issuepend_done = 1; instance->adprecovery = MEGASAS_HBA_OPERATIONAL; + instance->is_imr = 0; megasas_poll_wait_aen = 0; instance->evt_detail = pci_alloc_consistent(pdev, @@ -4126,9 +4786,12 @@ static int megasas_probe_one(struct pci_dev *pdev, instance->unload = 1; instance->last_time = 0; instance->disableOnlineCtrlReset = 1; + instance->UnevenSpanSupport = 0; if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || - (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) + (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); else INIT_WORK(&instance->work_init, process_fw_state_change_wq); @@ -4139,11 +4802,33 @@ static int megasas_probe_one(struct pci_dev *pdev, if (megasas_init_fw(instance)) goto fail_init_mfi; + if (instance->requestorId) { + if (instance->PlasmaFW111) { + instance->vf_affiliation_111 = + pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111), + &instance->vf_affiliation_111_h); + if (!instance->vf_affiliation_111) + printk(KERN_WARNING "megasas: Can't allocate " + "memory for VF affiliation buffer\n"); + } else { + instance->vf_affiliation = + pci_alloc_consistent(pdev, + (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION), + &instance->vf_affiliation_h); + if (!instance->vf_affiliation) + printk(KERN_WARNING "megasas: Can't allocate " + "memory for VF affiliation buffer\n"); + } + } + +retry_irq_register: /* * Register IRQ */ if (instance->msix_vectors) { - for (i = 0 ; i < instance->msix_vectors; i++) { + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < instance->msix_vectors; i++) { instance->irq_context[i].instance = instance; instance->irq_context[i].MSIxIndex = i; if (request_irq(instance->msixentry[i].vector, @@ -4152,12 +4837,22 @@ static int megasas_probe_one(struct pci_dev *pdev, &instance->irq_context[i])) { printk(KERN_DEBUG "megasas: Failed to " "register IRQ for vector %d.\n", i); - for (j = 0 ; j < i ; 
j++) + for (j = 0; j < i; j++) { + irq_set_affinity_hint( + instance->msixentry[j].vector, NULL); free_irq( instance->msixentry[j].vector, &instance->irq_context[j]); - goto fail_irq; + } + /* Retry irq register for IO_APIC */ + instance->msix_vectors = 0; + goto retry_irq_register; } + if (irq_set_affinity_hint(instance->msixentry[i].vector, + get_cpu_mask(cpu))) + dev_err(&instance->pdev->dev, "Error setting" + "affinity hint for cpu %d\n", cpu); + cpu = cpumask_next(cpu, cpu_online_mask); } } else { instance->irq_context[0].instance = instance; @@ -4170,7 +4865,7 @@ static int megasas_probe_one(struct pci_dev *pdev, } } - instance->instancet->enable_intr(instance->reg_set); + instance->instancet->enable_intr(instance); /* * Store instance in PCI softstate @@ -4209,17 +4904,21 @@ static int megasas_probe_one(struct pci_dev *pdev, megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; megasas_mgmt_info.max_index--; - pci_set_drvdata(pdev, NULL); - instance->instancet->disable_intr(instance->reg_set); + instance->instancet->disable_intr(instance); if (instance->msix_vectors) - for (i = 0 ; i < instance->msix_vectors; i++) + for (i = 0; i < instance->msix_vectors; i++) { + irq_set_affinity_hint( + instance->msixentry[i].vector, NULL); free_irq(instance->msixentry[i].vector, &instance->irq_context[i]); + } else free_irq(instance->pdev->irq, &instance->irq_context[0]); fail_irq: if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || - (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) + (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) megasas_release_fusion(instance); else megasas_release_mfi(instance); @@ -4271,14 +4970,16 @@ static void megasas_flush_cache(struct megasas_instance *instance) dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; - dcmd->flags = MFI_FRAME_DIR_NONE; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; - dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; - megasas_issue_blocked_cmd(instance, cmd); + if (megasas_issue_blocked_cmd(instance, cmd, 30)) + dev_err(&instance->pdev->dev, "Command timedout" + " from %s\n", __func__); megasas_return_cmd(instance, cmd); @@ -4305,10 +5006,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance, return; if (instance->aen_cmd) - megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd); + megasas_issue_blocked_abort_cmd(instance, + instance->aen_cmd, 30); if (instance->map_update_cmd) megasas_issue_blocked_abort_cmd(instance, - instance->map_update_cmd); + instance->map_update_cmd, 30); dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); @@ -4316,13 +5018,15 @@ static void megasas_shutdown_controller(struct megasas_instance *instance, dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; - dcmd->flags = MFI_FRAME_DIR_NONE; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; - dcmd->opcode = opcode; + dcmd->opcode = cpu_to_le32(opcode); - megasas_issue_blocked_cmd(instance, cmd); + if (megasas_issue_blocked_cmd(instance, cmd, 30)) + dev_err(&instance->pdev->dev, "Command timedout" + "from %s\n", __func__); megasas_return_cmd(instance, cmd); @@ -4346,6 +5050,10 @@ megasas_suspend(struct pci_dev *pdev, 
pm_message_t state) host = instance->host; instance->unload = 1; + /* Shutdown SR-IOV heartbeat timer */ + if (instance->requestorId && !instance->skip_heartbeat_timer_del) + del_timer_sync(&instance->sriov_heartbeat_timer); + megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); @@ -4359,12 +5067,15 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) tasklet_kill(&instance->isr_tasklet); pci_set_drvdata(instance->pdev, instance); - instance->instancet->disable_intr(instance->reg_set); + instance->instancet->disable_intr(instance); if (instance->msix_vectors) - for (i = 0 ; i < instance->msix_vectors; i++) + for (i = 0; i < instance->msix_vectors; i++) { + irq_set_affinity_hint( + instance->msixentry[i].vector, NULL); free_irq(instance->msixentry[i].vector, &instance->irq_context[i]); + } else free_irq(instance->pdev->irq, &instance->irq_context[0]); if (instance->msix_vectors) @@ -4385,7 +5096,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) static int megasas_resume(struct pci_dev *pdev) { - int rval, i, j; + int rval, i, j, cpu; struct Scsi_Host *host; struct megasas_instance *instance; @@ -4429,7 +5140,9 @@ megasas_resume(struct pci_dev *pdev) switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: + case PCI_DEVICE_ID_LSI_PLASMA: case PCI_DEVICE_ID_LSI_INVADER: + case PCI_DEVICE_ID_LSI_FURY: { megasas_reset_reply_desc(instance); if (megasas_ioc_init_fusion(instance)) { @@ -4456,6 +5169,7 @@ megasas_resume(struct pci_dev *pdev) * Register IRQ */ if (instance->msix_vectors) { + cpu = cpumask_first(cpu_online_mask); for (i = 0 ; i < instance->msix_vectors; i++) { instance->irq_context[i].instance = instance; instance->irq_context[i].MSIxIndex = i; @@ -4465,12 +5179,21 @@ megasas_resume(struct pci_dev *pdev) &instance->irq_context[i])) { printk(KERN_DEBUG "megasas: Failed to " "register IRQ for vector %d.\n", i); - for (j = 0 ; j < i ; j++) + for (j = 0; j < i; j++) { + irq_set_affinity_hint( + instance->msixentry[j].vector, NULL); free_irq( instance->msixentry[j].vector, &instance->irq_context[j]); + } goto fail_irq; } + + if (irq_set_affinity_hint(instance->msixentry[i].vector, + get_cpu_mask(cpu))) + dev_err(&instance->pdev->dev, "Error setting" + "affinity hint for cpu %d\n", cpu); + cpu = cpumask_next(cpu, cpu_online_mask); } } else { instance->irq_context[0].instance = instance; @@ -4483,7 +5206,18 @@ megasas_resume(struct pci_dev *pdev) } } - instance->instancet->enable_intr(instance->reg_set); + /* Re-launch SR-IOV heartbeat timer */ + if (instance->requestorId) { + if (!megasas_sriov_start_heartbeat(instance, 0)) + megasas_start_timer(instance, + &instance->sriov_heartbeat_timer, + megasas_sriov_heartbeat_handler, + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); + else + instance->skip_heartbeat_timer_del = 1; + } + + instance->instancet->enable_intr(instance); instance->unload = 0; /* @@ -4537,6 +5271,10 @@ static void megasas_detach_one(struct pci_dev *pdev) host = instance->host; fusion = instance->ctrl_context; + /* Shutdown SR-IOV heartbeat timer */ + if (instance->requestorId && !instance->skip_heartbeat_timer_del) + del_timer_sync(&instance->sriov_heartbeat_timer); + scsi_remove_host(instance->host); megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); @@ -4548,6 +5286,9 @@ static void megasas_detach_one(struct pci_dev *pdev) instance->ev = NULL; } + /* cancel all wait events */ + wake_up_all(&instance->int_cmd_wait_q); + tasklet_kill(&instance->isr_tasklet); /* @@ 
-4563,14 +5304,15 @@ static void megasas_detach_one(struct pci_dev *pdev) } } - pci_set_drvdata(instance->pdev, NULL); - - instance->instancet->disable_intr(instance->reg_set); + instance->instancet->disable_intr(instance); if (instance->msix_vectors) - for (i = 0 ; i < instance->msix_vectors; i++) + for (i = 0; i < instance->msix_vectors; i++) { + irq_set_affinity_hint( + instance->msixentry[i].vector, NULL); free_irq(instance->msixentry[i].vector, &instance->irq_context[i]); + } else free_irq(instance->pdev->irq, &instance->irq_context[0]); if (instance->msix_vectors) @@ -4578,7 +5320,9 @@ static void megasas_detach_one(struct pci_dev *pdev) switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: + case PCI_DEVICE_ID_LSI_PLASMA: case PCI_DEVICE_ID_LSI_INVADER: + case PCI_DEVICE_ID_LSI_FURY: megasas_release_fusion(instance); for (i = 0; i < 2 ; i++) if (fusion->ld_map[i]) @@ -4591,10 +5335,6 @@ static void megasas_detach_one(struct pci_dev *pdev) break; default: megasas_release_mfi(instance); - pci_free_consistent(pdev, - sizeof(struct megasas_evt_detail), - instance->evt_detail, - instance->evt_detail_h); pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); @@ -4604,9 +5344,28 @@ static void megasas_detach_one(struct pci_dev *pdev) break; } - scsi_host_put(host); + if (instance->evt_detail) + pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), + instance->evt_detail, instance->evt_detail_h); - pci_set_drvdata(pdev, NULL); + if (instance->vf_affiliation) + pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * + sizeof(struct MR_LD_VF_AFFILIATION), + instance->vf_affiliation, + instance->vf_affiliation_h); + + if (instance->vf_affiliation_111) + pci_free_consistent(pdev, + sizeof(struct MR_LD_VF_AFFILIATION_111), + instance->vf_affiliation_111, + instance->vf_affiliation_111_h); + + if (instance->hb_host_mem) + pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM), + instance->hb_host_mem, + instance->hb_host_mem_h); + + scsi_host_put(host); pci_disable_device(pdev); @@ -4625,11 +5384,14 @@ static void megasas_shutdown(struct pci_dev *pdev) instance->unload = 1; megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); - instance->instancet->disable_intr(instance->reg_set); + instance->instancet->disable_intr(instance); if (instance->msix_vectors) - for (i = 0 ; i < instance->msix_vectors; i++) + for (i = 0; i < instance->msix_vectors; i++) { + irq_set_affinity_hint( + instance->msixentry[i].vector, NULL); free_irq(instance->msixentry[i].vector, &instance->irq_context[i]); + } else free_irq(instance->pdev->irq, &instance->irq_context[0]); if (instance->msix_vectors) @@ -4734,10 +5496,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, * alone separately */ memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); - cmd->frame->hdr.context = cmd->index; + cmd->frame->hdr.context = cpu_to_le32(cmd->index); cmd->frame->hdr.pad_0 = 0; - cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 | - MFI_FRAME_SENSE64); + cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE | + MFI_FRAME_SGL64 | + MFI_FRAME_SENSE64)); /* * The management interface between applications and the fw uses @@ -4771,8 +5534,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, * We don't change the dma_coherent_mask, so * pci_alloc_consistent only returns 32bit addresses */ - kern_sge32[i].phys_addr = (u32) buf_handle; - kern_sge32[i].length = ioc->sgl[i].iov_len; + kern_sge32[i].phys_addr = 
cpu_to_le32(buf_handle); + kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); /* * We created a kernel buffer corresponding to the @@ -4795,7 +5558,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, sense_ptr = (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); - *sense_ptr = sense_handle; + *sense_ptr = cpu_to_le32(sense_handle); } /* @@ -4803,7 +5566,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, * cmd to the SCSI mid-layer */ cmd->sync_cmd = 1; - megasas_issue_blocked_cmd(instance, cmd); + megasas_issue_blocked_cmd(instance, cmd, 0); cmd->sync_cmd = 0; /* @@ -4852,10 +5615,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, sense, sense_handle); } - for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) { - dma_free_coherent(&instance->pdev->dev, - kern_sge32[i].length, - kbuff_arr[i], kern_sge32[i].phys_addr); + for (i = 0; i < ioc->sge_count; i++) { + if (kbuff_arr[i]) + dma_free_coherent(&instance->pdev->dev, + le32_to_cpu(kern_sge32[i].length), + kbuff_arr[i], + le32_to_cpu(kern_sge32[i].phys_addr)); } megasas_return_cmd(instance, cmd); @@ -4888,6 +5653,16 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) goto out_kfree_ioc; } + /* Adjust ioctl wait time for VF mode */ + if (instance->requestorId) + wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; + + /* Block ioctls in VF mode */ + if (instance->requestorId && !allow_vf_ioctls) { + error = -ENODEV; + goto out_kfree_ioc; + } + if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { printk(KERN_ERR "Controller in crit error\n"); error = -ENODEV; @@ -4931,11 +5706,12 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) printk(KERN_ERR "megaraid_sas: timed out while" "waiting for HBA to recover\n"); error = -ENODEV; - goto out_kfree_ioc; + goto out_up; } spin_unlock_irqrestore(&instance->hba_lock, flags); error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); + out_up: up(&instance->ioctl_sem); out_kfree_ioc: @@ -5196,7 +5972,7 @@ megasas_aen_polling(struct work_struct *work) u16 pd_index = 0; u16 ld_index = 0; int i, j, doscan = 0; - u32 seq_num; + u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME; int error; if (!instance) { @@ -5204,11 +5980,28 @@ megasas_aen_polling(struct work_struct *work) kfree(ev); return; } + + /* Adjust event workqueue thread wait time for VF mode */ + if (instance->requestorId) + wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; + + /* Don't run the event workqueue thread if OCR is running */ + for (i = 0; i < wait_time; i++) { + if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) + break; + if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { + printk(KERN_NOTICE "megasas: %s waiting for " + "controller reset to finish for scsi%d\n", + __func__, instance->host->host_no); + } + msleep(1000); + } + instance->ev = NULL; host = instance->host; if (instance->evt_detail) { - switch (instance->evt_detail->code) { + switch (le32_to_cpu(instance->evt_detail->code)) { case MR_EVT_PD_INSERTED: if (megasas_get_pd_list(instance) == 0) { for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { @@ -5270,61 +6063,64 @@ megasas_aen_polling(struct work_struct *work) case MR_EVT_LD_OFFLINE: case MR_EVT_CFG_CLEARED: case MR_EVT_LD_DELETED: - megasas_get_ld_list(instance); - for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { - for (j = 0; - j < MEGASAS_MAX_DEV_PER_CHANNEL; - j++) { - - ld_index = - (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; - - sdev1 = scsi_device_lookup(host, - i + MEGASAS_MAX_LD_CHANNELS, - j, - 0); - - if (instance->ld_ids[ld_index] != 0xff) { - 
if (sdev1) { - scsi_device_put(sdev1); - } - } else { - if (sdev1) { - scsi_remove_device(sdev1); - scsi_device_put(sdev1); + if (!instance->requestorId || + (instance->requestorId && + megasas_get_ld_vf_affiliation(instance, 0))) { + if (megasas_ld_list_query(instance, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) + megasas_get_ld_list(instance); + for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { + for (j = 0; + j < MEGASAS_MAX_DEV_PER_CHANNEL; + j++) { + + ld_index = + (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; + + sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); + + if (instance->ld_ids[ld_index] + != 0xff) { + if (sdev1) + scsi_device_put(sdev1); + } else { + if (sdev1) { + scsi_remove_device(sdev1); + scsi_device_put(sdev1); + } + } } } - } + doscan = 0; } - doscan = 0; break; case MR_EVT_LD_CREATED: - megasas_get_ld_list(instance); - for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { - for (j = 0; - j < MEGASAS_MAX_DEV_PER_CHANNEL; - j++) { - ld_index = - (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; - - sdev1 = scsi_device_lookup(host, - i+MEGASAS_MAX_LD_CHANNELS, - j, 0); - - if (instance->ld_ids[ld_index] != - 0xff) { - if (!sdev1) { - scsi_add_device(host, - i + 2, - j, 0); + if (!instance->requestorId || + (instance->requestorId && + megasas_get_ld_vf_affiliation(instance, 0))) { + if (megasas_ld_list_query(instance, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) + megasas_get_ld_list(instance); + for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { + for (j = 0; + j < MEGASAS_MAX_DEV_PER_CHANNEL; + j++) { + ld_index = + (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; + + sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); + + if (instance->ld_ids[ld_index] + != 0xff) { + if (!sdev1) + scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); } - } - if (sdev1) { - scsi_device_put(sdev1); + if (sdev1) + scsi_device_put(sdev1); } } + doscan = 0; } - doscan = 0; break; case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: case MR_EVT_FOREIGN_CFG_IMPORTED: @@ -5342,48 +6138,55 @@ megasas_aen_polling(struct work_struct *work) } if (doscan) { - printk(KERN_INFO "scanning ...\n"); - megasas_get_pd_list(instance); - for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { - for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { - pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; - sdev1 = scsi_device_lookup(host, i, j, 0); - if (instance->pd_list[pd_index].driveState == - MR_PD_STATE_SYSTEM) { - if (!sdev1) { - scsi_add_device(host, i, j, 0); - } - if (sdev1) - scsi_device_put(sdev1); - } else { - if (sdev1) { - scsi_remove_device(sdev1); - scsi_device_put(sdev1); + printk(KERN_INFO "megaraid_sas: scanning for scsi%d...\n", + instance->host->host_no); + if (megasas_get_pd_list(instance) == 0) { + for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { + for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { + pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; + sdev1 = scsi_device_lookup(host, i, j, 0); + if (instance->pd_list[pd_index].driveState == + MR_PD_STATE_SYSTEM) { + if (!sdev1) { + scsi_add_device(host, i, j, 0); + } + if (sdev1) + scsi_device_put(sdev1); + } else { + if (sdev1) { + scsi_remove_device(sdev1); + scsi_device_put(sdev1); + } } } } } - megasas_get_ld_list(instance); - for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { - for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { - ld_index = - (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; + if (!instance->requestorId || + (instance->requestorId && + megasas_get_ld_vf_affiliation(instance, 0))) { + if (megasas_ld_list_query(instance, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) + 
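/* query DCMD unsupported - fall back to the legacy LD list DCMD */ + 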
megasas_get_ld_list(instance); + for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { + for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; + j++) { + ld_index = + (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; - sdev1 = scsi_device_lookup(host, - i+MEGASAS_MAX_LD_CHANNELS, j, 0); - if (instance->ld_ids[ld_index] != 0xff) { - if (!sdev1) { - scsi_add_device(host, - i+2, - j, 0); + sdev1 = scsi_device_lookup(host, + MEGASAS_MAX_PD_CHANNELS + i, j, 0); + if (instance->ld_ids[ld_index] + != 0xff) { + if (!sdev1) + scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); + else + scsi_device_put(sdev1); } else { - scsi_device_put(sdev1); - } - } else { - if (sdev1) { - scsi_remove_device(sdev1); - scsi_device_put(sdev1); + if (sdev1) { + scsi_remove_device(sdev1); + scsi_device_put(sdev1); + } } } } @@ -5395,7 +6198,7 @@ megasas_aen_polling(struct work_struct *work) return ; } - seq_num = instance->evt_detail->seq_num + 1; + seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index a11df82474e..081bfff12d0 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -60,10 +60,22 @@ #define FALSE 0 #define TRUE 1 +#define SPAN_DEBUG 0 +#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize) +#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize) +#define SPAN_INVALID 0xff + /* Prototypes */ -void -mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map, - struct LD_LOAD_BALANCE_INFO *lbInfo); +void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map, + struct LD_LOAD_BALANCE_INFO *lbInfo); + +static void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, + PLD_SPAN_INFO ldSpanInfo); +static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, + u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info, + struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map); +static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld, + u64 strip, struct MR_FW_RAID_MAP_ALL *map); u32 mega_mod64(u64 dividend, u32 divisor) { @@ -114,27 +126,27 @@ static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map) return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; } -static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map) +u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map) { - return map->raidMap.arMapInfo[ar].pd[arm]; + return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]); } -static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map) +u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map) { - return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef; + return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef); } -static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map) +u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map) { return map->raidMap.devHndlInfo[pd].curDevHdl; } u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map) { - return map->raidMap.ldSpanMap[ld].ldRaid.targetId; + return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId); } -u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map) +u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map) { return map->raidMap.ldTgtIdToLd[ldTgtId]; } @@ -148,32 
+160,51 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, /* * This function will validate Map info data provided by FW */ -u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map, - struct LD_LOAD_BALANCE_INFO *lbInfo) +u8 MR_ValidateMapInfo(struct megasas_instance *instance) { + struct fusion_context *fusion = instance->ctrl_context; + struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)]; + struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info; + PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap; + struct MR_LD_RAID *raid; + int ldCount, num_lds; + u16 ld; + - if (pFwRaidMap->totalSize != + if (le32_to_cpu(pFwRaidMap->totalSize) != (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) + - (sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) { + (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) { printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n", (unsigned int)((sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP)) + (sizeof(struct MR_LD_SPAN_MAP) * - pFwRaidMap->ldCount))); + le32_to_cpu(pFwRaidMap->ldCount)))); printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize " ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), - pFwRaidMap->totalSize); + le32_to_cpu(pFwRaidMap->totalSize)); return 0; } + if (instance->UnevenSpanSupport) + mr_update_span_set(map, ldSpanInfo); + mr_update_load_balance_params(map, lbInfo); + num_lds = le32_to_cpu(map->raidMap.ldCount); + + /*Convert Raid capability values to CPU arch */ + for (ldCount = 0; ldCount < num_lds; ldCount++) { + ld = MR_TargetIdToLdGet(ldCount, map); + raid = MR_LdRaidGet(ld, map); + le32_to_cpus((u32 *)&raid->capability); + } + return 1; } u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, - struct MR_FW_RAID_MAP_ALL *map, int *div_error) + struct MR_FW_RAID_MAP_ALL *map) { struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map); struct MR_QUAD_ELEMENT *quad; @@ -182,32 +213,476 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) { - for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) { + for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) { quad = &pSpanBlock->block_span_info.quad[j]; - if (quad->diff == 0) { - *div_error = 1; - return span; - } - if (quad->logStart <= row && row <= quad->logEnd && - (mega_mod64(row-quad->logStart, quad->diff)) == 0) { + if (le32_to_cpu(quad->diff) == 0) + return SPAN_INVALID; + if (le64_to_cpu(quad->logStart) <= row && row <= + le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart), + le32_to_cpu(quad->diff))) == 0) { if (span_blk != NULL) { u64 blk, debugBlk; - blk = - mega_div64_32( - (row-quad->logStart), - quad->diff); + blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff)); debugBlk = blk; - blk = (blk + quad->offsetInSpan) << - raid->stripeShift; + blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift; *span_blk = blk; } return span; } } } - return span; + return SPAN_INVALID; +} + +/* +****************************************************************************** +* +* Function to print info about span set created in driver from FW raid map +* +* Inputs : +* map - LD map +* ldSpanInfo - ldSpanInfo per HBA instance +*/ +#if SPAN_DEBUG +static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo) +{ + + u8 span; + u32 element; + struct MR_LD_RAID 
*raid; + LD_SPAN_SET *span_set; + struct MR_QUAD_ELEMENT *quad; + int ldCount; + u16 ld; + + for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { + ld = MR_TargetIdToLdGet(ldCount, map); + if (ld >= MAX_LOGICAL_DRIVES) + continue; + raid = MR_LdRaidGet(ld, map); + dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n", + ld, raid->spanDepth); + for (span = 0; span < raid->spanDepth; span++) + dev_dbg(&instance->pdev->dev, "Span=%x," + " number of quads=%x\n", span, + le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements)); + for (element = 0; element < MAX_QUAD_DEPTH; element++) { + span_set = &(ldSpanInfo[ld].span_set[element]); + if (span_set->span_row_data_width == 0) + break; + + dev_dbg(&instance->pdev->dev, "Span Set %x: " + "width=%x, diff=%x\n", element, + (unsigned int)span_set->span_row_data_width, + (unsigned int)span_set->diff); + dev_dbg(&instance->pdev->dev, "logical LBA " + "start=0x%08lx, end=0x%08lx\n", + (long unsigned int)span_set->log_start_lba, + (long unsigned int)span_set->log_end_lba); + dev_dbg(&instance->pdev->dev, "span row start=0x%08lx," + " end=0x%08lx\n", + (long unsigned int)span_set->span_row_start, + (long unsigned int)span_set->span_row_end); + dev_dbg(&instance->pdev->dev, "data row start=0x%08lx," + " end=0x%08lx\n", + (long unsigned int)span_set->data_row_start, + (long unsigned int)span_set->data_row_end); + dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx," + " end=0x%08lx\n", + (long unsigned int)span_set->data_strip_start, + (long unsigned int)span_set->data_strip_end); + + for (span = 0; span < raid->spanDepth; span++) { + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= + element + 1) { + quad = &map->raidMap.ldSpanMap[ld]. + spanBlock[span].block_span_info. + quad[element]; + dev_dbg(&instance->pdev->dev, "Span=%x, " + "Quad=%x, diff=%x\n", span, + element, le32_to_cpu(quad->diff)); + dev_dbg(&instance->pdev->dev, + "offset_in_span=0x%08lx\n", + (long unsigned int)le64_to_cpu(quad->offsetInSpan)); + dev_dbg(&instance->pdev->dev, + "logical start=0x%08lx, end=0x%08lx\n", + (long unsigned int)le64_to_cpu(quad->logStart), + (long unsigned int)le64_to_cpu(quad->logEnd)); + } + } + } + } + return 0; +} +#endif + +/* +****************************************************************************** +* +* This routine calculates the Span block for given row using spanset. +* +* Inputs : +* instance - HBA instance +* ld - Logical drive number +* row - Row number +* map - LD map +* +* Outputs : +* +* span - Span number +* block - Absolute Block number in the physical disk +* (returns SPAN_INVALID on a divide error or when no quad matches) +*/ + +u32 mr_spanset_get_span_block(struct megasas_instance *instance, + u32 ld, u64 row, u64 *span_blk, struct MR_FW_RAID_MAP_ALL *map) +{ + struct fusion_context *fusion = instance->ctrl_context; + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + LD_SPAN_SET *span_set; + struct MR_QUAD_ELEMENT *quad; + u32 span, info; + PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; + + for (info = 0; info < MAX_QUAD_DEPTH; info++) { + span_set = &(ldSpanInfo[ld].span_set[info]); + + if (span_set->span_row_data_width == 0) + break; + + if (row > span_set->data_row_end) + continue; + + for (span = 0; span < raid->spanDepth; span++) + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= info+1) { + quad = &map->raidMap.ldSpanMap[ld]. + spanBlock[span]. + block_span_info.quad[info]; + if (le32_to_cpu(quad->diff) == 0) + return SPAN_INVALID; + if (le64_to_cpu(quad->logStart) <= row && + row <= le64_to_cpu(quad->logEnd) && + (mega_mod64(row - le64_to_cpu(quad->logStart), + le32_to_cpu(quad->diff))) == 0) { + if (span_blk != NULL) { + u64 blk; + blk = mega_div64_32 + ((row - le64_to_cpu(quad->logStart)), + le32_to_cpu(quad->diff)); + blk = (blk + le64_to_cpu(quad->offsetInSpan)) + << raid->stripeShift; + *span_blk = blk; + } + return span; + } + } + } + return SPAN_INVALID; +} + +/* +****************************************************************************** +* +* This routine calculates the row for given strip using spanset. +* +* Inputs : +* instance - HBA instance +* ld - Logical drive number +* Strip - Strip +* map - LD map +* +* Outputs : +* +* row - row associated with strip +*/ + +static u64 get_row_from_strip(struct megasas_instance *instance, + u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map) +{ + struct fusion_context *fusion = instance->ctrl_context; + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + LD_SPAN_SET *span_set; + PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; + u32 info, strip_offset, span, span_offset; + u64 span_set_Strip, span_set_Row, retval; + + for (info = 0; info < MAX_QUAD_DEPTH; info++) { + span_set = &(ldSpanInfo[ld].span_set[info]); + + if (span_set->span_row_data_width == 0) + break; + if (strip > span_set->data_strip_end) + continue; + + span_set_Strip = strip - span_set->data_strip_start; + strip_offset = mega_mod64(span_set_Strip, + span_set->span_row_data_width); + span_set_Row = mega_div64_32(span_set_Strip, + span_set->span_row_data_width) * span_set->diff; + for (span = 0, span_offset = 0; span < raid->spanDepth; span++) + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= info+1) { + if (strip_offset >= + span_set->strip_offset[span]) + span_offset++; + else + break; + } +#if SPAN_DEBUG + dev_info(&instance->pdev->dev, "Strip 0x%llx, " + "span_set_Strip 0x%llx, span_set_Row 0x%llx, " + "data width 0x%llx, span offset 0x%x\n", strip, + (unsigned long long)span_set_Strip, + (unsigned long long)span_set_Row, + (unsigned long long)span_set->span_row_data_width, + span_offset); + dev_info(&instance->pdev->dev, "For strip 0x%llx " + "row is 0x%llx\n", strip, + (unsigned long long) span_set->data_row_start + + (unsigned long long) span_set_Row + (span_offset - 1)); +#endif + retval = (span_set->data_row_start + span_set_Row + + (span_offset - 1)); + return retval; + } + return -1LLU; +}
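The strip-to-row arithmetic in get_row_from_strip is compact but easy to misread in diff form. The following userspace sketch (hypothetical and simplified: a single span set, no quad lookup, plain integer division standing in for mega_div64_32/mega_mod64, and a stand-in struct instead of the driver's LD_SPAN_SET) reproduces the core computation: one span row holds span_row_data_width strips and advances the row counter by diff.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the handful of LD_SPAN_SET fields used here (assumed). */
struct span_set_model {
	uint64_t data_strip_start;    /* first strip covered by this span set */
	uint64_t data_row_start;      /* first row covered by this span set */
	uint32_t span_row_data_width; /* strips per span row across all spans */
	uint32_t diff;                /* rows consumed per span row */
};

/* Mirrors get_row_from_strip(): find the span row owning the strip, scale
 * by 'diff', and fold in the span offset the driver derives from
 * strip_offset[]; here it is collapsed to a caller-supplied count. */
uint64_t model_row_from_strip(const struct span_set_model *ss,
			      uint64_t strip, uint32_t span_offset)
{
	uint64_t rel_strip = strip - ss->data_strip_start;
	uint64_t span_row = rel_strip / ss->span_row_data_width;

	return ss->data_row_start + span_row * ss->diff + (span_offset - 1);
}

int main(void)
{
	/* e.g. 3 spans x 2 data arms => width 6, two rows per span row */
	struct span_set_model ss = { 0, 0, 6, 2 };

	printf("strip 13 -> row %llu\n",
	       (unsigned long long)model_row_from_strip(&ss, 13, 1));
	return 0;
}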
+ + +/* +****************************************************************************** +* +* This routine calculates the Start Strip for given row using spanset. +* +* Inputs : +* instance - HBA instance +* ld - Logical drive number +* row - Row number +* map - LD map +* +* Outputs : +* +* Strip - Start strip associated with row +*/ + +static u64 get_strip_from_row(struct megasas_instance *instance, + u32 ld, u64 row, struct MR_FW_RAID_MAP_ALL *map) +{ + struct fusion_context *fusion = instance->ctrl_context; + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + LD_SPAN_SET *span_set; + struct MR_QUAD_ELEMENT *quad; + PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; + u32 span, info; + u64 strip; + + for (info = 0; info < MAX_QUAD_DEPTH; info++) { + span_set = &(ldSpanInfo[ld].span_set[info]); + + if (span_set->span_row_data_width == 0) + break; + if (row > span_set->data_row_end) + continue; + + for (span = 0; span < raid->spanDepth; span++) + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= info+1) { + quad = &map->raidMap.ldSpanMap[ld]. + spanBlock[span].block_span_info.quad[info]; + if (le64_to_cpu(quad->logStart) <= row && + row <= le64_to_cpu(quad->logEnd) && + mega_mod64((row - le64_to_cpu(quad->logStart)), + le32_to_cpu(quad->diff)) == 0) { + strip = mega_div64_32 + (((row - span_set->data_row_start) + - le64_to_cpu(quad->logStart)), + le32_to_cpu(quad->diff)); + strip *= span_set->span_row_data_width; + strip += span_set->data_strip_start; + strip += span_set->strip_offset[span]; + return strip; + } + } + } + dev_err(&instance->pdev->dev, "get_strip_from_row: " + "returns invalid strip for ld=%x, row=%lx\n", + ld, (long unsigned int)row); + return -1; +}
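get_arm_from_strip, which follows, inverts the width arithmetic: the offset of the strip inside its span row is compared against each span's cumulative strip_offset to find which span owns the strip, and the physical arm is the remainder. A minimal model of that search (hypothetical stand-in parameters rather than the driver's LD_SPAN_SET bookkeeping; strip_offset[] is assumed filled the way mr_update_span_set fills it):

#include <stdint.h>

/* Mirrors the search loop in get_arm_from_strip(): 'strip_offset[]'
 * holds the cumulative data-arm count at the start of each span. */
uint32_t model_arm_from_strip(uint64_t strip, uint64_t data_strip_start,
			      uint32_t span_row_data_width,
			      const uint32_t *strip_offset,
			      unsigned int span_depth)
{
	uint32_t off = (uint32_t)((strip - data_strip_start) %
				  span_row_data_width);
	uint32_t span_base = 0;
	unsigned int span;

	/* Walk spans until one starts past 'off'; the last base seen is
	 * the owning span's first arm. */
	for (span = 0; span < span_depth; span++) {
		if (off >= strip_offset[span])
			span_base = strip_offset[span];
		else
			break;
	}
	return off - span_base;	/* physical arm within the owning span */
}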
+ +/* +****************************************************************************** +* +* This routine calculates the Physical Arm for given strip using spanset. +* +* Inputs : +* instance - HBA instance +* ld - Logical drive number +* strip - Strip +* map - LD map +* +* Outputs : +* +* Phys Arm - Phys Arm associated with strip +*/ + +static u32 get_arm_from_strip(struct megasas_instance *instance, + u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map) +{ + struct fusion_context *fusion = instance->ctrl_context; + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + LD_SPAN_SET *span_set; + PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; + u32 info, strip_offset, span, span_offset, retval; + + for (info = 0 ; info < MAX_QUAD_DEPTH; info++) { + span_set = &(ldSpanInfo[ld].span_set[info]); + + if (span_set->span_row_data_width == 0) + break; + if (strip > span_set->data_strip_end) + continue; + + strip_offset = (uint)mega_mod64 + ((strip - span_set->data_strip_start), + span_set->span_row_data_width); + + for (span = 0, span_offset = 0; span < raid->spanDepth; span++) + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) >= info+1) { + if (strip_offset >= + span_set->strip_offset[span]) + span_offset = + span_set->strip_offset[span]; + else + break; + } +#if SPAN_DEBUG + dev_info(&instance->pdev->dev, "get_arm_from_strip: " + "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld, + (long unsigned int)strip, (strip_offset - span_offset)); +#endif + retval = (strip_offset - span_offset); + return retval; + } + + dev_err(&instance->pdev->dev, "get_arm_from_strip: " + "returns invalid arm for ld=%x strip=%lx\n", + ld, (long unsigned int)strip); + + return -1; +} + +/* This Function will return Phys arm */ +u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe, + struct MR_FW_RAID_MAP_ALL *map) +{ + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + /* Need to check correct default value */ + u32 arm = 0; + + switch (raid->level) { + case 0: + case 5: + case 6: + arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span)); + break; + case 1: + /* start with logical arm */ + arm = get_arm_from_strip(instance, ld, stripe, map); + if (arm != -1U) + arm *= 2; + break; + } + + return arm; +} + + +/* +****************************************************************************** +* +* This routine calculates the arm, span and block for the specified stripe and +* reference in stripe using spanset +* +* Inputs : +* +* ld - Logical drive number +* stripRow - Stripe number +* stripRef - Reference in stripe +* +* Outputs : +* +* span - Span number +* block - Absolute Block number in the physical disk +*/ +static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, + u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info, + struct RAID_CONTEXT *pRAID_Context, + struct MR_FW_RAID_MAP_ALL *map) +{ + struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); + u32 pd, arRef; + u8 physArm, span; + u64 row; + u8 retval = TRUE; + u8 do_invader = 0; + u64 *pdBlock = &io_info->pdBlock; + u16 *pDevHandle = &io_info->devHandle; + u32 logArm, rowMod, armQ, arm; + + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER || + instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) + do_invader = 1; + + /*Get row and span from io_info for Uneven Span IO.*/ + row = io_info->start_row; + span = io_info->start_span; + + + if (raid->level == 6) { + logArm = get_arm_from_strip(instance, ld, stripRow, map); + if (logArm == -1U) + return FALSE; + rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span)); + armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod; + arm = armQ + 1 + logArm; + if (arm >= SPAN_ROW_SIZE(map, ld, span)) + arm -= SPAN_ROW_SIZE(map, ld, span); + physArm = (u8)arm; + } else + /* Calculate the arm */ + physArm = get_arm(instance, ld, span, stripRow, map); + if (physArm == 0xFF) + return FALSE; + + arRef = MR_LdSpanArrayGet(ld, span, map); + pd = MR_ArPdGet(arRef, physArm, map); + + if (pd != MR_PD_INVALID) + *pDevHandle = MR_PdDevHandleGet(pd, map); + else { + *pDevHandle = MR_PD_INVALID; + if ((raid->level >= 5) && + (!do_invader || (do_invader && + (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) + pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; + else if (raid->level == 1) { + pd = MR_ArPdGet(arRef, physArm + 1, map); + if (pd != MR_PD_INVALID) + *pDevHandle = MR_PdDevHandleGet(pd, map); + } + } + + *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); + pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | + physArm; + return retval; } /* @@ -228,16 +703,22 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, * block - Absolute Block number 
in the physical disk */ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, - u16 stripRef, u64 *pdBlock, u16 *pDevHandle, - struct RAID_CONTEXT *pRAID_Context, - struct MR_FW_RAID_MAP_ALL *map) + u16 stripRef, struct IO_REQUEST_INFO *io_info, + struct RAID_CONTEXT *pRAID_Context, + struct MR_FW_RAID_MAP_ALL *map) { struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); u32 pd, arRef; u8 physArm, span; u64 row; u8 retval = TRUE; - int error_code = 0; + u8 do_invader = 0; + u64 *pdBlock = &io_info->pdBlock; + u16 *pDevHandle = &io_info->devHandle; + + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER || + instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) + do_invader = 1; row = mega_div64_32(stripRow, raid->rowDataSize); @@ -267,8 +748,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, span = 0; *pdBlock = row << raid->stripeShift; } else { - span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code); - if (error_code == 1) + span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map); + if (span == SPAN_INVALID) return FALSE; } @@ -282,9 +763,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, else { *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */ if ((raid->level >= 5) && - ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) || - (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER && - raid->regTypeReqOnRead != REGION_TYPE_UNUSED))) + (!do_invader || (do_invader && + (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))) pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { /* Get alternate Pd. */ @@ -295,7 +775,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, } } - *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; + *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm; return retval;
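Both MR_GetPhyParams above and mr_spanset_get_phy_params earlier finish the same way once the arm is known: span plus logical drive select an array reference, the array reference plus arm select a physical drive, and the drive maps to a firmware device handle, with a RAID-1 fallback to the adjacent mirror arm when the primary drive is missing. The sketch below models that chain with plain lookup tables (hypothetical names and dimensions; the driver does these lookups through MR_LdSpanArrayGet/MR_ArPdGet/MR_PdDevHandleGet against the firmware RAID map):

#include <stdint.h>

#define MODEL_PD_INVALID 0xFFFFu

/* Toy stand-ins for the firmware-map lookup tables (sizes arbitrary). */
static uint16_t model_ar_pd[8][4];   /* array ref x arm -> physical drive */
static uint16_t model_pd_handle[32]; /* physical drive -> device handle */

uint16_t model_resolve_handle(uint32_t ar_ref, uint8_t phys_arm, int raid1)
{
	uint16_t pd = model_ar_pd[ar_ref][phys_arm];

	if (pd != MODEL_PD_INVALID)
		return model_pd_handle[pd];

	/* RAID-1: the same data also lives on the adjacent mirror arm. */
	if (raid1 && phys_arm + 1 < 4) {
		pd = model_ar_pd[ar_ref][phys_arm + 1];
		if (pd != MODEL_PD_INVALID)
			return model_pd_handle[pd];
	}
	return MODEL_PD_INVALID; /* fast-path I/O not possible */
}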
@@ -314,7 +794,7 @@ u8 MR_BuildRaidContext(struct megasas_instance *instance, struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, - struct MR_FW_RAID_MAP_ALL *map) + struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN) { struct MR_LD_RAID *raid; u32 ld, stripSize, stripe_mask; @@ -327,17 +807,42 @@ MR_BuildRaidContext(struct megasas_instance *instance, u32 numBlocks, ldTgtId; u8 isRead; u8 retval = 0; + u8 startlba_span = SPAN_INVALID; + u64 *pdBlock = &io_info->pdBlock; ldStartBlock = io_info->ldStartBlock; numBlocks = io_info->numBlocks; ldTgtId = io_info->ldTgtId; isRead = io_info->isRead; + io_info->IoforUnevenSpan = 0; + io_info->start_span = SPAN_INVALID; ld = MR_TargetIdToLdGet(ldTgtId, map); raid = MR_LdRaidGet(ld, map); + /* + * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero + * return FALSE + */ + if (raid->rowDataSize == 0) { + if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0) + return FALSE; + else if (instance->UnevenSpanSupport) { + io_info->IoforUnevenSpan = 1; + } else { + dev_info(&instance->pdev->dev, + "raid->rowDataSize is 0, but has SPAN[0] " + "rowDataSize = 0x%0x, " + "but there is _NO_ UnevenSpanSupport\n", + MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize); + return FALSE; + } + } + stripSize = 1 << raid->stripeShift; stripe_mask = stripSize-1; + + /* * calculate starting row and stripe, and number of strips and rows */ @@ -347,11 +852,50 @@ MR_BuildRaidContext(struct megasas_instance *instance, ref_in_end_stripe = (u16)(endLba & stripe_mask); endStrip = endLba >> raid->stripeShift; num_strips = (u8)(endStrip - start_strip + 1); /* End strip */ - if (raid->rowDataSize == 0) - return FALSE; - start_row = mega_div64_32(start_strip, raid->rowDataSize); - endRow = mega_div64_32(endStrip, raid->rowDataSize); - numRows = (u8)(endRow - start_row + 1); + + if (io_info->IoforUnevenSpan) { + start_row = get_row_from_strip(instance, ld, start_strip, map); + endRow = get_row_from_strip(instance, ld, endStrip, map); + if (start_row == -1ULL || endRow == -1ULL) { + dev_info(&instance->pdev->dev, "return from %s %d. " + "Send IO w/o region lock.\n", + __func__, __LINE__); + return FALSE; + } + + if (raid->spanDepth == 1) { + startlba_span = 0; + *pdBlock = start_row << raid->stripeShift; + } else + startlba_span = (u8)mr_spanset_get_span_block(instance, + ld, start_row, pdBlock, map); + if (startlba_span == SPAN_INVALID) { + dev_info(&instance->pdev->dev, "return from %s %d " + "for row 0x%llx, start strip 0x%llx " + "end strip 0x%llx\n", __func__, __LINE__, + (unsigned long long)start_row, + (unsigned long long)start_strip, + (unsigned long long)endStrip); + return FALSE; + } + io_info->start_span = startlba_span; + io_info->start_row = start_row; +#if SPAN_DEBUG + dev_dbg(&instance->pdev->dev, "Check Span number from %s %d " + "for row 0x%llx, start strip 0x%llx end strip 0x%llx" + " span 0x%x\n", __func__, __LINE__, + (unsigned long long)start_row, + (unsigned long long)start_strip, + (unsigned long long)endStrip, startlba_span); + dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx " + "Start span 0x%x\n", (unsigned long long)start_row, + (unsigned long long)endRow, startlba_span); +#endif + } else { + start_row = mega_div64_32(start_strip, raid->rowDataSize); + endRow = mega_div64_32(endStrip, raid->rowDataSize); + } + numRows = (u8)(endRow - start_row + 1); /* * calculate region info. @@ -384,65 +928,230 @@ MR_BuildRaidContext(struct megasas_instance *instance, regSize = numBlocks; } /* multi-strip IOs always need to full stripe locked */ - } else { + } else if (io_info->IoforUnevenSpan == 0) { + /* + * For Even span region lock optimization. + * If the start strip is the last in the start row + */ if (start_strip == (start_row + 1) * raid->rowDataSize - 1) { - /* If the start strip is the last in the start row */ regStart += ref_in_start_stripe; - regSize = stripSize - ref_in_start_stripe; /* initialize count to sectors from startref to end of strip */ + regSize = stripSize - ref_in_start_stripe; } + /* add complete rows in the middle of the transfer */ if (numRows > 2) - /* Add complete rows in the middle of the transfer */ regSize += (numRows-2) << raid->stripeShift; - /* if IO ends within first strip of last row */ + /* if IO ends within first strip of last row*/ if (endStrip == endRow*raid->rowDataSize) regSize += ref_in_end_stripe+1; else regSize += stripSize; + } else { + /* + * For Uneven span region lock optimization. 
+ * If the start strip is the last in the start row + */ + if (start_strip == (get_strip_from_row(instance, ld, start_row, map) + + SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) { + regStart += ref_in_start_stripe; + /* initialize count to sectors from + * startRef to end of strip + */ + regSize = stripSize - ref_in_start_stripe; + } + + /* Add complete rows in the middle of the transfer */ + if (numRows > 2) + regSize += (numRows-2) << raid->stripeShift; + + /* if IO ends within first strip of last row */ + if (endStrip == get_strip_from_row(instance, ld, endRow, map)) + regSize += ref_in_end_stripe + 1; + else + regSize += stripSize; } - pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec; - if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) + pRAID_Context->timeoutValue = + cpu_to_le16(raid->fpIoTimeoutForLd ? + raid->fpIoTimeoutForLd : + map->raidMap.fpPdIoTimeoutSec); + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) pRAID_Context->regLockFlags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite; else pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; pRAID_Context->VirtualDiskTgtId = raid->targetId; - pRAID_Context->regLockRowLBA = regStart; - pRAID_Context->regLockLength = regSize; + pRAID_Context->regLockRowLBA = cpu_to_le64(regStart); + pRAID_Context->regLockLength = cpu_to_le32(regSize); pRAID_Context->configSeqNum = raid->seqNum; + /* save pointer to raid->LUN array */ + *raidLUN = raid->LUN; + /*Get Phy Params only if FP capable, or else leave it to MR firmware to do the calculation.*/ if (io_info->fpOkForIo) { - retval = MR_GetPhyParams(instance, ld, start_strip, - ref_in_start_stripe, - &io_info->pdBlock, - &io_info->devHandle, pRAID_Context, - map); - /* If IO on an invalid Pd, then FP i snot possible */ + retval = io_info->IoforUnevenSpan ? + mr_spanset_get_phy_params(instance, ld, + start_strip, ref_in_start_stripe, + io_info, pRAID_Context, map) : + MR_GetPhyParams(instance, ld, start_strip, + ref_in_start_stripe, io_info, + pRAID_Context, map); + /* If IO on an invalid Pd, then FP is not possible.*/ if (io_info->devHandle == MR_PD_INVALID) io_info->fpOkForIo = FALSE; return retval; } else if (isRead) { uint stripIdx; for (stripIdx = 0; stripIdx < num_strips; stripIdx++) { - if (!MR_GetPhyParams(instance, ld, - start_strip + stripIdx, - ref_in_start_stripe, - &io_info->pdBlock, - &io_info->devHandle, - pRAID_Context, map)) + retval = io_info->IoforUnevenSpan ? + mr_spanset_get_phy_params(instance, ld, + start_strip + stripIdx, + ref_in_start_stripe, io_info, + pRAID_Context, map) : + MR_GetPhyParams(instance, ld, + start_strip + stripIdx, ref_in_start_stripe, + io_info, pRAID_Context, map); + if (!retval) return TRUE; } } + +#if SPAN_DEBUG + /* Just for testing what arm we get for strip.*/ + if (io_info->IoforUnevenSpan) + get_arm_from_strip(instance, ld, start_strip, map); +#endif return TRUE; } +/* +****************************************************************************** +* +* This routine prepares spanset info from Valid Raid map and stores it into +* local copy of ldSpanInfo per instance data structure. 
+* +* Inputs : +* map - LD map +* ldSpanInfo - ldSpanInfo per HBA instance +* +*/ +void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, + PLD_SPAN_INFO ldSpanInfo) +{ + u8 span, count; + u32 element, span_row_width; + u64 span_row; + struct MR_LD_RAID *raid; + LD_SPAN_SET *span_set, *span_set_prev; + struct MR_QUAD_ELEMENT *quad; + int ldCount; + u16 ld; + + + for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { + ld = MR_TargetIdToLdGet(ldCount, map); + if (ld >= MAX_LOGICAL_DRIVES) + continue; + raid = MR_LdRaidGet(ld, map); + for (element = 0; element < MAX_QUAD_DEPTH; element++) { + for (span = 0; span < raid->spanDepth; span++) { + if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. + block_span_info.noElements) < + element + 1) + continue; + span_set = &(ldSpanInfo[ld].span_set[element]); + quad = &map->raidMap.ldSpanMap[ld]. + spanBlock[span].block_span_info. + quad[element]; + + span_set->diff = le32_to_cpu(quad->diff); + + for (count = 0, span_row_width = 0; + count < raid->spanDepth; count++) { + if (le32_to_cpu(map->raidMap.ldSpanMap[ld]. + spanBlock[count]. + block_span_info. + noElements) >= element + 1) { + span_set->strip_offset[count] = + span_row_width; + span_row_width += + MR_LdSpanPtrGet + (ld, count, map)->spanRowDataSize; + printk(KERN_INFO "megasas: " + "span %x rowDataSize %x\n", + count, MR_LdSpanPtrGet + (ld, count, map)->spanRowDataSize); + } + } + + span_set->span_row_data_width = span_row_width; + span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) - + le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)), + le32_to_cpu(quad->diff)); + + if (element == 0) { + span_set->log_start_lba = 0; + span_set->log_end_lba = + ((span_row << raid->stripeShift) + * span_row_width) - 1; + + span_set->span_row_start = 0; + span_set->span_row_end = span_row - 1; + + span_set->data_strip_start = 0; + span_set->data_strip_end = + (span_row * span_row_width) - 1; + + span_set->data_row_start = 0; + span_set->data_row_end = + (span_row * le32_to_cpu(quad->diff)) - 1; + } else { + span_set_prev = &(ldSpanInfo[ld]. + span_set[element - 1]); + span_set->log_start_lba = + span_set_prev->log_end_lba + 1; + span_set->log_end_lba = + span_set->log_start_lba + + ((span_row << raid->stripeShift) + * span_row_width) - 1; + + span_set->span_row_start = + span_set_prev->span_row_end + 1; + span_set->span_row_end = + span_set->span_row_start + span_row - 1; + + span_set->data_strip_start = + span_set_prev->data_strip_end + 1; + span_set->data_strip_end = + span_set->data_strip_start + + (span_row * span_row_width) - 1; + + span_set->data_row_start = + span_set_prev->data_row_end + 1; + span_set->data_row_end = + span_set->data_row_start + + (span_row * le32_to_cpu(quad->diff)) - 1; + } + break; + } + if (span == raid->spanDepth) + break; + } + } +#if SPAN_DEBUG + getSpanInfo(map, ldSpanInfo); +#endif + +}
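The load-balancing hunk that follows lowers the pending-I/O threshold from 16 to 4. In isolation, megasas_get_best_arm picks the mirror arm whose last accessed block is closest to the new request, and only overrides that choice when the preferred arm is already noticeably busier. A hypothetical standalone rendering of the heuristic (plain arrays standing in for LD_LOAD_BALANCE_INFO):

#include <stdint.h>

/* Mirrors megasas_get_best_arm(): seek distance decides; a pending-command
 * imbalance greater than 4 (previously 16) vetoes. 'cur' is the arm the
 * strip maps to, last[] the last accessed block per mirror arm, pend[] the
 * outstanding-command count per mirror arm. */
uint8_t model_best_arm(uint8_t cur, uint64_t block,
		       const uint64_t last[2], const unsigned int pend[2])
{
	uint64_t diff0 = block > last[0] ? block - last[0] : last[0] - block;
	uint64_t diff1 = block > last[1] ? block - last[1] : last[1] - block;
	uint8_t best = (diff0 <= diff1) ? 0 : 1;

	if ((best == cur && pend[0] > pend[1] + 4) ||
	    (best != cur && pend[1] > pend[0] + 4))
		best ^= 1;	/* preferred arm too busy: switch */
	return best;
}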
+ void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map, struct LD_LOAD_BALANCE_INFO *lbInfo) @@ -503,8 +1212,9 @@ u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block, diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]); bestArm = (diff0 <= diff1 ? 0 : 1); - if ((bestArm == arm && pend0 > pend1 + 16) || - (bestArm != arm && pend1 > pend0 + 16)) + /*Make balance count from 16 to 4 to keep driver in sync with Firmware*/ + if ((bestArm == arm && pend0 > pend1 + 4) || + (bestArm != arm && pend1 > pend0 + 4)) bestArm ^= 1; /* Update the last accessed block on the correct pd */ diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index a7d56687bfc..22600419ae9 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -62,7 +62,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, u8 alt_status); int megasas_is_ldio(struct scsi_cmnd *cmd); int -wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd); +wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, + int seconds); void megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd); @@ -72,28 +73,22 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs); int megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd); - -u8 -MR_BuildRaidContext(struct megasas_instance *instance, - struct IO_REQUEST_INFO *io_info, - struct RAID_CONTEXT *pRAID_Context, - struct MR_FW_RAID_MAP_ALL *map); -u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); -struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); - -u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); - void megasas_check_and_restore_queue_depth(struct megasas_instance *instance); -u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map, - struct LD_LOAD_BALANCE_INFO *lbInfo); u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info); int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); void megaraid_sas_kill_hba(struct megasas_instance *instance); extern u32 megasas_dbg_lvl; +void megasas_sriov_heartbeat_handler(unsigned long instance_addr); +int megasas_sriov_start_heartbeat(struct megasas_instance *instance, + int initial); +void megasas_start_timer(struct megasas_instance *instance, + struct timer_list *timer, + void *fn, unsigned long interval); +extern struct megasas_mgmt_info megasas_mgmt_info; extern int resetwaittime; /** @@ -101,8 +96,10 @@ extern int resetwaittime; * @regs: MFI register set */ void -megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs) +megasas_enable_intr_fusion(struct megasas_instance *instance) { + struct megasas_register_set __iomem *regs; + regs = instance->reg_set; /* For Thunderbolt/Invader also clear intr on enable */ writel(~0, &regs->outbound_intr_status); readl(&regs->outbound_intr_status); @@ -111,6 +108,7 @@ megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs) /* Dummy readl to force pci flush */ readl(&regs->outbound_intr_mask); + instance->mask_interrupts = 0; } /** @@ -118,10 +116,13 @@ megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs) * @regs: MFI register set */ void -megasas_disable_intr_fusion(struct megasas_register_set __iomem *regs) +megasas_disable_intr_fusion(struct megasas_instance *instance) { u32 mask = 0xFFFFFFFF; u32 status; + struct megasas_register_set __iomem *regs; + regs = instance->reg_set; + instance->mask_interrupts = 1; writel(mask, &regs->outbound_intr_mask); /* Dummy readl to force pci flush */ @@ -556,12 +557,13 @@ fail_req_desc: * For polling, MFI requires the cmd_status to be set to 0xFF before posting. 
*/ int -wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd) +wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, + int seconds) { int i; struct megasas_header *frame_hdr = &cmd->frame->hdr; - u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000; + u32 msecs = seconds * 1000; /* * Wait for cmd_status to change @@ -592,7 +594,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) struct megasas_cmd *cmd; u8 ret; struct fusion_context *fusion; - union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; + union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc; int i; struct megasas_header *frame_hdr; @@ -622,42 +624,52 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; - IOCInitMessage->MsgVersion = MPI2_VERSION; - IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION; - IOCInitMessage->SystemRequestFrameSize = - MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4; - - IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth; - IOCInitMessage->ReplyDescriptorPostQueueAddress = - fusion->reply_frames_desc_phys; - IOCInitMessage->SystemRequestFrameBaseAddress = - fusion->io_request_frames_phys; + IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION); + IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); + IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); + + IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth); + IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys); + IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); IOCInitMessage->HostMSIxVectors = instance->msix_vectors; init_frame = (struct megasas_init_frame *)cmd->frame; memset(init_frame, 0, MEGAMFI_FRAME_SIZE); frame_hdr = &cmd->frame->hdr; frame_hdr->cmd_status = 0xFF; - frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; + frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); init_frame->cmd = MFI_CMD_INIT; init_frame->cmd_status = 0xFF; - init_frame->queue_info_new_phys_addr_lo = ioc_init_handle; - init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST); - - req_desc = - (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc; - - req_desc->Words = cmd->frame_phys_addr; - req_desc->MFAIo.RequestFlags = + /* driver support Extended MSIX */ + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) + init_frame->driver_operations. 
+ mfi_capabilities.support_additional_msix = 1; + /* driver supports HA / Remote LUN over Fast Path interface */ + init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun + = 1; + /* Convert capability to LE32 */ + cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); + + init_frame->queue_info_new_phys_addr_hi = + cpu_to_le32(upper_32_bits(ioc_init_handle)); + init_frame->queue_info_new_phys_addr_lo = + cpu_to_le32(lower_32_bits(ioc_init_handle)); + init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); + + req_desc.Words = 0; + req_desc.MFAIo.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + cpu_to_le32s((u32 *)&req_desc.MFAIo); + req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr); /* * disable the intr before firing the init frame */ - instance->instancet->disable_intr(instance->reg_set); + instance->instancet->disable_intr(instance); for (i = 0; i < (10 * 1000); i += 20) { if (readl(&instance->reg_set->doorbell) & 1) @@ -666,10 +678,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) break; } - instance->instancet->fire_cmd(instance, req_desc->u.low, - req_desc->u.high, instance->reg_set); + instance->instancet->fire_cmd(instance, req_desc.u.low, + req_desc.u.high, instance->reg_set); - wait_and_poll(instance, cmd); + wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); frame_hdr = &cmd->frame->hdr; if (frame_hdr->cmd_status != 0) { @@ -720,7 +732,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance) if (!fusion) { megasas_return_cmd(instance, cmd); - return 1; + return -ENXIO; } dcmd = &cmd->frame->dcmd; @@ -743,13 +755,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance) dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = size_map_info; - dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; - dcmd->sgl.sge32[0].phys_addr = ci_h; - dcmd->sgl.sge32[0].length = size_map_info; + dcmd->data_xfer_len = cpu_to_le32(size_map_info); + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); + dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); if (!megasas_issue_polled(instance, cmd)) ret = 0; @@ -770,8 +782,7 @@ megasas_get_map_info(struct megasas_instance *instance) fusion->fast_path_io = 0; if (!megasas_get_ld_map_info(instance)) { - if (MR_ValidateMapInfo(fusion->ld_map[(instance->map_id & 1)], - fusion->load_balance_info)) { + if (MR_ValidateMapInfo(instance)) { fusion->fast_path_io = 1; return 0; } @@ -819,7 +830,7 @@ megasas_sync_map_info(struct megasas_instance *instance) map = fusion->ld_map[instance->map_id & 1]; - num_lds = map->raidMap.ldCount; + num_lds = le32_to_cpu(map->raidMap.ldCount); dcmd = &cmd->frame->dcmd; @@ -847,15 +858,15 @@ megasas_sync_map_info(struct megasas_instance *instance) dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; - dcmd->flags = MFI_FRAME_DIR_WRITE; + dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE); dcmd->timeout = 0; dcmd->pad_0 = 0; - dcmd->data_xfer_len = size_map_info; + dcmd->data_xfer_len = cpu_to_le32(size_map_info); dcmd->mbox.b[0] = num_lds; dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG; - dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; - dcmd->sgl.sge32[0].phys_addr = ci_h; - dcmd->sgl.sge32[0].length = size_map_info; + dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); + 
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); + dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); instance->map_update_cmd = cmd; @@ -864,6 +875,66 @@ megasas_sync_map_info(struct megasas_instance *instance) return ret; } +/* + * megasas_display_intel_branding - Display branding string + * @instance: per adapter object + * + * Return nothing. + */ +static void +megasas_display_intel_branding(struct megasas_instance *instance) +{ + if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL) + return; + + switch (instance->pdev->device) { + case PCI_DEVICE_ID_LSI_INVADER: + switch (instance->pdev->subsystem_device) { + case MEGARAID_INTEL_RS3DC080_SSDID: + dev_info(&instance->pdev->dev, "scsi host %d: %s\n", + instance->host->host_no, + MEGARAID_INTEL_RS3DC080_BRANDING); + break; + case MEGARAID_INTEL_RS3DC040_SSDID: + dev_info(&instance->pdev->dev, "scsi host %d: %s\n", + instance->host->host_no, + MEGARAID_INTEL_RS3DC040_BRANDING); + break; + case MEGARAID_INTEL_RS3SC008_SSDID: + dev_info(&instance->pdev->dev, "scsi host %d: %s\n", + instance->host->host_no, + MEGARAID_INTEL_RS3SC008_BRANDING); + break; + case MEGARAID_INTEL_RS3MC044_SSDID: + dev_info(&instance->pdev->dev, "scsi host %d: %s\n", + instance->host->host_no, + MEGARAID_INTEL_RS3MC044_BRANDING); + break; + default: + break; + } + break; + case PCI_DEVICE_ID_LSI_FURY: + switch (instance->pdev->subsystem_device) { + case MEGARAID_INTEL_RS3WC080_SSDID: + dev_info(&instance->pdev->dev, "scsi host %d: %s\n", + instance->host->host_no, + MEGARAID_INTEL_RS3WC080_BRANDING); + break; + case MEGARAID_INTEL_RS3WC040_SSDID: + dev_info(&instance->pdev->dev, "scsi host %d: %s\n", + instance->host->host_no, + MEGARAID_INTEL_RS3WC040_BRANDING); + break; + default: + break; + } + break; + default: + break; + } +} + /** * megasas_init_adapter_fusion - Initializes the FW * @instance: Adapter soft state @@ -944,6 +1015,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) if (megasas_ioc_init_fusion(instance)) goto fail_ioc_init; + megasas_display_intel_branding(instance); + instance->flag_ieee = 1; fusion->map_sz = sizeof(struct MR_FW_RAID_MAP) + @@ -996,9 +1069,8 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance, spin_lock_irqsave(&instance->hba_lock, flags); - writel(req_desc_lo, - &(regs)->inbound_low_queue_port); - writel(req_desc_hi, &(regs)->inbound_high_queue_port); + writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port); + writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port); spin_unlock_irqrestore(&instance->hba_lock, flags); } @@ -1071,7 +1143,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, fusion = instance->ctrl_context; - if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) { + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr; sgl_ptr_end += fusion->max_sge_in_main_msg - 1; sgl_ptr_end->Flags = 0; @@ -1085,10 +1158,11 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, return sge_count; scsi_for_each_sg(scp, os_sgl, sge_count, i) { - sgl_ptr->Length = sg_dma_len(os_sgl); - sgl_ptr->Address = sg_dma_address(os_sgl); + sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); + sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); sgl_ptr->Flags = 0; - if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) { + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { if 
(i == sge_count - 1) sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; } @@ -1100,11 +1174,13 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, (sge_count > fusion->max_sge_in_main_msg)) { struct MPI25_IEEE_SGE_CHAIN64 *sg_chain; - if (instance->pdev->device == - PCI_DEVICE_ID_LSI_INVADER) { - if ((cmd->io_request->IoFlags & - MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != - MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) + if ((instance->pdev->device == + PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_FURY)) { + if ((le16_to_cpu(cmd->io_request->IoFlags) & + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) cmd->io_request->ChainOffset = fusion-> chain_offset_io_request; @@ -1117,16 +1193,17 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, sg_chain = sgl_ptr; /* Prepare chain element */ sg_chain->NextChainOffset = 0; - if (instance->pdev->device == - PCI_DEVICE_ID_LSI_INVADER) + if ((instance->pdev->device == + PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_FURY)) sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; else sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); - sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION) - *(sge_count - sg_processed)); - sg_chain->Address = cmd->sg_frame_phys_addr; + sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed))); + sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr); sgl_ptr = (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; @@ -1184,7 +1261,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, io_request->CDB.EEDP32.PrimaryReferenceTag = cpu_to_be32(ref_tag); io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; - io_request->IoFlags = 32; /* Specify 32-byte cdb */ + io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ /* Transfer length */ cdb[28] = (u8)((num_blocks >> 24) & 0xff); @@ -1194,19 +1271,19 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, /* set SCSI IO EEDPFlags */ if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) { - io_request->EEDPFlags = + io_request->EEDPFlags = cpu_to_le16( MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | - MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); } else { - io_request->EEDPFlags = + io_request->EEDPFlags = cpu_to_le16( MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | - MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; + MPI2_SCSIIO_EEDPFLAGS_INSERT_OP); } - io_request->Control |= (0x4 << 26); - io_request->EEDPBlockSize = scp->device->sector_size; + io_request->Control |= cpu_to_le32((0x4 << 26)); + io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size); } else { /* Some drives don't support 16/12 byte CDB's, convert to 10 */ if (((cdb_len == 12) || (cdb_len == 16)) && @@ -1234,7 +1311,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, cdb[8] = (u8)(num_blocks & 0xff); cdb[7] = (u8)((num_blocks >> 8) & 0xff); - io_request->IoFlags = 10; /* Specify 10-byte cdb */ + io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */ cdb_len = 10; } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { /* Convert to 16 byte CDB for large LBA's */ @@ -1272,7 +1349,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, cdb[11] = (u8)((num_blocks >> 16) & 0xff); cdb[10] = 
(u8)((num_blocks >> 24) & 0xff); - io_request->IoFlags = 16; /* Specify 16-byte cdb */ + io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */ cdb_len = 16; } @@ -1333,13 +1410,14 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, struct IO_REQUEST_INFO io_info; struct fusion_context *fusion; struct MR_FW_RAID_MAP_ALL *local_map_ptr; + u8 *raidLUN; device_id = MEGASAS_DEV_INDEX(instance, scp); fusion = instance->ctrl_context; io_request = cmd->io_request; - io_request->RaidContext.VirtualDiskTgtId = device_id; + io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); io_request->RaidContext.status = 0; io_request->RaidContext.exStatus = 0; @@ -1403,7 +1481,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; io_info.numBlocks = datalength; io_info.ldTgtId = device_id; - io_request->DataLength = scsi_bufflen(scp); + io_request->DataLength = cpu_to_le32(scsi_bufflen(scp)); if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) io_info.isRead = 1; @@ -1417,7 +1495,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, } else { if (MR_BuildRaidContext(instance, &io_info, &io_request->RaidContext, - local_map_ptr)) + local_map_ptr, &raidLUN)) fp_possible = io_info.fpOkForIo; } @@ -1434,7 +1512,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) { + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED) cmd->request_desc->SCSIIO.RequestFlags = @@ -1442,8 +1521,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); io_request->RaidContext.Type = MPI2_TYPE_CUDA; io_request->RaidContext.nseg = 0x1; - io_request->IoFlags |= - MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; + io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); io_request->RaidContext.regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE); @@ -1459,13 +1537,16 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; io_request->DevHandle = io_info.devHandle; + /* populate the LUN field */ + memcpy(io_request->LUN, raidLUN, 8); } else { io_request->RaidContext.timeoutValue = - local_map_ptr->raidMap.fpPdIoTimeoutSec; + cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) { + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED) cmd->request_desc->SCSIIO.RequestFlags = @@ -1478,7 +1559,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_request->RaidContext.nseg = 0x1; } io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; - io_request->DevHandle = device_id; + io_request->DevHandle = cpu_to_le16(device_id); } /* Not FP */ } @@ -1500,6 +1581,11 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, u16 pd_index = 0; struct MR_FW_RAID_MAP_ALL *local_map_ptr; struct 
fusion_context *fusion = instance->ctrl_context; + u8 span, physArm; + u16 devHandle; + u32 ld, arRef, pd; + struct MR_LD_RAID *raid; + struct RAID_CONTEXT *pRAID_Context; io_request = cmd->io_request; device_id = MEGASAS_DEV_INDEX(instance, scmd); @@ -1507,6 +1593,9 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, +scmd->device->id; local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; + io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); + + /* Check if this is a system PD I/O */ if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { @@ -1522,21 +1611,86 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) + io_request->IoFlags |= cpu_to_le16( + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); cmd->request_desc->SCSIIO.DevHandle = local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; + cmd->request_desc->SCSIIO.MSIxIndex = + instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0; + /* + * If the command is for the tape device, set the + * FP timeout to the os layer timeout value. + */ + if (scmd->device->type == TYPE_TAPE) { + if ((scmd->request->timeout / HZ) > 0xFFFF) + io_request->RaidContext.timeoutValue = + 0xFFFF; + else + io_request->RaidContext.timeoutValue = + scmd->request->timeout / HZ; + } } else { + if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS) + goto NonFastPath; + + ld = MR_TargetIdToLdGet(device_id, local_map_ptr); + if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) + goto NonFastPath; + + raid = MR_LdRaidGet(ld, local_map_ptr); + + /* check if this LD is FP capable */ + if (!(raid->capability.fpNonRWCapable)) + /* not FP capable, send as non-FP */ + goto NonFastPath; + + /* get RAID_Context pointer */ + pRAID_Context = &io_request->RaidContext; + + /* set RAID context values */ + pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; + pRAID_Context->timeoutValue = raid->fpIoTimeoutForLd; + pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); + pRAID_Context->regLockRowLBA = 0; + pRAID_Context->regLockLength = 0; + pRAID_Context->configSeqNum = raid->seqNum; + + /* get the DevHandle for the PD (since this is + fpNonRWCapable, this is a single disk RAID0) */ + span = physArm = 0; + arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr); + pd = MR_ArPdGet(arRef, physArm, local_map_ptr); + devHandle = MR_PdDevHandleGet(pd, local_map_ptr); + + /* build request descriptor */ + cmd->request_desc->SCSIIO.RequestFlags = + (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + cmd->request_desc->SCSIIO.DevHandle = devHandle; + + /* populate the LUN field */ + memcpy(io_request->LUN, raid->LUN, 8); + + /* build the raidScsiIO structure */ + io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + io_request->DevHandle = devHandle; + + return; + +NonFastPath: io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; - io_request->DevHandle = device_id; + io_request->DevHandle = cpu_to_le16(device_id); cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); } - io_request->RaidContext.VirtualDiskTgtId = 
device_id; + io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); io_request->LUN[1] = scmd->device->lun; - io_request->DataLength = scsi_bufflen(scmd); } /** @@ -1575,7 +1729,7 @@ megasas_build_io_fusion(struct megasas_instance *instance, * Just the CDB length, rest of the Flags are zero * This will be modified for FP in build_ldio_fusion */ - io_request->IoFlags = scp->cmd_len; + io_request->IoFlags = cpu_to_le16(scp->cmd_len); if (megasas_is_ldio(scp)) megasas_build_ldio_fusion(instance, scp, cmd); @@ -1600,17 +1754,17 @@ megasas_build_io_fusion(struct megasas_instance *instance, io_request->RaidContext.numSGE = sge_count; - io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING; + io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); if (scp->sc_data_direction == PCI_DMA_TODEVICE) - io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE; + io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE); else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) - io_request->Control |= MPI2_SCSIIO_CONTROL_READ; + io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ); io_request->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4; - io_request->SenseBufferLowAddress = cmd->sense_phys_addr; + io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr); io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; cmd->scmd = scp; @@ -1627,7 +1781,8 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index) if (index >= instance->max_fw_cmds) { printk(KERN_ERR "megasas: Invalid SMID (0x%x) request for " - "descriptor\n", index); + "descriptor for scsi%d\n", index, + instance->host->host_no); return NULL; } fusion = instance->ctrl_context; @@ -1675,7 +1830,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, } req_desc = cmd->request_desc; - req_desc->SCSIIO.SMID = index; + req_desc->SCSIIO.SMID = cpu_to_le16(index); if (cmd->io_request->ChainOffset != 0 && cmd->io_request->ChainOffset != 0xF) @@ -1737,7 +1892,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) num_completed = 0; while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) { - smid = reply_desc->SMID; + smid = le16_to_cpu(reply_desc->SMID); cmd_fusion = fusion->cmd_list[smid - 1]; @@ -1825,8 +1980,15 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) return IRQ_NONE; wmb(); - writel((MSIxIndex << 24) | fusion->last_reply_idx[MSIxIndex], - &instance->reg_set->reply_post_host_index); + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) + writel(((MSIxIndex & 0x7) << 24) | + fusion->last_reply_idx[MSIxIndex], + instance->reply_post_host_index_addr[MSIxIndex/8]); + else + writel((MSIxIndex << 24) | + fusion->last_reply_idx[MSIxIndex], + instance->reply_post_host_index_addr[0]); megasas_check_and_restore_queue_depth(instance); return IRQ_HANDLED; }
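Worth calling out in the hunk above: on Invader/Fury the reply-post host index is no longer a single register. The driver selects one of several registers by MSIxIndex/8 and encodes only the low three bits of the vector into bits 24-26 of the doorbell value. A small standalone model of the encoding (the register array is a stand-in for instance->reply_post_host_index_addr[]):

#include <stdint.h>

/* Models the reply-post acknowledgement in complete_cmd_fusion().
 * 'extended' selects the Invader/Fury scheme with one register per
 * group of 8 MSI-x vectors. */
void model_ack_reply(volatile uint32_t *index_regs, int extended,
		     uint32_t msix_index, uint32_t last_reply_idx)
{
	if (extended)
		index_regs[msix_index / 8] =
			((msix_index & 0x7) << 24) | last_reply_idx;
	else
		index_regs[0] = (msix_index << 24) | last_reply_idx;
}

With MAX_MSIX_QUEUES_FUSION raised to 128 at the end of this patch, the divide-by-8 plus & 0x7 split is what keeps the per-register vector field from overflowing.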
@@ -1868,6 +2030,9 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp) struct megasas_instance *instance = irq_context->instance; u32 mfiStatus, fw_state; + if (instance->mask_interrupts) + return IRQ_NONE; + if (!instance->msix_vectors) { mfiStatus = instance->instancet->clear_intr(instance->reg_set); if (!mfiStatus) @@ -1885,8 +2050,11 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp) /* If we didn't complete any commands, check for FW fault */ fw_state = instance->instancet->read_fw_status_reg( instance->reg_set) & MFI_STATE_MASK; - if (fw_state == MFI_STATE_FAULT) + if (fw_state == MFI_STATE_FAULT) { + printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt " + "for scsi%d\n", instance->host->host_no); schedule_work(&instance->work_init); + } } return IRQ_HANDLED; @@ -1929,7 +2097,8 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, fusion = instance->ctrl_context; io_req = cmd->io_request; - if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) { + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; sgl_ptr_end += fusion->max_sge_in_main_msg - 1; @@ -1944,12 +2113,12 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, SGL) / 4; io_req->ChainOffset = fusion->chain_offset_mfi_pthru; - mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr; + mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr); mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; - mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME; + mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME); return 0; } @@ -1982,7 +2151,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); - req_desc->SCSIIO.SMID = index; + req_desc->SCSIIO.SMID = cpu_to_le16(index); return req_desc; } @@ -2056,9 +2225,10 @@ megasas_check_reset_fusion(struct megasas_instance *instance, } /* This function waits for outstanding commands on fusion to complete */ -int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance) +int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, + int iotimeout, int *convert) { - int i, outstanding, retval = 0; + int i, outstanding, retval = 0, hb_seconds_missed = 0; u32 fw_state; for (i = 0; i < resetwaittime; i++) { /* Check if firmware is in fault state */ fw_state = instance->instancet->read_fw_status_reg( instance->reg_set) & MFI_STATE_MASK; if (fw_state == MFI_STATE_FAULT) { printk(KERN_WARNING "megasas: Found FW in FAULT state," - " will reset adapter.\n"); + " will reset adapter scsi%d.\n", + instance->host->host_no); + retval = 1; + goto out; + } + /* If SR-IOV VF mode & heartbeat timeout, don't wait */ + if (instance->requestorId && !iotimeout) { retval = 1; goto out; } + /* If SR-IOV VF mode & I/O timeout, check for HB timeout */ + if (instance->requestorId && iotimeout) { + if (instance->hb_host_mem->HB.fwCounter != + instance->hb_host_mem->HB.driverCounter) { + instance->hb_host_mem->HB.driverCounter = + instance->hb_host_mem->HB.fwCounter; + hb_seconds_missed = 0; + } else { + hb_seconds_missed++; + if (hb_seconds_missed == + (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) { + printk(KERN_WARNING "megasas: SR-IOV: " + "Heartbeat never completed " + "while polling during I/O " + "timeout handling for " + "scsi%d.\n", + instance->host->host_no); + *convert = 1; + retval = 1; + goto out; + } + } + } + outstanding = atomic_read(&instance->fw_outstanding); if (!outstanding) goto out; if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { printk(KERN_NOTICE "megasas: [%2d]waiting for %d " - "commands to complete\n", i, outstanding); + "commands to complete for scsi%d\n", i, + outstanding, instance->host->host_no); megasas_complete_cmd_dpc_fusion( (unsigned long)instance); }
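The heartbeat check inside megasas_wait_for_outstanding_fusion above is a simple shared-counter handshake: the PF firmware increments fwCounter once per interval, the VF driver mirrors it into driverCounter, and if no progress is seen for a full MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF worth of one-second polls the reset is flagged for conversion. A minimal model of the per-poll decision (a hypothetical struct standing in for instance->hb_host_mem->HB):

#include <stdint.h>

struct hb_model {
	uint32_t fw_counter;	/* incremented by PF firmware */
	uint32_t drv_counter;	/* mirrored by the VF driver */
};

/* Returns 1 once the firmware counter has stalled for 'limit'
 * consecutive polls; mirrors the hb_seconds_missed logic. */
int hb_model_timed_out(struct hb_model *hb, int *missed, int limit)
{
	if (hb->fw_counter != hb->drv_counter) {
		hb->drv_counter = hb->fw_counter;	/* progress observed */
		*missed = 0;
		return 0;
	}
	return ++(*missed) == limit;
}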
@@ -2087,7 +2288,8 @@ if (atomic_read(&instance->fw_outstanding)) { printk("megaraid_sas: pending commands remain after waiting, " - "will reset adapter.\n"); + "will reset adapter scsi%d.\n", + instance->host->host_no); retval = 1; } out: @@ -2109,10 +2311,34 @@ void megasas_reset_reply_desc(struct megasas_instance *instance) reply_desc->Words = ULLONG_MAX; } +/* Check for a second path that is currently UP */ +int megasas_check_mpio_paths(struct megasas_instance *instance, + struct scsi_cmnd *scmd) +{ + int i, j, retval = (DID_RESET << 16); + + if (instance->mpio && instance->requestorId) { + for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++) + for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++) + if (megasas_mgmt_info.instance[i] && + (megasas_mgmt_info.instance[i] != instance) && + megasas_mgmt_info.instance[i]->mpio && + megasas_mgmt_info.instance[i]->requestorId + && + (megasas_mgmt_info.instance[i]->ld_ids[j] + == scmd->device->id)) { + retval = (DID_NO_CONNECT << 16); + goto out; + } + } +out: + return retval; +} + /* Core fusion reset function */ -int megasas_reset_fusion(struct Scsi_Host *shost) +int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout) { - int retval = SUCCESS, i, j, retry = 0; + int retval = SUCCESS, i, j, retry = 0, convert = 0; struct megasas_instance *instance; struct megasas_cmd_fusion *cmd_fusion; struct fusion_context *fusion; @@ -2123,28 +2349,39 @@ int megasas_reset_fusion(struct Scsi_Host *shost) instance = (struct megasas_instance *)shost->hostdata; fusion = instance->ctrl_context; + mutex_lock(&instance->reset_mutex); + if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { printk(KERN_WARNING "megaraid_sas: Hardware critical error, " - "returning FAILED.\n"); + "returning FAILED for scsi%d.\n", + instance->host->host_no); return FAILED; } - mutex_lock(&instance->reset_mutex); + if (instance->requestorId && !instance->skip_heartbeat_timer_del) + del_timer_sync(&instance->sriov_heartbeat_timer); set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); - instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; - instance->instancet->disable_intr(instance->reg_set); + instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING; + instance->instancet->disable_intr(instance); msleep(1000); /* First try waiting for commands to complete */ - if (megasas_wait_for_outstanding_fusion(instance)) { + if (megasas_wait_for_outstanding_fusion(instance, iotimeout, + &convert)) { + instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; printk(KERN_WARNING "megaraid_sas: resetting fusion " - "adapter.\n"); + "adapter scsi%d.\n", instance->host->host_no); + if (convert) + iotimeout = 0; + /* Now return commands back to the OS */ for (i = 0 ; i < instance->max_fw_cmds; i++) { cmd_fusion = fusion->cmd_list[i]; if (cmd_fusion->scmd) { scsi_dma_unmap(cmd_fusion->scmd); - cmd_fusion->scmd->result = (DID_RESET << 16); + cmd_fusion->scmd->result = + megasas_check_mpio_paths(instance, + cmd_fusion->scmd); cmd_fusion->scmd->scsi_done(cmd_fusion->scmd); megasas_return_cmd_fusion(instance, cmd_fusion); atomic_dec(&instance->fw_outstanding); } } @@ -2159,13 +2396,67 @@ int megasas_reset_fusion(struct Scsi_Host *shost) (abs_state == MFI_STATE_FAULT && !reset_adapter)) { /* Reset not supported, kill adapter */ printk(KERN_WARNING "megaraid_sas: Reset not supported" - ", killing adapter.\n"); + ", killing adapter scsi%d.\n", + instance->host->host_no); megaraid_sas_kill_hba(instance); + instance->skip_heartbeat_timer_del = 1; instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; retval = FAILED; goto out; } + /* Let SR-IOV VF & PF sync 
up if there was a HB failure */ + if (instance->requestorId && !iotimeout) { + msleep(MEGASAS_OCR_SETTLE_TIME_VF); + /* Look for a late HB update after VF settle time */ + if (abs_state == MFI_STATE_OPERATIONAL && + (instance->hb_host_mem->HB.fwCounter != + instance->hb_host_mem->HB.driverCounter)) { + instance->hb_host_mem->HB.driverCounter = + instance->hb_host_mem->HB.fwCounter; + printk(KERN_WARNING "megasas: SR-IOV: " + "Late FW heartbeat update for " + "scsi%d.\n", + instance->host->host_no); + } else { + /* In VF mode, first poll for FW ready */ + for (i = 0; + i < (MEGASAS_RESET_WAIT_TIME * 1000); + i += 20) { + status_reg = + instance->instancet-> + read_fw_status_reg( + instance->reg_set); + abs_state = status_reg & + MFI_STATE_MASK; + if (abs_state == MFI_STATE_READY) { + printk(KERN_WARNING "megasas" + ": SR-IOV: FW was found " + "to be in ready state " + "for scsi%d.\n", + instance->host->host_no); + break; + } + msleep(20); + } + if (abs_state != MFI_STATE_READY) { + printk(KERN_WARNING "megasas: SR-IOV: " + "FW not in ready state after %d" + " seconds for scsi%d, status_reg = " + "0x%x.\n", + MEGASAS_RESET_WAIT_TIME, + instance->host->host_no, + status_reg); + megaraid_sas_kill_hba(instance); + instance->skip_heartbeat_timer_del = 1; + instance->adprecovery = + MEGASAS_HW_CRITICAL_ERROR; + retval = FAILED; + goto out; + } + } + } + /* Now try to reset the chip */ + for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) { + writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, @@ -2192,7 +2483,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost) readl(&instance->reg_set->fusion_host_diag); if (retry++ == 100) { printk(KERN_WARNING "megaraid_sas: " - "Host diag unlock failed!\n"); + "Host diag unlock failed! " + "for scsi%d\n", + instance->host->host_no); break; } } @@ -2214,7 +2507,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost) if (retry++ == 1000) { printk(KERN_WARNING "megaraid_sas: " "Diag reset adapter never " - "cleared!\n"); + "cleared for scsi%d!\n", + instance->host->host_no); break; } } @@ -2236,29 +2530,29 @@ int megasas_reset_fusion(struct Scsi_Host *shost) if (abs_state <= MFI_STATE_FW_INIT) { printk(KERN_WARNING "megaraid_sas: firmware " "state < MFI_STATE_FW_INIT, state = " - "0x%x\n", abs_state); + "0x%x for scsi%d\n", abs_state, + instance->host->host_no); continue; } /* Wait for FW to become ready */ if (megasas_transition_to_ready(instance, 1)) { printk(KERN_WARNING "megaraid_sas: Failed to " - "transition controller to ready.\n"); + "transition controller to ready " + "for scsi%d.\n", + instance->host->host_no); continue; } megasas_reset_reply_desc(instance); if (megasas_ioc_init_fusion(instance)) { printk(KERN_WARNING "megaraid_sas: " - "megasas_ioc_init_fusion() failed!\n"); + "megasas_ioc_init_fusion() failed!"
+ " for scsi%d\n", + instance->host->host_no); continue; } - clear_bit(MEGASAS_FUSION_IN_RESET, - &instance->reset_flags); - instance->instancet->enable_intr(instance->reg_set); - instance->adprecovery = MEGASAS_HBA_OPERATIONAL; - /* Re-fire management commands */ for (j = 0 ; j < instance->max_fw_cmds; j++) { cmd_fusion = fusion->cmd_list[j]; @@ -2268,7 +2562,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost) instance-> cmd_list[cmd_fusion->sync_cmd_idx]; if (cmd_mfi->frame->dcmd.opcode == - MR_DCMD_LD_MAP_GET_INFO) { + cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) { megasas_return_cmd(instance, cmd_mfi); megasas_return_cmd_fusion( @@ -2279,11 +2573,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost) instance, cmd_mfi->context.smid -1); - if (!req_desc) + if (!req_desc) { printk(KERN_WARNING "req_desc NULL" - "\n"); - else { + " for scsi%d\n", + instance->host->host_no); + /* Return leaked MPT + frame */ + megasas_return_cmd_fusion(instance, cmd_fusion); + } else { instance->instancet-> fire_cmd(instance, req_desc-> @@ -2297,6 +2595,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost) } } + clear_bit(MEGASAS_FUSION_IN_RESET, + &instance->reset_flags); + instance->instancet->enable_intr(instance); + instance->adprecovery = MEGASAS_HBA_OPERATIONAL; + /* Reset load balance info */ memset(fusion->load_balance_info, 0, sizeof(struct LD_LOAD_BALANCE_INFO) @@ -2305,20 +2608,41 @@ int megasas_reset_fusion(struct Scsi_Host *shost) if (!megasas_get_map_info(instance)) megasas_sync_map_info(instance); + /* Restart SR-IOV heartbeat */ + if (instance->requestorId) { + if (!megasas_sriov_start_heartbeat(instance, 0)) + megasas_start_timer(instance, + &instance->sriov_heartbeat_timer, + megasas_sriov_heartbeat_handler, + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); + else + instance->skip_heartbeat_timer_del = 1; + } + /* Adapter reset completed successfully */ printk(KERN_WARNING "megaraid_sas: Reset " - "successful.\n"); + "successful for scsi%d.\n", + instance->host->host_no); retval = SUCCESS; goto out; } /* Reset failed, kill the adapter */ printk(KERN_WARNING "megaraid_sas: Reset failed, killing " - "adapter.\n"); + "adapter scsi%d.\n", instance->host->host_no); megaraid_sas_kill_hba(instance); + instance->skip_heartbeat_timer_del = 1; + instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; retval = FAILED; } else { + /* For VF: Restart HB timer if we didn't OCR */ + if (instance->requestorId) { + megasas_start_timer(instance, + &instance->sriov_heartbeat_timer, + megasas_sriov_heartbeat_handler, + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); + } clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); - instance->instancet->enable_intr(instance->reg_set); + instance->instancet->enable_intr(instance); instance->adprecovery = MEGASAS_HBA_OPERATIONAL; } out: @@ -2333,7 +2657,7 @@ void megasas_fusion_ocr_wq(struct work_struct *work) struct megasas_instance *instance = container_of(work, struct megasas_instance, work_init); - megasas_reset_fusion(instance->host); + megasas_reset_fusion(instance->host, 0); } struct megasas_instance_template megasas_instance_template_fusion = { diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index f68a3cd11d5..e76af5459a0 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -43,7 +43,7 @@ #define HOST_DIAG_WRITE_ENABLE 0x80 #define HOST_DIAG_RESET_ADAPTER 0x4 #define MEGASAS_FUSION_MAX_RESET_TRIES 3 -#define MAX_MSIX_QUEUES_FUSION 16 +#define MAX_MSIX_QUEUES_FUSION 128 
/* Invader defines */ #define MPI2_TYPE_CUDA 0x2 @@ -62,6 +62,9 @@ #define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20 #define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60 +#define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) +#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) + /* * Raid context flags */ @@ -85,13 +88,18 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { #define MEGASAS_FUSION_IN_RESET 0 /* - * Raid Context structure which describes MegaRAID specific IO Paramenters + * Raid Context structure which describes MegaRAID specific IO Parameters * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames */ struct RAID_CONTEXT { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 nseg:4; + u8 Type:4; +#else u8 Type:4; u8 nseg:4; +#endif u8 resvd0; u16 timeoutValue; u8 regLockFlags; @@ -295,8 +303,13 @@ struct MPI2_RAID_SCSI_IO_REQUEST { * MPT RAID MFA IO Descriptor. */ struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 MessageAddress1:24; /* bits 31:8*/ + u32 RequestFlags:8; +#else u32 RequestFlags:8; u32 MessageAddress1:24; /* bits 31:8*/ +#endif u32 MessageAddress2; /* bits 61:32 */ }; @@ -460,6 +473,7 @@ struct MPI2_IOC_INIT_REQUEST { /* mrpriv defines */ #define MR_PD_INVALID 0xFFFF #define MAX_SPAN_DEPTH 8 +#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH #define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH) #define MAX_ROW_SIZE 32 #define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE) @@ -471,6 +485,9 @@ struct MPI2_IOC_INIT_REQUEST { #define MAX_PHYSICAL_DEVICES 256 #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES) #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 +#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/ +#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200 +#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200 struct MR_DEV_HANDLE_INFO { u16 curDevHdl; @@ -501,7 +518,9 @@ struct MR_LD_SPAN { u64 startBlk; u64 numBlks; u16 arrayRef; - u8 reserved[6]; + u8 spanRowSize; + u8 spanRowDataSize; + u8 reserved[4]; }; struct MR_SPAN_BLOCK_INFO { @@ -512,6 +531,19 @@ struct MR_SPAN_BLOCK_INFO { struct MR_LD_RAID { struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 reserved4:7; + u32 fpNonRWCapable:1; + u32 fpReadAcrossStripe:1; + u32 fpWriteAcrossStripe:1; + u32 fpReadCapable:1; + u32 fpWriteCapable:1; + u32 encryptionType:8; + u32 pdPiMode:4; + u32 ldPiMode:4; + u32 reserved5:3; + u32 fpCapable:1; +#else u32 fpCapable:1; u32 reserved5:3; u32 ldPiMode:4; @@ -521,7 +553,9 @@ struct MR_LD_RAID { u32 fpReadCapable:1; u32 fpWriteAcrossStripe:1; u32 fpReadAcrossStripe:1; - u32 reserved4:8; + u32 fpNonRWCapable:1; + u32 reserved4:7; +#endif } capability; u32 reserved6; u64 size; @@ -545,7 +579,9 @@ struct MR_LD_RAID { u32 reserved:31; } flags; - u8 reserved3[0x5C]; + u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */ + u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/ + u8 reserved3[0x80-0x2D]; /* 0x2D */ }; struct MR_LD_SPAN_MAP { @@ -587,6 +623,10 @@ struct IO_REQUEST_INFO { u16 devHandle; u64 pdBlock; u8 fpOkForIo; + u8 IoforUnevenSpan; + u8 start_span; + u8 reserved; + u64 start_row; }; struct MR_LD_TARGET_SYNC { @@ -648,6 +688,26 @@ struct LD_LOAD_BALANCE_INFO { u64 last_accessed_block[2]; }; +/* SPAN_SET is info calculated from span info from Raid map per LD */ +typedef struct _LD_SPAN_SET { + u64 log_start_lba; + u64 log_end_lba; + u64 span_row_start; + u64 span_row_end; + u64 data_strip_start; + u64 data_strip_end; + u64 data_row_start; + u64 data_row_end; + u8 strip_offset[MAX_SPAN_DEPTH]; + u32 span_row_data_width; + u32
diff; + u32 reserved[2]; +} LD_SPAN_SET, *PLD_SPAN_SET; + +typedef struct LOG_BLOCK_SPAN_INFO { + LD_SPAN_SET span_set[MAX_SPAN_DEPTH]; +} LD_SPAN_INFO, *PLD_SPAN_INFO; + struct MR_FW_RAID_MAP_ALL { struct MR_FW_RAID_MAP raidMap; struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1]; @@ -692,6 +752,7 @@ struct fusion_context { u32 map_sz; u8 fast_path_io; struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES]; + LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES]; }; union desc_value { diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h index e960f9625c7..7b14a015c90 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2.h @@ -8,7 +8,7 @@ * scatter/gather formats. * Creation Date: June 21, 2006 * - * mpi2.h Version: 02.00.25 + * mpi2.h Version: 02.00.28 * * Version History * --------------- @@ -75,6 +75,9 @@ * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT. * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT. * Added Hard Reset delay timings. + * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. * -------------------------------------------------------------------------- */ @@ -100,7 +103,7 @@ #define MPI2_VERSION_02_00 (0x0200) /* versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x19) +#define MPI2_HEADER_VERSION_UNIT (0x1C) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index 737fa8cfb54..88cb7f828bb 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2011 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_cnfg.h * Title: MPI Configuration messages and pages * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.22 + * mpi2_cnfg.h Version: 02.00.23 * * Version History * --------------- @@ -149,6 +149,8 @@ * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT. * Added UEFIVersion field to BIOS Page 1 and defined new * BiosOptions bits. + * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER. + * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. 
* -------------------------------------------------------------------------- */ @@ -698,6 +700,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 #define MPI2_MANUFACTURING7_PAGEVERSION (0x01) /* defines for the Flags field */ +#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) @@ -1224,6 +1227,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 #define MPI2_BIOSPAGE1_PAGEVERSION (0x05) /* values for BIOS Page 1 BiosOptions field */ +#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) +#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) + #define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006) #define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000) #define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002) diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h index 38c5da39814..9d284dae655 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_init.h * Title: MPI SCSI initiator mode messages and structures * Creation Date: June 23, 2006 * - * mpi2_init.h Version: 02.00.13 + * mpi2_init.h Version: 02.00.14 * * Version History * --------------- @@ -36,6 +36,7 @@ * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define. * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command * Priority to match SAM-4. + * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION. * -------------------------------------------------------------------------- */ @@ -189,6 +190,7 @@ typedef struct _MPI2_SCSI_IO_REQUEST #define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26) #define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000) +#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24) #define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000) #define MPI2_SCSIIO_CONTROL_WRITE (0x01000000) #define MPI2_SCSIIO_CONTROL_READ (0x02000000) diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h index b0d4760bb17..d159c5f24aa 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_ioc.h * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages * Creation Date: October 11, 2006 * - * mpi2_ioc.h Version: 02.00.21 + * mpi2_ioc.h Version: 02.00.22 * * Version History * --------------- @@ -118,6 +118,9 @@ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure. * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete. * 03-29-12 02.00.21 Added a product specific range to event values. + * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE. + * Added ElapsedSeconds field to + * MPI2_EVENT_DATA_IR_OPERATION_STATUS. 
* -------------------------------------------------------------------------- */ @@ -284,6 +287,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY #define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0) /* IOCExceptions */ +#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200) #define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100) #define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0) @@ -624,7 +628,7 @@ typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS U8 RAIDOperation; /* 0x04 */ U8 PercentComplete; /* 0x05 */ U16 Reserved2; /* 0x06 */ - U32 Resereved3; /* 0x08 */ + U32 ElapsedSeconds; /* 0x08 */ } MPI2_EVENT_DATA_IR_OPERATION_STATUS, MPI2_POINTER PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS, Mpi2EventDataIrOperationStatus_t, diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h index 2b38af213be..0d202a2c6db 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_raid.h * Title: MPI Integrated RAID messages and structures * Creation Date: April 26, 2007 * - * mpi2_raid.h Version: 02.00.08 + * mpi2_raid.h Version: 02.00.09 * * Version History * --------------- @@ -27,6 +27,8 @@ * related structures and defines. * Added product-specific range to RAID Action values. * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. + * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. + * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. * -------------------------------------------------------------------------- */ @@ -276,10 +278,13 @@ typedef struct _MPI2_RAID_VOL_INDICATOR U64 TotalBlocks; /* 0x00 */ U64 BlocksRemaining; /* 0x08 */ U32 Flags; /* 0x10 */ + U32 ElapsedSeconds; /* 0x14 */ } MPI2_RAID_VOL_INDICATOR, MPI2_POINTER PTR_MPI2_RAID_VOL_INDICATOR, Mpi2RaidVolIndicator_t, MPI2_POINTER pMpi2RaidVolIndicator_t; /* defines for RAID Volume Indicator Flags field */ +#define MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID (0x80000000) + #define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F) #define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000) #define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001) @@ -320,7 +325,7 @@ MPI2_POINTER pMpi2RaidCompatibilityResultStruct_t; /* RAID Action Reply ActionData union */ typedef union _MPI2_RAID_ACTION_REPLY_DATA { - U32 Word[5]; + U32 Word[6]; MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator; U16 VolDevHandle; U8 VolumeState; diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h index fdffde1ebc0..50b39ccd526 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2010 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_sas.h diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h index 3cbe677c688..11b2ac4e7c6 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2010 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_tool.h * Title: MPI diagnostic tool structures and definitions * Creation Date: March 26, 2007 * - * mpi2_tool.h Version: 02.00.07 + * mpi2_tool.h Version: 02.00.10 * * Version History * --------------- @@ -27,6 +27,8 @@ * Post Request. * 05-25-11 02.00.07 Added Flags field and related defines to * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. 
+ * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that + * it uses MPI Chain SGE as well as MPI Simple SGE. * -------------------------------------------------------------------------- */ @@ -270,7 +272,7 @@ typedef struct _MPI2_TOOLBOX_BEACON_REQUEST #define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C) -/* Toolbox Diagnostic CLI Tool request message */ +/* MPI v2.0 Toolbox Diagnostic CLI Tool request message */ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { U8 Tool; /* 0x00 */ U8 Reserved1; /* 0x01 */ @@ -288,7 +290,7 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { U32 DataLength; /* 0x10 */ U8 DiagnosticCliCommand [MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH]; /* 0x14 */ - MPI2_SGE_SIMPLE_UNION SGL; /* 0x70 */ + MPI2_MPI_SGE_IO_UNION SGL; /* 0x70 */ } MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, MPI2_POINTER PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, Mpi2ToolboxDiagnosticCliRequest_t, diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_type.h b/drivers/scsi/mpt2sas/mpi/mpi2_type.h index cfde017bf16..0b128b68a5e 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_type.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_type.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_type.h diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index bcb23d28b3e..8b88118e20e 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -3,7 +3,7 @@ * for access to MPT (Message Passing Technology) firmware. * * This code is based on drivers/scsi/mpt2sas/mpt2_base.c - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -80,10 +80,6 @@ static int msix_disable = -1; module_param(msix_disable, int, 0); MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); -static int missing_delay[2] = {-1, -1}; -module_param_array(missing_delay, int, NULL, 0); -MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay"); - static int mpt2sas_fwfault_debug; MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " "and halt firmware - (default=0)"); @@ -132,7 +128,7 @@ static int mpt2sas_remove_dead_ioc_func(void *arg) pdev = ioc->pdev; if ((pdev == NULL)) return -1; - pci_stop_and_remove_bus_device(pdev); + pci_stop_and_remove_bus_device_locked(pdev); return 0; } @@ -772,10 +768,9 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, * @msix_index: MSIX table index supplied by the OS * @reply: reply message frame(lower 32bit addr) * - * Return 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Returns void. 
*/ -static u8 +static void _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply) { Mpi2EventNotificationReply_t *mpi_reply; @@ -784,9 +779,9 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply) mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); if (!mpi_reply) - return 1; + return; if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION) - return 1; + return; #ifdef CONFIG_SCSI_MPT2SAS_LOGGING _base_display_event_data(ioc, mpi_reply); #endif @@ -816,7 +811,7 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply) /* ctl callback handler */ mpt2sas_ctl_event_callback(ioc, msix_index, reply); - return 1; + return; } /** @@ -1413,8 +1408,6 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc) int i; u8 try_msix = 0; - INIT_LIST_HEAD(&ioc->reply_queue_list); - if (msix_disable == -1 || msix_disable == 0) try_msix = 1; @@ -1493,6 +1486,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) if (pci_enable_device_mem(pdev)) { printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: " "failed\n", ioc->name); + ioc->bars = 0; return -ENODEV; } @@ -1501,6 +1495,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) MPT2SAS_DRIVER_NAME)) { printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: " "failed\n", ioc->name); + ioc->bars = 0; r = -ENODEV; goto out_fail; } @@ -1744,14 +1739,14 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid) list_for_each_entry_safe(chain_req, next, &ioc->scsi_lookup[i].chain_list, tracker_list) { list_del_init(&chain_req->tracker_list); - list_add_tail(&chain_req->tracker_list, + list_add(&chain_req->tracker_list, &ioc->free_chain_list); } } ioc->scsi_lookup[i].cb_idx = 0xFF; ioc->scsi_lookup[i].scmd = NULL; ioc->scsi_lookup[i].direct_io = 0; - list_add_tail(&ioc->scsi_lookup[i].tracker_list, + list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list); spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); @@ -1769,13 +1764,13 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid) /* hi-priority */ i = smid - ioc->hi_priority_smid; ioc->hpr_lookup[i].cb_idx = 0xFF; - list_add_tail(&ioc->hpr_lookup[i].tracker_list, + list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list); } else if (smid <= ioc->hba_queue_depth) { /* internal queue */ i = smid - ioc->internal_smid; ioc->internal_lookup[i].cb_idx = 0xFF; - list_add_tail(&ioc->internal_lookup[i].tracker_list, + list_add(&ioc->internal_lookup[i].tracker_list, &ioc->internal_free_list); } spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); @@ -2199,7 +2194,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) } /** - * _base_update_missing_delay - change the missing delay timers + * mpt2sas_base_update_missing_delay - change the missing delay timers * @ioc: per adapter object * @device_missing_delay: amount of time till device is reported missing * @io_missing_delay: interval IO is returned when there is a missing device @@ -2210,8 +2205,8 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) * delay, as well as the io missing delay. This should be called at driver * load time. 
*/ -static void -_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc, +void +mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc, u16 device_missing_delay, u8 io_missing_delay) { u16 dmd, dmd_new, dmd_orignal; @@ -2507,23 +2502,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) /* reply free queue sizing - taking into account for 64 FW events */ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; + /* calculate reply descriptor post queue depth */ + ioc->reply_post_queue_depth = ioc->hba_queue_depth + + ioc->reply_free_queue_depth + 1; /* align the reply post queue on the next 16 count boundary */ - if (!ioc->reply_free_queue_depth % 16) - ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16; - else - ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + - 32 - (ioc->reply_free_queue_depth % 16); + if (ioc->reply_post_queue_depth % 16) + ioc->reply_post_queue_depth += 16 - + (ioc->reply_post_queue_depth % 16); + + if (ioc->reply_post_queue_depth > facts->MaxReplyDescriptorPostQueueDepth) { - ioc->reply_post_queue_depth = min_t(u16, - (facts->MaxReplyDescriptorPostQueueDepth - - (facts->MaxReplyDescriptorPostQueueDepth % 16)), - (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16))); - ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16; - ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64; + ioc->reply_post_queue_depth = + facts->MaxReplyDescriptorPostQueueDepth - + (facts->MaxReplyDescriptorPostQueueDepth % 16); + ioc->hba_queue_depth = + ((ioc->reply_post_queue_depth - 64) / 2) - 1; + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; } - dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: " "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), " "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message, @@ -3940,11 +3937,15 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER, &ioc->chip->HostDiagnostic); - /* don't access any registers for 50 milliseconds */ - msleep(50); + /* This delay allows the chip PCIe hardware time to finish reset tasks*/ + if (sleep_flag == CAN_SLEEP) + msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000); + else + mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000); - /* 300 second max wait */ - for (count = 0; count < 3000000 ; count++) { + /* Approximately 300 second max wait */ + for (count = 0; count < (300000000 / + MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) { host_diagnostic = readl(&ioc->chip->HostDiagnostic); @@ -3953,11 +3954,13 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) break; - /* wait 100 msec */ + /* Wait to pass the second read delay window */ if (sleep_flag == CAN_SLEEP) - msleep(1); + msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC + /1000); else - mdelay(1); + mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC + /1000); } if (host_diagnostic & MPI2_DIAG_HCB_MODE) { @@ -4225,18 +4228,25 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc) dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); - _base_mask_interrupts(ioc); - ioc->shost_recovery = 1; - _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); - ioc->shost_recovery = 0; + if (ioc->chip_phys && ioc->chip) { + _base_mask_interrupts(ioc); + ioc->shost_recovery = 1; + _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); + ioc->shost_recovery = 0; + } + _base_free_irq(ioc); _base_disable_msix(ioc); - 
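The _base_allocate_memory_pools() hunk above re-derives the reply descriptor post queue from the HBA queue depth plus the reply free queue (which reserves 64 extra slots for firmware events), aligns it up to a 16-entry boundary, and only then clamps it to the firmware limit, rescaling the other depths when the clamp fires. A standalone sketch of that arithmetic, for illustration only and with hypothetical names; e.g. hba_queue_depth = 1000 yields reply_free = 1064 and reply_post = 2065, aligned up to 2080:

#include <linux/types.h>

/* Mirrors the queue sizing in the hunk above; not a drop-in function. */
static void example_size_reply_queues(u32 hba_queue_depth, u32 fw_max_post_depth)
{
	u32 reply_free = hba_queue_depth + 64;	/* 64 extra for FW events */
	u32 reply_post = hba_queue_depth + reply_free + 1;

	/* align the reply post queue on the next 16 count boundary */
	if (reply_post % 16)
		reply_post += 16 - (reply_post % 16);

	/* honor the firmware maximum, then rescale the other depths */
	if (reply_post > fw_max_post_depth) {
		reply_post = fw_max_post_depth -
			(fw_max_post_depth % 16);
		hba_queue_depth = ((reply_post - 64) / 2) - 1;
		reply_free = hba_queue_depth + 64;
	}
}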
if (ioc->chip_phys) + + if (ioc->chip_phys && ioc->chip) iounmap(ioc->chip); ioc->chip_phys = 0; - pci_release_selected_regions(ioc->pdev, ioc->bars); - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); + + if (pci_is_enabled(pdev)) { + pci_release_selected_regions(ioc->pdev, ioc->bars); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + } return; } @@ -4407,9 +4417,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) if (r) goto out_free_resources; - if (missing_delay[0] != -1 && missing_delay[1] != -1) - _base_update_missing_delay(ioc, missing_delay[0], - missing_delay[1]); ioc->non_operational_loop = 0; return 0; diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 4caaac13682..fd3b998c75b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -3,7 +3,7 @@ * for access to MPT (Message Passing Technology) firmware. * * This code is based on drivers/scsi/mpt2sas/mpt2_base.h - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -69,8 +69,8 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "14.100.00.00" -#define MPT2SAS_MAJOR_VERSION 14 +#define MPT2SAS_DRIVER_VERSION "16.100.00.00" +#define MPT2SAS_MAJOR_VERSION 16 #define MPT2SAS_MINOR_VERSION 100 #define MPT2SAS_BUILD_VERSION 00 #define MPT2SAS_RELEASE_VERSION 00 @@ -1055,14 +1055,17 @@ void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_ty void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc); +void mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc, + u16 device_missing_delay, u8 io_missing_delay); + int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc); /* scsih shared API */ -u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, +void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply); int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, uint id, uint lun, u8 type, u16 smid_task, - ulong timeout, unsigned long serial_number, enum mutex_type m_type); + ulong timeout, enum mutex_type m_type); void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address); @@ -1141,7 +1144,7 @@ void mpt2sas_ctl_exit(void); u8 mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply); void mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase); -u8 mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, +void mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply); void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc, Mpi2EventNotificationReply_t *mpi_reply); diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c index 863778071a9..0c47425c73f 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_config.c +++ b/drivers/scsi/mpt2sas/mpt2sas_config.c @@ -2,7 +2,7 @@ * This module provides common API for accessing firmware configuration pages * * This code is based on drivers/scsi/mpt2sas/mpt2_base.c - * Copyright (C) 2007-2012 LSI Corporation + * Copyright 
(C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 08685c4cf23..62df8f9d427 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -3,7 +3,7 @@ * controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -397,18 +397,22 @@ mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc, * This function merely adds a new work task into ioc->firmware_event_thread. * The tasks are worked from _firmware_event_work in user context. * - * Return 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Returns void. */ -u8 +void mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply) { Mpi2EventNotificationReply_t *mpi_reply; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); + if (unlikely(!mpi_reply)) { + printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } mpt2sas_ctl_add_to_event_log(ioc, mpi_reply); - return 1; + return; } /** @@ -505,19 +509,6 @@ _ctl_fasync(int fd, struct file *filep, int mode) } /** - * _ctl_release - - * @inode - - * @filep - - * - * Called when application releases the fasyn callback handler. - */ -static int -_ctl_release(struct inode *inode, struct file *filep) -{ - return fasync_helper(-1, filep, 0, &async_queue); -} - -/** * _ctl_poll - * @file - * @wait - @@ -996,7 +987,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg, mpt2sas_scsih_issue_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10, - 0, TM_MUTEX_ON); + TM_MUTEX_ON); ioc->tm_cmds.status = MPT2_CMD_NOT_USED; } else mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, @@ -3027,7 +3018,6 @@ struct device_attribute *mpt2sas_dev_attrs[] = { static const struct file_operations ctl_fops = { .owner = THIS_MODULE, .unlocked_ioctl = _ctl_ioctl, - .release = _ctl_release, .poll = _ctl_poll, .fasync = _ctl_fasync, #ifdef CONFIG_COMPAT diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h index b5eb0d1b8ea..8b2ac1869dc 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h @@ -3,7 +3,7 @@ * controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h index 69cc7d0c112..a9021cbd662 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_debug.h +++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h @@ -2,7 +2,7 @@ * Logging Support for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index c6bdc926722..5055f925d2c 100644 --- 
a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2,7 +2,7 @@ * Scsi Host Layer for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -101,6 +101,10 @@ static ushort max_sectors = 0xFFFF; module_param(max_sectors, ushort, 0); MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767"); +static int missing_delay[2] = {-1, -1}; +module_param_array(missing_delay, int, NULL, 0); +MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay"); + /* scsi-mid layer global parmeter is max_report_luns, which is 511 */ #define MPT2SAS_MAX_LUN (16895) static int max_lun = MPT2SAS_MAX_LUN; @@ -624,11 +628,12 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc, * devices while scanning is turned on due to an oops in * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start() */ - if (!ioc->is_driver_loading) + if (!ioc->is_driver_loading) { mpt2sas_transport_port_remove(ioc, sas_device->sas_address, sas_device->sas_address_parent); - _scsih_sas_device_remove(ioc, sas_device); + _scsih_sas_device_remove(ioc, sas_device); + } } } @@ -1398,6 +1403,7 @@ _scsih_slave_alloc(struct scsi_device *sdev) struct MPT2SAS_DEVICE *sas_device_priv_data; struct scsi_target *starget; struct _raid_device *raid_device; + struct _sas_device *sas_device; unsigned long flags; sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL); @@ -1426,6 +1432,19 @@ _scsih_slave_alloc(struct scsi_device *sdev) spin_unlock_irqrestore(&ioc->raid_device_lock, flags); } + if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, + sas_target_priv_data->sas_address); + if (sas_device && (sas_device->starget == NULL)) { + sdev_printk(KERN_INFO, sdev, + "%s : sas_device->starget set to starget @ %d\n", + __func__, __LINE__); + sas_device->starget = starget; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + return 0; } @@ -2349,7 +2368,6 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle) * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) * @smid_task: smid assigned to the task * @timeout: timeout in seconds - * @serial_number: the serial_number from scmd * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF * Context: user * @@ -2362,7 +2380,7 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle) int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout, - unsigned long serial_number, enum mutex_type m_type) + enum mutex_type m_type) { Mpi2SCSITaskManagementRequest_t *mpi_request; Mpi2SCSITaskManagementReply_t *mpi_reply; @@ -2615,8 +2633,7 @@ _scsih_abort(struct scsi_cmnd *scmd) handle = sas_device_priv_data->sas_target->handle; r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, scmd->device->lun, - MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, - scmd->serial_number, TM_MUTEX_ON); + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON); out: sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", @@ -2677,8 +2694,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) r = mpt2sas_scsih_issue_tm(ioc, handle, 
scmd->device->channel, scmd->device->id, scmd->device->lun, - MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0, - TM_MUTEX_ON); + MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON); out: sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", @@ -2738,7 +2754,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd) r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, - 30, 0, TM_MUTEX_ON); + 30, TM_MUTEX_ON); out: starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", @@ -3934,9 +3950,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full */ static int -_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) +_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { - struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); struct MPT2SAS_DEVICE *sas_device_priv_data; struct MPT2SAS_TARGET *sas_target_priv_data; struct _raid_device *raid_device; @@ -3944,7 +3960,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) u32 mpi_control; u16 smid; - scmd->scsi_done = done; sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { scmd->result = DID_NO_CONNECT << 16; @@ -3994,11 +4009,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) else mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; } else -/* MPI Revision I (UNIT = 0xA) - removed MPI2_SCSIIO_CONTROL_UNTAGGED */ -/* mpi_control |= MPI2_SCSIIO_CONTROL_UNTAGGED; - */ - mpi_control |= (0x500); - + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; } else mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; /* Make sure Device is not raid volume. 
@@ -4024,7 +4035,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) MPT_TARGET_FLAGS_RAID_COMPONENT) mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; else - mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; mpi_request->DevHandle = cpu_to_le16(sas_device_priv_data->sas_target->handle); mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); @@ -4068,8 +4079,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) return SCSI_MLQUEUE_HOST_BUSY; } -static DEF_SCSI_QCMD(_scsih_qcmd) - /** * _scsih_normalize_sense - normalize descriptor and fixed format sense data * @sense_buffer: sense data returned by target @@ -5815,9 +5824,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT2SAS_ADAPTER *ioc, u8 task_abort_retries; mutex_lock(&ioc->tm_cmds.mutex); - dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: phy number(%d), " - "width(%d)\n", ioc->name, __func__, event_data->PhyNum, - event_data->PortWidth)); + pr_info(MPT2SAS_FMT + "%s: enter: phy number(%d), width(%d)\n", + ioc->name, __func__, event_data->PhyNum, + event_data->PortWidth); _scsih_block_io_all_device(ioc); @@ -5864,7 +5874,7 @@ broadcast_aen_retry: spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, - MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0, + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, TM_MUTEX_OFF); if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, @@ -5906,7 +5916,7 @@ broadcast_aen_retry: r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, - scmd->serial_number, TM_MUTEX_OFF); + TM_MUTEX_OFF); if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : " @@ -6752,7 +6762,7 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc) handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) break; handle = le16_to_cpu(sas_device_pg0.DevHandle); device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); @@ -6861,7 +6871,7 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc) &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) break; handle = le16_to_cpu(volume_pg1.DevHandle); @@ -6886,7 +6896,7 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc) phys_disk_num))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) break; phys_disk_num = pd_pg0.PhysDiskNum; handle = le16_to_cpu(pd_pg0.DevHandle); @@ -6966,7 +6976,7 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc) ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) break; handle = le16_to_cpu(expander_pg0.DevHandle); @@ -7093,20 +7103,28 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) struct _sas_device *sas_device; struct _sas_node *expander_device; static struct _raid_device *raid_device; + u8 retry_count; unsigned long flags; printk(MPT2SAS_INFO_FMT "scan devices: 
start\n", ioc->name); _scsih_sas_host_refresh(ioc); + printk(MPT2SAS_INFO_FMT "\tscan devices: expanders start\n", + ioc->name); /* expanders */ handle = 0xFFFF; while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT2SAS_INFO_FMT "\tbreak from expander scan: " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; + } handle = le16_to_cpu(expander_pg0.DevHandle); spin_lock_irqsave(&ioc->sas_node_lock, flags); expander_device = mpt2sas_scsih_expander_find_by_sas_address( @@ -7115,13 +7133,26 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) if (expander_device) _scsih_refresh_expander_links(ioc, expander_device, handle); - else + else { + printk(MPT2SAS_INFO_FMT "\tBEFORE adding expander: " + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(expander_pg0.SASAddress)); _scsih_expander_add(ioc, handle); + printk(MPT2SAS_INFO_FMT "\tAFTER adding expander: " + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(expander_pg0.SASAddress)); + } } + printk(MPT2SAS_INFO_FMT "\tscan devices: expanders complete\n", + ioc->name); + if (!ioc->ir_firmware) goto skip_to_sas; + printk(MPT2SAS_INFO_FMT "\tscan devices phys disk start\n", ioc->name); /* phys disk */ phys_disk_num = 0xFF; while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply, @@ -7129,8 +7160,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) phys_disk_num))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan:" + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; + } phys_disk_num = pd_pg0.PhysDiskNum; handle = le16_to_cpu(pd_pg0.DevHandle); spin_lock_irqsave(&ioc->sas_device_lock, flags); @@ -7142,25 +7178,59 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle) != 0) continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { + printk(MPT2SAS_INFO_FMT "\tBEFORE adding phys disk: " + " handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); mpt2sas_transport_update_links(ioc, sas_address, handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); set_bit(handle, ioc->pd_handles); - _scsih_add_device(ioc, handle, 0, 1); + retry_count = 0; + /* This will retry adding the end device. 
+ * _scsih_add_device() will decide on retries and + * return "1" when it should be retried + */ + while (_scsih_add_device(ioc, handle, retry_count++, + 1)) { + ssleep(1); + } + printk(MPT2SAS_INFO_FMT "\tAFTER adding phys disk: " + " handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); } } + printk(MPT2SAS_INFO_FMT "\tscan devices: phys disk complete\n", + ioc->name); + + printk(MPT2SAS_INFO_FMT "\tscan devices: volumes start\n", ioc->name); /* volumes */ handle = 0xFFFF; while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply, &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); break; + } handle = le16_to_cpu(volume_pg1.DevHandle); spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = _scsih_raid_device_find_by_wwid(ioc, @@ -7172,18 +7242,38 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, sizeof(Mpi2RaidVolPage0_t))) continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: " + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t)); element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; element.VolDevHandle = volume_pg1.DevHandle; + printk(MPT2SAS_INFO_FMT "\tBEFORE adding volume: " + " handle (0x%04x)\n", ioc->name, + volume_pg1.DevHandle); _scsih_sas_volume_add(ioc, &element); + printk(MPT2SAS_INFO_FMT "\tAFTER adding volume: " + " handle (0x%04x)\n", ioc->name, + volume_pg1.DevHandle); } } + printk(MPT2SAS_INFO_FMT "\tscan devices: volumes complete\n", + ioc->name); + skip_to_sas: + printk(MPT2SAS_INFO_FMT "\tscan devices: end devices start\n", + ioc->name); /* sas devices */ handle = 0xFFFF; while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, @@ -7191,8 +7281,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) - break; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + printk(MPT2SAS_INFO_FMT "\tbreak from end device scan:" + " ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } handle = le16_to_cpu(sas_device_pg0.DevHandle); if (!(_scsih_is_end_device( le32_to_cpu(sas_device_pg0.DeviceInfo)))) @@ -7205,12 +7300,31 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc) continue; parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { + printk(MPT2SAS_INFO_FMT "\tBEFORE adding end device: " + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); mpt2sas_transport_update_links(ioc, sas_address, handle, 
sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); - _scsih_add_device(ioc, handle, 0, 0); + retry_count = 0; + /* This will retry adding the end device. + * _scsih_add_device() will decide on retries and + * return "1" when it should be retried + */ + while (_scsih_add_device(ioc, handle, retry_count++, + 0)) { + ssleep(1); + } + printk(MPT2SAS_INFO_FMT "\tAFTER adding end device: " + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); } } + printk(MPT2SAS_INFO_FMT "\tscan devices: end devices complete\n", + ioc->name); + printk(MPT2SAS_INFO_FMT "scan devices: complete\n", ioc->name); } @@ -7303,7 +7417,9 @@ _firmware_event_work(struct work_struct *work) case MPT2SAS_PORT_ENABLE_COMPLETE: ioc->start_scan = 0; - + if (missing_delay[0] != -1 && missing_delay[1] != -1) + mpt2sas_base_update_missing_delay(ioc, missing_delay[0], + missing_delay[1]); dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete " "from worker thread\n", ioc->name)); @@ -7356,10 +7472,9 @@ _firmware_event_work(struct work_struct *work) * This function merely adds a new work task into ioc->firmware_event_thread. * The tasks are worked from _firmware_event_work in user context. * - * Return 1 meaning mf should be freed from _base_interrupt - * 0 means the mf is freed from this function. + * Returns void. */ -u8 +void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply) { @@ -7370,14 +7485,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, /* events turned off due to host reset or driver unloading */ if (ioc->remove_host || ioc->pci_error_recovery) - return 1; + return; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); if (unlikely(!mpi_reply)) { printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); - return 1; + return; } event = le16_to_cpu(mpi_reply->Event); @@ -7392,11 +7507,11 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, if (baen_data->Primitive != MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) - return 1; + return; if (ioc->broadcast_aen_busy) { ioc->broadcast_aen_pending++; - return 1; + return; } else ioc->broadcast_aen_busy = 1; break; @@ -7472,14 +7587,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, break; default: /* ignore the rest */ - return 1; + return; } fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); if (!fw_event) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); - return 1; + return; } sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; fw_event->event_data = kzalloc(sz, GFP_ATOMIC); @@ -7487,7 +7602,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); kfree(fw_event); - return 1; + return; } memcpy(fw_event->event_data, mpi_reply->EventData, @@ -7497,7 +7612,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, fw_event->VP_ID = mpi_reply->VP_ID; fw_event->event = event; _scsih_fw_event_add(ioc, fw_event); - return 1; + return; } /* shost template */ @@ -7596,10 +7711,6 @@ _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc) if (!ioc->ir_firmware) return; - /* are there any volumes ? 
*/ - if (list_empty(&ioc->raid_device_list)) - return; - mutex_lock(&ioc->scsih_cmds.mutex); if (ioc->scsih_cmds.status != MPT2_CMD_NOT_USED) { @@ -7814,10 +7925,12 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc) sas_device->sas_address_parent)) { _scsih_sas_device_remove(ioc, sas_device); } else if (!sas_device->starget) { - if (!ioc->is_driver_loading) - mpt2sas_transport_port_remove(ioc, sas_address, + if (!ioc->is_driver_loading) { + mpt2sas_transport_port_remove(ioc, + sas_address, sas_address_parent); - _scsih_sas_device_remove(ioc, sas_device); + _scsih_sas_device_remove(ioc, sas_device); + } } } } @@ -7870,14 +7983,14 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc) kfree(sas_device); continue; } else if (!sas_device->starget) { - if (!ioc->is_driver_loading) + if (!ioc->is_driver_loading) { mpt2sas_transport_port_remove(ioc, sas_device->sas_address, sas_device->sas_address_parent); - list_del(&sas_device->list); - kfree(sas_device); - continue; - + list_del(&sas_device->list); + kfree(sas_device); + continue; + } } spin_lock_irqsave(&ioc->sas_device_lock, flags); list_move_tail(&sas_device->list, &ioc->sas_device_list); @@ -8060,6 +8173,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); INIT_LIST_HEAD(&ioc->delayed_tr_list); INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); + INIT_LIST_HEAD(&ioc->reply_queue_list); /* init shost parameters */ shost->max_cmd_len = 32; @@ -8070,8 +8184,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (max_sectors != 0xFFFF) { if (max_sectors < 64) { shost->max_sectors = 64; - printk(MPT2SAS_WARN_FMT "Invalid value %d passed "\ - "for max_sectors, range is 64 to 32767. Assigning "\ + printk(MPT2SAS_WARN_FMT "Invalid value %d passed " + "for max_sectors, range is 64 to 32767. 
Assigning " "value of 64.\n", ioc->name, max_sectors); } else if (max_sectors > 32767) { shost->max_sectors = 32767; @@ -8165,6 +8279,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state) mpt2sas_base_stop_watchdog(ioc); scsi_block_requests(shost); + _scsih_ir_shutdown(ioc); device_state = pci_choose_state(pdev, state); printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering " "operating state [D%d]\n", ioc->name, pdev, @@ -8172,7 +8287,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state) mpt2sas_base_free_resources(ioc); pci_save_state(pdev); - pci_disable_device(pdev); pci_set_power_state(pdev, device_state); return 0; } diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index 8c2ffbe6af0..410f4a3e888 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -2,7 +2,7 @@ * SAS Transport Layer for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c - * Copyright (C) 2007-2012 LSI Corporation + * Copyright (C) 2007-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -1006,9 +1006,12 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, &mpt2sas_phy->remote_identify); _transport_add_phy_to_an_existing_port(ioc, sas_node, mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address); - } else + } else { memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct sas_identify)); + _transport_del_phy_from_an_existing_port(ioc, sas_node, + mpt2sas_phy); + } if (mpt2sas_phy->phy) mpt2sas_phy->phy->negotiated_linkrate = @@ -1898,7 +1901,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); Mpi2SmpPassthroughRequest_t *mpi_request; Mpi2SmpPassthroughReply_t *mpi_reply; - int rc, i; + int rc; u16 smid; u32 ioc_state; unsigned long timeleft; @@ -1913,7 +1916,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, void *pci_addr_out = NULL; u16 wait_state_count; struct request *rsp = req->next_rq; - struct bio_vec *bvec = NULL; + struct bio_vec bvec; + struct bvec_iter iter; if (!rsp) { printk(MPT2SAS_ERR_FMT "%s: the smp response space is " @@ -1939,7 +1943,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, ioc->transport_cmds.status = MPT2_CMD_PENDING; /* Check if the request is split across multiple segments */ - if (req->bio->bi_vcnt > 1) { + if (bio_multiple_segments(req->bio)) { u32 offset = 0; /* Allocate memory and copy the request */ @@ -1952,11 +1956,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, goto out; } - bio_for_each_segment(bvec, req->bio, i) { + bio_for_each_segment(bvec, req->bio, iter) { memcpy(pci_addr_out + offset, - page_address(bvec->bv_page) + bvec->bv_offset, - bvec->bv_len); - offset += bvec->bv_len; + page_address(bvec.bv_page) + bvec.bv_offset, + bvec.bv_len); + offset += bvec.bv_len; } } else { dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), @@ -1971,7 +1975,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, /* Check if the response needs to be populated across * multiple segments */ - if (rsp->bio->bi_vcnt > 1) { + if (bio_multiple_segments(rsp->bio)) { pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp), &pci_dma_in); if (!pci_addr_in) { @@ -2038,7 +2042,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, sgl_flags 
= (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; - if (req->bio->bi_vcnt > 1) { + if (bio_multiple_segments(req->bio)) { ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4), pci_dma_out); } else { @@ -2054,7 +2058,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST); sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; - if (rsp->bio->bi_vcnt > 1) { + if (bio_multiple_segments(rsp->bio)) { ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4), pci_dma_in); } else { @@ -2099,23 +2103,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, le16_to_cpu(mpi_reply->ResponseDataLength); /* check if the resp needs to be copied from the allocated * pci mem */ - if (rsp->bio->bi_vcnt > 1) { + if (bio_multiple_segments(rsp->bio)) { u32 offset = 0; u32 bytes_to_copy = le16_to_cpu(mpi_reply->ResponseDataLength); - bio_for_each_segment(bvec, rsp->bio, i) { - if (bytes_to_copy <= bvec->bv_len) { - memcpy(page_address(bvec->bv_page) + - bvec->bv_offset, pci_addr_in + + bio_for_each_segment(bvec, rsp->bio, iter) { + if (bytes_to_copy <= bvec.bv_len) { + memcpy(page_address(bvec.bv_page) + + bvec.bv_offset, pci_addr_in + offset, bytes_to_copy); break; } else { - memcpy(page_address(bvec->bv_page) + - bvec->bv_offset, pci_addr_in + - offset, bvec->bv_len); - bytes_to_copy -= bvec->bv_len; + memcpy(page_address(bvec.bv_page) + + bvec.bv_offset, pci_addr_in + + offset, bvec.bv_len); + bytes_to_copy -= bvec.bv_len; } - offset += bvec->bv_len; + offset += bvec.bv_len; } } } else { diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig index 81471bf415d..d53e1b02e89 100644 --- a/drivers/scsi/mpt3sas/Kconfig +++ b/drivers/scsi/mpt3sas/Kconfig @@ -2,7 +2,7 @@ # Kernel configuration file for the MPT3SAS # # This code is based on drivers/scsi/mpt3sas/Kconfig -# Copyright (C) 2012 LSI Corporation +# Copyright (C) 2012-2013 LSI Corporation # (mailto:DL-MPTFusionLinux@lsi.com) # This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile index 4c1d2e7a117..efb0c4c2e31 100644 --- a/drivers/scsi/mpt3sas/Makefile +++ b/drivers/scsi/mpt3sas/Makefile @@ -1,5 +1,5 @@ # mpt3sas makefile -obj-m += mpt3sas.o +obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o mpt3sas-y += mpt3sas_base.o \ mpt3sas_config.o \ mpt3sas_scsih.o \ diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h index 03317ffea62..20da8f907c0 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2.h @@ -8,7 +8,7 @@ * scatter/gather formats. * Creation Date: June 21, 2006 * - * mpi2.h Version: 02.00.26 + * mpi2.h Version: 02.00.29 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -82,6 +82,10 @@ * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT. * Added Hard Reset delay timings. * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. 
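The mpt2sas SMP passthrough hunks above move from indexing bio segments with an integer (bi_vcnt > 1 plus a struct bio_vec pointer) to the immutable-biovec API: bio_multiple_segments() and bio_for_each_segment() driven by a struct bvec_iter, with the bio_vec now taken by value. A minimal sketch of the new iteration pattern, assuming a contiguous destination buffer large enough for the whole bio (the helper name is illustrative):

#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Flatten a multi-segment bio into one buffer, as the passthrough
 * path does before DMA-mapping its bounce copy.  The loop variable
 * is a bio_vec by value; the cursor lives in a separate bvec_iter.
 */
static void copy_bio_to_buffer(struct bio *bio, void *buf)
{
	struct bio_vec bvec;
	struct bvec_iter iter;
	u32 offset = 0;

	bio_for_each_segment(bvec, bio, iter) {
		memcpy(buf + offset,
		       page_address(bvec.bv_page) + bvec.bv_offset,
		       bvec.bv_len);
		offset += bvec.bv_len;
	}
}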
* -------------------------------------------------------------------------- */ @@ -115,7 +119,7 @@ #define MPI2_VERSION_02_05 (0x0205) /*Unit and Dev versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x1A) +#define MPI2_HEADER_VERSION_UNIT (0x1D) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) @@ -274,6 +278,8 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS { #define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF) #define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000) #define MPI2_RPHI_MSIX_INDEX_SHIFT (24) +#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /*MPI v2.5 only*/ + /* *Defines for the HCBSize and address diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index d8b2c3eedb5..889aa706789 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2011 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_cnfg.h * Title: MPI Configuration messages and pages * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.22 + * mpi2_cnfg.h Version: 02.00.24 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -155,6 +155,11 @@ * Added UEFIVersion field to BIOS Page 1 and defined new * BiosOptions bits. * Incorporating additions for MPI v2.5. + * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER. + * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. + * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as + * obsolete for MPI v2.5 and later. + * Added some defines for 12G SAS speeds. 
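The new MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET lands next to the existing ReplyPostHostIndex field defines above. An illustrative sketch of how the two fields of that register are composed from those defines; the helper name and the register pointer are assumptions, only the mask and shift come from the hunk:

#include <linux/io.h>

/*
 * Compose a ReplyPostHostIndex write: the consumed reply index in the
 * low 24 bits, the MSI-X index in the top byte.  The destination
 * register pointer is assumed.
 */
static void write_reply_post_host_index(void __iomem *reg,
					u32 msix_index, u32 reply_index)
{
	writel((msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT) |
	       (reply_index & MPI2_REPLY_POST_HOST_INDEX_MASK), reg);
}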
* -------------------------------------------------------------------------- */ @@ -714,6 +719,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 { #define MPI2_MANUFACTURING7_PAGEVERSION (0x01) /*defines for the Flags field */ +#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) @@ -1310,6 +1316,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 { #define MPI2_BIOSPAGE1_PAGEVERSION (0x05) /*values for BIOS Page 1 BiosOptions field */ +#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) +#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) + #define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006) #define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000) #define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002) @@ -1884,6 +1893,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 { #define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80) #define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90) #define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0) +#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0) #define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F) #define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00) #define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08) @@ -1897,6 +1907,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 { #define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80) #define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90) #define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0) +#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0) #define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F) #define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08) #define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h index a079e524247..f7928bf6647 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_init.h diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h index 0de425d8fd7..e2bb8214372 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_ioc.h * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages * Creation Date: October 11, 2006 * - * mpi2_ioc.h Version: 02.00.21 + * mpi2_ioc.h Version: 02.00.22 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -124,6 +124,9 @@ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete. * 11-18-11 02.00.20 Incorporating additions for MPI v2.5. * 03-29-12 02.00.21 Added a product specific range to event values. + * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE. + * Added ElapsedSeconds field to + * MPI2_EVENT_DATA_IR_OPERATION_STATUS. 
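The 12G additions above extend the programmed- and hardware-rate bytes, where the maximum rate occupies the high nibble (0x80/0x90/0xA0/0xB0) and the minimum rate the low nibble. A small decode sketch; the 0xF0 max-rate mask is an assumption inferred from those values, not a define shown in this hunk:

/*
 * Decode a programmed-rate byte.  0xF0 for the max-rate nibble is
 * inferred from the values above.
 */
static bool prate_max_is_12g(u8 programmed_rate)
{
	return (programmed_rate & 0xF0) == MPI25_SAS_PRATE_MAX_RATE_12_0;
}

static u8 prate_min_rate(u8 programmed_rate)
{
	return programmed_rate & MPI2_SAS_PRATE_MIN_RATE_MASK;
}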
* -------------------------------------------------------------------------- */ @@ -283,6 +286,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY { #define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0) /*IOCExceptions */ +#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200) #define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100) #define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0) @@ -634,7 +638,7 @@ typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS { U8 RAIDOperation; /*0x04 */ U8 PercentComplete; /*0x05 */ U16 Reserved2; /*0x06 */ - U32 Resereved3; /*0x08 */ + U32 ElapsedSeconds; /*0x08 */ } MPI2_EVENT_DATA_IR_OPERATION_STATUS, *PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS, Mpi2EventDataIrOperationStatus_t, diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h index d1d9866cf30..71765236afe 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_raid.h * Title: MPI Integrated RAID messages and structures * Creation Date: April 26, 2007 * - * mpi2_raid.h Version: 02.00.08 + * mpi2_raid.h Version: 02.00.09 * * Version History * --------------- @@ -28,6 +28,8 @@ * Added product-specific range to RAID Action values. * 11-18-11 02.00.07 Incorporating additions for MPI v2.5. * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. + * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. + * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. * -------------------------------------------------------------------------- */ @@ -269,10 +271,12 @@ typedef struct _MPI2_RAID_VOL_INDICATOR { U64 TotalBlocks; /*0x00 */ U64 BlocksRemaining; /*0x08 */ U32 Flags; /*0x10 */ + U32 ElapsedSeconds; /* 0x14 */ } MPI2_RAID_VOL_INDICATOR, *PTR_MPI2_RAID_VOL_INDICATOR, Mpi2RaidVolIndicator_t, *pMpi2RaidVolIndicator_t; /*defines for RAID Volume Indicator Flags field */ +#define MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID (0x80000000) #define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F) #define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000) #define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001) @@ -312,7 +316,7 @@ typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT { /*RAID Action Reply ActionData union */ typedef union _MPI2_RAID_ACTION_REPLY_DATA { - U32 Word[5]; + U32 Word[6]; MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator; U16 VolDevHandle; U8 VolumeState; diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h index b4e7084aba3..cba046f6a4b 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_sas.h diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h index 71453d11c1c..34e9a7ba76b 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h @@ -1,12 +1,12 @@ /* - * Copyright (c) 2000-2012 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_tool.h * Title: MPI diagnostic tool structures and definitions * Creation Date: March 26, 2007 * - * mpi2_tool.h Version: 02.00.09 + * mpi2_tool.h Version: 02.00.10 * * Version History * --------------- @@ -30,6 +30,8 @@ * 11-18-11 02.00.08 Incorporating additions for MPI v2.5. * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request * message. 
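mpi2_raid.h above adds ElapsedSeconds to MPI2_RAID_VOL_INDICATOR together with a validity flag, and widens the RAID action reply union to Word[6] to match the larger indicator. Consumers should gate on the flag; a sketch, assuming the little-endian accessors these drivers use elsewhere:

/*
 * ElapsedSeconds is only meaningful when the firmware sets the new
 * validity bit in Flags; older firmware leaves the field undefined.
 */
static u32 raid_op_elapsed_seconds(MPI2_RAID_VOL_INDICATOR *ind)
{
	if (!(le32_to_cpu(ind->Flags) &
	      MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID))
		return 0;	/* not reported */

	return le32_to_cpu(ind->ElapsedSeconds);
}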
+ * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that + * it uses MPI Chain SGE as well as MPI Simple SGE. * -------------------------------------------------------------------------- */ @@ -279,7 +281,7 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { U16 Reserved6; /*0x0E */ U32 DataLength; /*0x10 */ U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */ - MPI2_SGE_SIMPLE_UNION SGL; /*0x70 */ + MPI2_MPI_SGE_IO_UNION SGL; /*0x70 */ } MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, *PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, Mpi2ToolboxDiagnosticCliRequest_t, @@ -302,7 +304,7 @@ typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { U32 Reserved5; /*0x0C */ U32 DataLength; /*0x10 */ U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */ - MPI25_SGE_IO_UNION SGL; /*0x70 */ + MPI25_SGE_IO_UNION SGL; /* 0x70 */ } MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, *PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, Mpi25ToolboxDiagnosticCliRequest_t, diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h index 516f959573f..ba1fed50966 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_type.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2007 LSI Corporation. + * Copyright (c) 2000-2013 LSI Corporation. * * * Name: mpi2_type.h diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 18360032a52..0cf4f7000f9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -3,7 +3,7 @@ * for access to MPT (Message Passing Technology) firmware. * * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c - * Copyright (C) 2012 LSI Corporation + * Copyright (C) 2012-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -82,6 +82,10 @@ static int msix_disable = -1; module_param(msix_disable, int, 0); MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); +static int max_msix_vectors = 8; +module_param(max_msix_vectors, int, 0); +MODULE_PARM_DESC(max_msix_vectors, + " max msix vectors - (default=8)"); static int mpt3sas_fwfault_debug; MODULE_PARM_DESC(mpt3sas_fwfault_debug, @@ -127,7 +131,7 @@ static int mpt3sas_remove_dead_ioc_func(void *arg) pdev = ioc->pdev; if ((pdev == NULL)) return -1; - pci_stop_and_remove_bus_device(pdev); + pci_stop_and_remove_bus_device_locked(pdev); return 0; } @@ -1709,8 +1713,6 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) int i; u8 try_msix = 0; - INIT_LIST_HEAD(&ioc->reply_queue_list); - if (msix_disable == -1 || msix_disable == 0) try_msix = 1; @@ -1723,6 +1725,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) ioc->reply_queue_count = min_t(int, ioc->cpu_count, ioc->msix_vector_count); + printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores" + ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count, + ioc->cpu_count, max_msix_vectors); + + if (max_msix_vectors > 0) { + ioc->reply_queue_count = min_t(int, max_msix_vectors, + ioc->reply_queue_count); + ioc->msix_vector_count = ioc->reply_queue_count; + } + entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), GFP_KERNEL); if (!entries) { @@ -1790,6 +1802,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) if (pci_enable_device_mem(pdev)) { pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n", ioc->name); + ioc->bars = 0; return -ENODEV; } @@ -1798,6 +1811,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) 
MPT3SAS_DRIVER_NAME)) { pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n", ioc->name); + ioc->bars = 0; r = -ENODEV; goto out_fail; } @@ -4090,11 +4104,15 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER, &ioc->chip->HostDiagnostic); - /* don't access any registers for 50 milliseconds */ - msleep(50); + /*This delay allows the chip PCIe hardware time to finish reset tasks*/ + if (sleep_flag == CAN_SLEEP) + msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000); + else + mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000); - /* 300 second max wait */ - for (count = 0; count < 3000000 ; count++) { + /* Approximately 300 second max wait */ + for (count = 0; count < (300000000 / + MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) { host_diagnostic = readl(&ioc->chip->HostDiagnostic); @@ -4103,11 +4121,13 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) break; - /* wait 1 msec */ + /* Wait to pass the second read delay window */ if (sleep_flag == CAN_SLEEP) - usleep_range(1000, 1500); + msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC + / 1000); else - mdelay(1); + mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC + / 1000); } if (host_diagnostic & MPI2_DIAG_HCB_MODE) { @@ -4387,18 +4407,25 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, __func__)); - _base_mask_interrupts(ioc); - ioc->shost_recovery = 1; - _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); - ioc->shost_recovery = 0; + if (ioc->chip_phys && ioc->chip) { + _base_mask_interrupts(ioc); + ioc->shost_recovery = 1; + _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); + ioc->shost_recovery = 0; + } + _base_free_irq(ioc); _base_disable_msix(ioc); - if (ioc->chip_phys) + + if (ioc->chip_phys && ioc->chip) iounmap(ioc->chip); ioc->chip_phys = 0; - pci_release_selected_regions(ioc->pdev, ioc->bars); - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); + + if (pci_is_enabled(pdev)) { + pci_release_selected_regions(ioc->pdev, ioc->bars); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + } return; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 994656cbfac..9b90a6fef70 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -3,7 +3,7 @@ * for access to MPT (Message Passing Technology) firmware. 
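The diag-reset rework above picks msleep() when the caller may sleep and mdelay() when it cannot, with the delay constants now expressed in microseconds. The same shape as a helper (the name is illustrative):

/*
 * Delay that respects the caller's context: msleep() can schedule and
 * is only legal in process context; mdelay() busy-waits and is safe
 * in atomic context.  usec mirrors the *_DELAY_MICRO_SEC constants.
 */
static void reset_delay(int sleep_flag, u32 usec)
{
	if (sleep_flag == CAN_SLEEP)
		msleep(usec / 1000);
	else
		mdelay(usec / 1000);
}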
* * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h - * Copyright (C) 2012 LSI Corporation + * Copyright (C) 2012-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -70,10 +70,10 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "01.100.01.00" -#define MPT3SAS_MAJOR_VERSION 1 +#define MPT3SAS_DRIVER_VERSION "02.100.00.00" +#define MPT3SAS_MAJOR_VERSION 2 #define MPT3SAS_MINOR_VERSION 100 -#define MPT3SAS_BUILD_VERSION 1 +#define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 00 /* @@ -993,7 +993,7 @@ void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase); int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, uint id, uint lun, u8 type, u16 smid_task, - ulong timeout, unsigned long serial_number, enum mutex_type m_type); + ulong timeout, enum mutex_type m_type); void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address); diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index 1df9ed4f371..936ec039199 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -2,7 +2,7 @@ * This module provides common API for accessing firmware configuration pages * * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c - * Copyright (C) 2012 LSI Corporation + * Copyright (C) 2012-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -148,7 +148,7 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, desc = "raid_config"; break; case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING: - desc = "driver_mappping"; + desc = "driver_mapping"; break; } break; diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 054d5231c97..ba9cbe598a9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -3,7 +3,7 @@ * controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c - * Copyright (C) 2012 LSI Corporation + * Copyright (C) 2012-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -503,19 +503,6 @@ _ctl_fasync(int fd, struct file *filep, int mode) } /** - * _ctl_release - - * @inode - - * @filep - - * - * Called when application releases the fasyn callback handler. 
- */ -static int -_ctl_release(struct inode *inode, struct file *filep) -{ - return fasync_helper(-1, filep, 0, &async_queue); -} - -/** * _ctl_poll - * @file - * @wait - @@ -993,7 +980,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, mpt3sas_scsih_issue_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, - 0, TM_MUTEX_ON); + TM_MUTEX_ON); } else mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); @@ -3233,7 +3220,6 @@ struct device_attribute *mpt3sas_dev_attrs[] = { static const struct file_operations ctl_fops = { .owner = THIS_MODULE, .unlocked_ioctl = _ctl_ioctl, - .release = _ctl_release, .poll = _ctl_poll, .fasync = _ctl_fasync, #ifdef CONFIG_COMPAT diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h index bd89f4f0055..53b0c480d98 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h @@ -3,7 +3,7 @@ * controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h - * Copyright (C) 2012 LSI Corporation + * Copyright (C) 2012-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h index 35405e7044f..545b22d2cbd 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_debug.h +++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h @@ -2,7 +2,7 @@ * Logging Support for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c - * Copyright (C) 2012 LSI Corporation + * Copyright (C) 2012-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index dcbf7c880cb..18e713db1d3 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2,7 +2,7 @@ * Scsi Host Layer for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c - * Copyright (C) 2012 LSI Corporation + * Copyright (C) 2012-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -675,11 +675,12 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc, * devices while scanning is turned on due to an oops in * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start() */ - if (!ioc->is_driver_loading) + if (!ioc->is_driver_loading) { mpt3sas_transport_port_remove(ioc, sas_device->sas_address, sas_device->sas_address_parent); - _scsih_sas_device_remove(ioc, sas_device); + _scsih_sas_device_remove(ioc, sas_device); + } } } @@ -1273,6 +1274,7 @@ _scsih_slave_alloc(struct scsi_device *sdev) struct MPT3SAS_DEVICE *sas_device_priv_data; struct scsi_target *starget; struct _raid_device *raid_device; + struct _sas_device *sas_device; unsigned long flags; sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL); @@ -1301,6 +1303,19 @@ _scsih_slave_alloc(struct scsi_device *sdev) spin_unlock_irqrestore(&ioc->raid_device_lock, flags); } + if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + sas_target_priv_data->sas_address); + if (sas_device && (sas_device->starget == NULL)) { + sdev_printk(KERN_INFO, sdev, + "%s : 
sas_device->starget set to starget @ %d\n", + __func__, __LINE__); + sas_device->starget = starget; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + return 0; } @@ -2014,7 +2029,6 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) * @smid_task: smid assigned to the task * @timeout: timeout in seconds - * @serial_number: the serial_number from scmd * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF * Context: user * @@ -2027,7 +2041,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout, - unsigned long serial_number, enum mutex_type m_type) + enum mutex_type m_type) { Mpi2SCSITaskManagementRequest_t *mpi_request; Mpi2SCSITaskManagementReply_t *mpi_reply; @@ -2278,8 +2292,7 @@ _scsih_abort(struct scsi_cmnd *scmd) handle = sas_device_priv_data->sas_target->handle; r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, scmd->device->lun, - MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, - scmd->serial_number, TM_MUTEX_ON); + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON); out: sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", @@ -2338,8 +2351,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, scmd->device->lun, - MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0, - TM_MUTEX_ON); + MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON); out: sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", @@ -2399,7 +2411,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd) r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, - 30, 0, TM_MUTEX_ON); + 30, TM_MUTEX_ON); out: starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", @@ -3503,7 +3515,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) /** - * _scsih_qcmd_lck - main scsi request entry point + * _scsih_qcmd - main scsi request entry point * @scmd: pointer to scsi command object * @done: function pointer to be invoked on completion * @@ -3514,9 +3526,9 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full */ static int -_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) +_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { - struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); struct MPT3SAS_DEVICE *sas_device_priv_data; struct MPT3SAS_TARGET *sas_target_priv_data; Mpi2SCSIIORequest_t *mpi_request; @@ -3529,7 +3541,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) scsi_print_command(scmd); #endif - scmd->scsi_done = done; sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { scmd->result = DID_NO_CONNECT << 16; @@ -3644,8 +3655,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) out: return SCSI_MLQUEUE_HOST_BUSY; } -static DEF_SCSI_QCMD(_scsih_qcmd) - /** * _scsih_normalize_sense - normalize descriptor and fixed format sense data @@ -5410,7 +5419,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, spin_unlock_irqrestore(&ioc->scsi_lookup_lock, 
flags); r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, - MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0, + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, TM_MUTEX_OFF); if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, @@ -5452,7 +5461,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, - scmd->serial_number, TM_MUTEX_OFF); + TM_MUTEX_OFF); if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " @@ -6392,7 +6401,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) break; handle = le16_to_cpu(sas_device_pg0.DevHandle); device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); @@ -6494,7 +6503,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) break; handle = le16_to_cpu(volume_pg1.DevHandle); @@ -6518,7 +6527,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) phys_disk_num))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) break; phys_disk_num = pd_pg0.PhysDiskNum; handle = le16_to_cpu(pd_pg0.DevHandle); @@ -6597,7 +6606,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc) ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) break; handle = le16_to_cpu(expander_pg0.DevHandle); @@ -6742,8 +6751,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) - break; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \ "ioc_status(0x%04x), loginfo(0x%08x)\n", @@ -6787,8 +6794,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) phys_disk_num))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) - break; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\ "ioc_status(0x%04x), loginfo(0x%08x)\n", @@ -6854,8 +6859,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) - break; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \ "ioc_status(0x%04x), loginfo(0x%08x)\n", @@ -6914,8 +6917,6 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) - break; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { pr_info(MPT3SAS_FMT "\tbreak from end 
device scan:"\ " ioc_status(0x%04x), loginfo(0x%08x)\n", @@ -7525,10 +7526,12 @@ _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc) sas_address_parent)) { _scsih_sas_device_remove(ioc, sas_device); } else if (!sas_device->starget) { - if (!ioc->is_driver_loading) - mpt3sas_transport_port_remove(ioc, sas_address, + if (!ioc->is_driver_loading) { + mpt3sas_transport_port_remove(ioc, + sas_address, sas_address_parent); - _scsih_sas_device_remove(ioc, sas_device); + _scsih_sas_device_remove(ioc, sas_device); + } } } } @@ -7584,13 +7587,14 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc) * oops in scsi_sysfs_add_sdev()->add_device()-> * sysfs_addrm_start() */ - if (!ioc->is_driver_loading) + if (!ioc->is_driver_loading) { mpt3sas_transport_port_remove(ioc, sas_device->sas_address, sas_device->sas_address_parent); - list_del(&sas_device->list); - kfree(sas_device); - continue; + list_del(&sas_device->list); + kfree(sas_device); + continue; + } } spin_lock_irqsave(&ioc->sas_device_lock, flags); @@ -7769,6 +7773,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); INIT_LIST_HEAD(&ioc->delayed_tr_list); INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); + INIT_LIST_HEAD(&ioc->reply_queue_list); /* init shost parameters */ shost->max_cmd_len = 32; diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 87ca2b7287c..65170cb1a00 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -2,7 +2,7 @@ * SAS Transport Layer for MPT (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c - * Copyright (C) 2012 LSI Corporation + * Copyright (C) 2012-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or @@ -1003,9 +1003,12 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, &mpt3sas_phy->remote_identify); _transport_add_phy_to_an_existing_port(ioc, sas_node, mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address); - } else + } else { memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct sas_identify)); + _transport_del_phy_from_an_existing_port(ioc, sas_node, + mpt3sas_phy); + } if (mpt3sas_phy->phy) mpt3sas_phy->phy->negotiated_linkrate = @@ -1881,7 +1884,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); Mpi2SmpPassthroughRequest_t *mpi_request; Mpi2SmpPassthroughReply_t *mpi_reply; - int rc, i; + int rc; u16 smid; u32 ioc_state; unsigned long timeleft; @@ -1895,7 +1898,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, void *pci_addr_out = NULL; u16 wait_state_count; struct request *rsp = req->next_rq; - struct bio_vec *bvec = NULL; + struct bio_vec bvec; + struct bvec_iter iter; if (!rsp) { pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n", @@ -1922,7 +1926,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, ioc->transport_cmds.status = MPT3_CMD_PENDING; /* Check if the request is split across multiple segments */ - if (req->bio->bi_vcnt > 1) { + if (bio_multiple_segments(req->bio)) { u32 offset = 0; /* Allocate memory and copy the request */ @@ -1935,11 +1939,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, goto out; } - bio_for_each_segment(bvec, req->bio, i) { + bio_for_each_segment(bvec, req->bio, iter) { memcpy(pci_addr_out + offset, - 
page_address(bvec->bv_page) + bvec->bv_offset, - bvec->bv_len); - offset += bvec->bv_len; + page_address(bvec.bv_page) + bvec.bv_offset, + bvec.bv_len); + offset += bvec.bv_len; } } else { dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), @@ -1954,7 +1958,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, /* Check if the response needs to be populated across * multiple segments */ - if (rsp->bio->bi_vcnt > 1) { + if (bio_multiple_segments(rsp->bio)) { pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp), &pci_dma_in); if (!pci_addr_in) { @@ -2015,7 +2019,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); psge = &mpi_request->SGL; - if (req->bio->bi_vcnt > 1) + if (bio_multiple_segments(req->bio)) ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4), pci_dma_in, (blk_rq_bytes(rsp) + 4)); else @@ -2060,23 +2064,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, /* check if the resp needs to be copied from the allocated * pci mem */ - if (rsp->bio->bi_vcnt > 1) { + if (bio_multiple_segments(rsp->bio)) { u32 offset = 0; u32 bytes_to_copy = le16_to_cpu(mpi_reply->ResponseDataLength); - bio_for_each_segment(bvec, rsp->bio, i) { - if (bytes_to_copy <= bvec->bv_len) { - memcpy(page_address(bvec->bv_page) + - bvec->bv_offset, pci_addr_in + + bio_for_each_segment(bvec, rsp->bio, iter) { + if (bytes_to_copy <= bvec.bv_len) { + memcpy(page_address(bvec.bv_page) + + bvec.bv_offset, pci_addr_in + offset, bytes_to_copy); break; } else { - memcpy(page_address(bvec->bv_page) + - bvec->bv_offset, pci_addr_in + - offset, bvec->bv_len); - bytes_to_copy -= bvec->bv_len; + memcpy(page_address(bvec.bv_page) + + bvec.bv_offset, pci_addr_in + + offset, bvec.bv_len); + bytes_to_copy -= bvec.bv_len; } - offset += bvec->bv_len; + offset += bvec.bv_len; } } } else { diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c index 6f8d6213040..f6533ab2036 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c @@ -3,7 +3,7 @@ * (Message Passing Technology) based controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c - * Copyright (C) 2012 LSI Corporation + * Copyright (C) 2012-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h index a10c3090739..bb693923bef 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h @@ -4,7 +4,7 @@ * controllers * * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h - * Copyright (C) 2012 LSI Corporation + * Copyright (C) 2012-2013 LSI Corporation * (mailto:DL-MPTFusionLinux@lsi.com) * * This program is free software; you can redistribute it and/or diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c index c29d0dbb966..e7f6661a886 100644 --- a/drivers/scsi/mvme147.c +++ b/drivers/scsi/mvme147.c @@ -76,7 +76,8 @@ int mvme147_detect(struct scsi_host_template *tpnt) called++; tpnt->proc_name = "MVME147"; - tpnt->proc_info = &wd33c93_proc_info; + tpnt->show_info = wd33c93_show_info, + tpnt->write_info = wd33c93_write_info, instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); if (!instance) diff --git a/drivers/scsi/mvsas/mv_94xx.c 
b/drivers/scsi/mvsas/mv_94xx.c index 1e4479f3331..9270d15ff1a 100644 --- a/drivers/scsi/mvsas/mv_94xx.c +++ b/drivers/scsi/mvsas/mv_94xx.c @@ -564,7 +564,7 @@ static void mvs_94xx_interrupt_enable(struct mvs_info *mvi) u32 tmp; tmp = mr32(MVS_GBL_CTL); - tmp |= (IRQ_SAS_A | IRQ_SAS_B); + tmp |= (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B); mw32(MVS_GBL_INT_STAT, tmp); writel(tmp, regs + 0x0C); writel(tmp, regs + 0x10); @@ -580,7 +580,7 @@ static void mvs_94xx_interrupt_disable(struct mvs_info *mvi) tmp = mr32(MVS_GBL_CTL); - tmp &= ~(IRQ_SAS_A | IRQ_SAS_B); + tmp &= ~(MVS_IRQ_SAS_A | MVS_IRQ_SAS_B); mw32(MVS_GBL_INT_STAT, tmp); writel(tmp, regs + 0x0C); writel(tmp, regs + 0x10); @@ -596,7 +596,7 @@ static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq) if (!(mvi->flags & MVF_FLAG_SOC)) { stat = mr32(MVS_GBL_INT_STAT); - if (!(stat & (IRQ_SAS_A | IRQ_SAS_B))) + if (!(stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B))) return 0; } return stat; @@ -606,8 +606,8 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat) { void __iomem *regs = mvi->regs; - if (((stat & IRQ_SAS_A) && mvi->id == 0) || - ((stat & IRQ_SAS_B) && mvi->id == 1)) { + if (((stat & MVS_IRQ_SAS_A) && mvi->id == 0) || + ((stat & MVS_IRQ_SAS_B) && mvi->id == 1)) { mw32_f(MVS_INT_STAT, CINT_DONE); spin_lock(&mvi->lock); diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h index 487aa6f9741..14e197497b4 100644 --- a/drivers/scsi/mvsas/mv_94xx.h +++ b/drivers/scsi/mvsas/mv_94xx.h @@ -150,35 +150,35 @@ enum chip_register_bits { enum pci_interrupt_cause { /* MAIN_IRQ_CAUSE (R10200) Bits*/ - IRQ_COM_IN_I2O_IOP0 = (1 << 0), - IRQ_COM_IN_I2O_IOP1 = (1 << 1), - IRQ_COM_IN_I2O_IOP2 = (1 << 2), - IRQ_COM_IN_I2O_IOP3 = (1 << 3), - IRQ_COM_OUT_I2O_HOS0 = (1 << 4), - IRQ_COM_OUT_I2O_HOS1 = (1 << 5), - IRQ_COM_OUT_I2O_HOS2 = (1 << 6), - IRQ_COM_OUT_I2O_HOS3 = (1 << 7), - IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8), - IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9), - IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10), - IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11), - IRQ_PCIF_DRBL0 = (1 << 12), - IRQ_PCIF_DRBL1 = (1 << 13), - IRQ_PCIF_DRBL2 = (1 << 14), - IRQ_PCIF_DRBL3 = (1 << 15), - IRQ_XOR_A = (1 << 16), - IRQ_XOR_B = (1 << 17), - IRQ_SAS_A = (1 << 18), - IRQ_SAS_B = (1 << 19), - IRQ_CPU_CNTRL = (1 << 20), - IRQ_GPIO = (1 << 21), - IRQ_UART = (1 << 22), - IRQ_SPI = (1 << 23), - IRQ_I2C = (1 << 24), - IRQ_SGPIO = (1 << 25), - IRQ_COM_ERR = (1 << 29), - IRQ_I2O_ERR = (1 << 30), - IRQ_PCIE_ERR = (1 << 31), + MVS_IRQ_COM_IN_I2O_IOP0 = (1 << 0), + MVS_IRQ_COM_IN_I2O_IOP1 = (1 << 1), + MVS_IRQ_COM_IN_I2O_IOP2 = (1 << 2), + MVS_IRQ_COM_IN_I2O_IOP3 = (1 << 3), + MVS_IRQ_COM_OUT_I2O_HOS0 = (1 << 4), + MVS_IRQ_COM_OUT_I2O_HOS1 = (1 << 5), + MVS_IRQ_COM_OUT_I2O_HOS2 = (1 << 6), + MVS_IRQ_COM_OUT_I2O_HOS3 = (1 << 7), + MVS_IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8), + MVS_IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9), + MVS_IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10), + MVS_IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11), + MVS_IRQ_PCIF_DRBL0 = (1 << 12), + MVS_IRQ_PCIF_DRBL1 = (1 << 13), + MVS_IRQ_PCIF_DRBL2 = (1 << 14), + MVS_IRQ_PCIF_DRBL3 = (1 << 15), + MVS_IRQ_XOR_A = (1 << 16), + MVS_IRQ_XOR_B = (1 << 17), + MVS_IRQ_SAS_A = (1 << 18), + MVS_IRQ_SAS_B = (1 << 19), + MVS_IRQ_CPU_CNTRL = (1 << 20), + MVS_IRQ_GPIO = (1 << 21), + MVS_IRQ_UART = (1 << 22), + MVS_IRQ_SPI = (1 << 23), + MVS_IRQ_I2C = (1 << 24), + MVS_IRQ_SGPIO = (1 << 25), + MVS_IRQ_COM_ERR = (1 << 29), + MVS_IRQ_I2O_ERR = (1 << 30), + MVS_IRQ_PCIE_ERR = (1 << 31), }; union reg_phy_cfg { diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c 
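The wholesale rename above prefixes every pci_interrupt_cause value with MVS_, presumably to keep these driver-local cause bits from colliding with IRQ_* macros that some architectures define globally. The dispatch test the renamed bits feed, factored out for clarity (the helper name is illustrative):

/*
 * Each mvs_info instance services only its own SAS cause bit: id 0
 * owns MVS_IRQ_SAS_A, id 1 owns MVS_IRQ_SAS_B, exactly as in
 * mvs_94xx_isr() above.
 */
static bool mvs_94xx_irq_is_mine(struct mvs_info *mvi, u32 stat)
{
	return ((stat & MVS_IRQ_SAS_A) && mvi->id == 0) ||
	       ((stat & MVS_IRQ_SAS_B) && mvi->id == 1);
}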
index ce90d0546cd..eacee48a955 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -254,7 +254,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) } for (i = 0; i < MVS_MAX_DEVICES; i++) { mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; - mvi->devices[i].dev_type = NO_DEVICE; + mvi->devices[i].dev_type = SAS_PHY_UNUSED; mvi->devices[i].device_id = i; mvi->devices[i].dev_status = MVS_DEV_NORMAL; init_timer(&mvi->devices[i].timer); @@ -657,7 +657,6 @@ static void mvs_pci_remove(struct pci_dev *pdev) tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet); #endif - pci_set_drvdata(pdev, NULL); sas_unregister_ha(sha); sas_remove_host(mvi->shost); scsi_remove_host(mvi->shost); @@ -703,7 +702,7 @@ static struct pci_device_id mvs_pci_table[] = { { PCI_VDEVICE(TTI, 0x2744), chip_9480 }, { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, { - .vendor = 0x1b4b, + .vendor = PCI_VENDOR_ID_MARVELL_EXT, .device = 0x9480, .subvendor = PCI_ANY_ID, .subdevice = 0x9480, @@ -712,7 +711,7 @@ static struct pci_device_id mvs_pci_table[] = { .driver_data = chip_9480, }, { - .vendor = 0x1b4b, + .vendor = PCI_VENDOR_ID_MARVELL_EXT, .device = 0x9445, .subvendor = PCI_ANY_ID, .subdevice = 0x9480, @@ -721,7 +720,7 @@ static struct pci_device_id mvs_pci_table[] = { .driver_data = chip_9445, }, { - .vendor = 0x1b4b, + .vendor = PCI_VENDOR_ID_MARVELL_EXT, .device = 0x9485, .subvendor = PCI_ANY_ID, .subdevice = 0x9480, @@ -729,6 +728,15 @@ static struct pci_device_id mvs_pci_table[] = { .class_mask = 0, .driver_data = chip_9485, }, + { + .vendor = PCI_VENDOR_ID_MARVELL_EXT, + .device = 0x9485, + .subvendor = PCI_ANY_ID, + .subdevice = 0x9485, + .class = 0, + .class_mask = 0, + .driver_data = chip_9485, + }, { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */ { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 532110f4562..6c1f223a8e1 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -686,7 +686,8 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, if (ssp_hdr->frame_type != SSP_TASK) { buf_cmd[9] = fburst | task->ssp_task.task_attr | (task->ssp_task.task_prio << 3); - memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); + memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); } else{ buf_cmd[10] = tmf->tmf; switch (tmf->tmf) { @@ -706,7 +707,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, return 0; } -#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) +#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED))) static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf, struct mvs_tmf_task *tmf, int *pass) { @@ -726,7 +727,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf * libsas will use dev->port, should * not call task_done for sata */ - if (dev->dev_type != SATA_DEV) + if (dev->dev_type != SAS_SATA_DEV) task->task_done(task); return rc; } @@ -1159,10 +1160,10 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) phy->identify.device_type = phy->att_dev_info & PORT_DEV_TYPE_MASK; - if (phy->identify.device_type == SAS_END_DEV) + if (phy->identify.device_type == SAS_END_DEVICE) phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; - else if (phy->identify.device_type != NO_DEVICE) 
+ else if (phy->identify.device_type != SAS_PHY_UNUSED) phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; if (oob_done) @@ -1260,7 +1261,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) { u32 dev; for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { - if (mvi->devices[dev].dev_type == NO_DEVICE) { + if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) { mvi->devices[dev].device_id = dev; return &mvi->devices[dev]; } @@ -1278,7 +1279,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev) u32 id = mvi_dev->device_id; memset(mvi_dev, 0, sizeof(*mvi_dev)); mvi_dev->device_id = id; - mvi_dev->dev_type = NO_DEVICE; + mvi_dev->dev_type = SAS_PHY_UNUSED; mvi_dev->dev_status = MVS_DEV_NORMAL; mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; } @@ -1410,7 +1411,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev, if (res) { del_timer(&task->slow_task->timer); - mv_printk("executing internel task failed:%d\n", res); + mv_printk("executing internal task failed:%d\n", res); goto ex_err; } @@ -1480,7 +1481,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) { int rc; struct sas_phy *phy = sas_get_local_phy(dev); - int reset_type = (dev->dev_type == SATA_DEV || + int reset_type = (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; rc = sas_phy_reset(phy, reset_type); sas_put_local_phy(phy); @@ -1629,7 +1630,7 @@ int mvs_abort_task(struct sas_task *task) } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { - if (SATA_DEV == dev->dev_type) { + if (SAS_SATA_DEV == dev->dev_type) { struct mvs_slot_info *slot = task->lldd_task; u32 slot_idx = (u32)(slot - mvi->slot_info); mv_dprintk("mvs_abort_task() mvi=%p task=%p " @@ -1856,11 +1857,16 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) goto out; } - /* error info record present */ - if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { + /* + * error info record present; slot->response is 32 bit aligned but may + * not be 64 bit aligned, so check for zero in two 32 bit reads + */ + if (unlikely((rx_desc & RXQ_ERR) + && (*((u32 *)slot->response) + || *(((u32 *)slot->response) + 1)))) { mv_dprintk("port %d slot %d rx_desc %X has error info" "%016llX.\n", slot->port->sas_port.id, slot_idx, - rx_desc, (u64)(*(u64 *)slot->response)); + rx_desc, get_unaligned_le64(slot->response)); tstat->stat = mvs_slot_err(mvi, task, slot_idx); tstat->resp = SAS_TASK_COMPLETE; goto out; diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 9f3cc13a5ce..d6b19dc80be 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -39,6 +39,7 @@ #include <linux/irq.h> #include <linux/slab.h> #include <linux/vmalloc.h> +#include <asm/unaligned.h> #include <scsi/libsas.h> #include <scsi/scsi.h> #include <scsi/scsi_tcq.h> @@ -67,7 +68,7 @@ extern const struct mvs_dispatch mvs_94xx_dispatch; extern struct kmem_cache *mvs_task_list_cache; #define DEV_IS_EXPANDER(type) \ - ((type == EDGE_DEV) || (type == FANOUT_DEV)) + ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) #define bit(n) ((u64)1 << n) @@ -241,7 +242,7 @@ struct mvs_phy { struct mvs_device { struct list_head dev_entry; - enum sas_dev_type dev_type; + enum sas_device_type dev_type; struct mvs_info *mvi_info; struct domain_device *sas_device; struct timer_list timer; diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index 4594ccaaf49..edbee8dc62c 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -49,8 +49,8 @@ 
MODULE_AUTHOR("jyli@marvell.com"); MODULE_DESCRIPTION("Marvell UMI Driver"); static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = { - { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) }, - { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9580) }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) }, { 0 } }; @@ -2583,7 +2583,6 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; fail_io_attach: - pci_set_drvdata(pdev, NULL); mhba->instancet->disable_intr(mhba); free_irq(mhba->pdev->irq, mhba); fail_init_irq: @@ -2618,7 +2617,6 @@ static void mvumi_detach_one(struct pci_dev *pdev) free_irq(mhba->pdev->irq, mhba); mvumi_release_fw(mhba); scsi_host_put(host); - pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); dev_dbg(&pdev->dev, "driver is removed!\n"); } diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h index e360135fd1b..41f168702ac 100644 --- a/drivers/scsi/mvumi.h +++ b/drivers/scsi/mvumi.h @@ -32,7 +32,6 @@ #define VER_BUILD 1500 #define MV_DRIVER_NAME "mvumi" -#define PCI_VENDOR_ID_MARVELL_2 0x1b4b #define PCI_DEVICE_ID_MARVELL_MV9143 0x9143 #define PCI_DEVICE_ID_MARVELL_MV9580 0x9580 diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index 5982a587bab..7d014b11df6 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -1615,7 +1615,7 @@ struct ncb { spinlock_t smp_lock; /* Lock for SMP threading */ /*---------------------------------------------------------------- - ** Chip and controller indentification. + ** Chip and controller identification. **---------------------------------------------------------------- */ int unit; /* Unit number */ diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index 1cc0c1c69c8..0665f9cfdb0 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c @@ -192,7 +192,7 @@ static int __init init_nsp32 (void); static void __exit exit_nsp32 (void); /* struct struct scsi_host_template */ -static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int); +static int nsp32_show_info (struct seq_file *, struct Scsi_Host *); static int nsp32_detect (struct pci_dev *pdev); static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); @@ -268,7 +268,7 @@ static void nsp32_dmessage(const char *, int, int, char *, ...); static struct scsi_host_template nsp32_template = { .proc_name = "nsp32", .name = "Workbit NinjaSCSI-32Bi/UDE", - .proc_info = nsp32_proc_info, + .show_info = nsp32_show_info, .info = nsp32_info, .queuecommand = nsp32_queuecommand, .can_queue = 1, @@ -1442,19 +1442,10 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id) } #undef SPRINTF -#define SPRINTF(args...) \ - do { \ - if(length > (pos - buffer)) { \ - pos += snprintf(pos, length - (pos - buffer) + 1, ## args); \ - nsp32_dbg(NSP32_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length, length - (pos - buffer));\ - } \ - } while(0) - -static int nsp32_proc_info(struct Scsi_Host *host, char *buffer, char **start, - off_t offset, int length, int inout) +#define SPRINTF(args...) seq_printf(m, ##args) + +static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host) { - char *pos = buffer; - int thislength; unsigned long flags; nsp32_hw_data *data; int hostno; @@ -1463,11 +1454,6 @@ static int nsp32_proc_info(struct Scsi_Host *host, char *buffer, char **start, int id, speed; long model; - /* Write is not supported, just return. 
*/ - if (inout == TRUE) { - return -EINVAL; - } - hostno = host->host_no; data = (nsp32_hw_data *)host->hostdata; base = host->io_port; @@ -1527,20 +1513,7 @@ static int nsp32_proc_info(struct Scsi_Host *host, char *buffer, char **start, } SPRINTF("\n"); } - - - thislength = pos - (buffer + offset); - - if(thislength < 0) { - *start = NULL; - return 0; - } - - - thislength = min(thislength, length); - *start = buffer + offset; - - return thislength; + return 0; } #undef SPRINTF @@ -2926,7 +2899,7 @@ static void nsp32_do_bus_reset(nsp32_hw_data *data) * reset SCSI bus */ nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST); - udelay(RESET_HOLD_TIME); + mdelay(RESET_HOLD_TIME / 1000); nsp32_write1(base, SCSI_BUS_CONTROL, 0); for(i = 0; i < 5; i++) { intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */ diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index d8293f25ca3..5f4cbf0c475 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -731,7 +731,7 @@ static int _osd_req_list_objects(struct osd_request *or, bio->bi_rw &= ~REQ_WRITE; or->in.bio = bio; - or->in.total_bytes = bio->bi_size; + or->in.total_bytes = bio->bi_iter.bi_size; return 0; } @@ -1049,7 +1049,7 @@ static struct bio *_create_sg_bios(struct osd_request *or, bio = bio_kmalloc(GFP_KERNEL, numentries); if (unlikely(!bio)) { - OSD_DEBUG("Faild to allocate BIO size=%u\n", numentries); + OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries); return ERR_PTR(-ENOMEM); } @@ -1570,6 +1570,7 @@ static struct request *_make_request(struct request_queue *q, bool has_write, if (unlikely(!req)) return ERR_PTR(-ENOMEM); + blk_rq_set_block_pc(req); return req; } } @@ -1590,7 +1591,6 @@ static int _init_blk_request(struct osd_request *or, } or->request = req; - req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_QUIET; req->timeout = or->timeout; @@ -1608,7 +1608,7 @@ static int _init_blk_request(struct osd_request *or, ret = PTR_ERR(req); goto out; } - req->cmd_type = REQ_TYPE_BLOCK_PC; + blk_rq_set_block_pc(req); or->in.req = or->request->next_rq = req; } } else if (has_in) diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index 0fab6b5c7b8..e1d9a4c4c4b 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -107,6 +107,7 @@ static ssize_t osdname_show(struct device *dev, struct device_attribute *attr, class_dev); return sprintf(buf, "%s\n", ould->odi.osdname); } +static DEVICE_ATTR_RO(osdname); static ssize_t systemid_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -117,17 +118,19 @@ static ssize_t systemid_show(struct device *dev, struct device_attribute *attr, memcpy(buf, ould->odi.systemid, ould->odi.systemid_len); return ould->odi.systemid_len; } +static DEVICE_ATTR_RO(systemid); -static struct device_attribute osd_uld_attrs[] = { - __ATTR(osdname, S_IRUGO, osdname_show, NULL), - __ATTR(systemid, S_IRUGO, systemid_show, NULL), - __ATTR_NULL, +static struct attribute *osd_uld_attrs[] = { + &dev_attr_osdname.attr, + &dev_attr_systemid.attr, + NULL, }; +ATTRIBUTE_GROUPS(osd_uld); static struct class osd_uld_class = { .owner = THIS_MODULE, .name = "scsi_osd", - .dev_attrs = osd_uld_attrs, + .dev_groups = osd_uld_groups, }; /* @@ -485,7 +488,7 @@ static int osd_probe(struct device *dev) oud->class_dev.class = &osd_uld_class; oud->class_dev.parent = dev; oud->class_dev.release = __remove; - error = dev_set_name(&oud->class_dev, disk->disk_name); + error = dev_set_name(&oud->class_dev, "%s", disk->disk_name); if 
(error) { OSD_ERR("dev_set_name failed => %d\n", error); goto err_put_cdev; diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 21883a2d632..0727ea7cc38 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -365,7 +365,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd, if (!req) return DRIVER_ERROR << 24; - req->cmd_type = REQ_TYPE_BLOCK_PC; + blk_rq_set_block_pc(req); req->cmd_flags |= REQ_QUIET; SRpnt->bio = NULL; diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c index 2f72c9807b1..0d78a4d5576 100644 --- a/drivers/scsi/pas16.c +++ b/drivers/scsi/pas16.c @@ -388,7 +388,8 @@ int __init pas16_detect(struct scsi_host_template * tpnt) int count; tpnt->proc_name = "pas16"; - tpnt->proc_info = &pas16_proc_info; + tpnt->show_info = pas16_show_info; + tpnt->write_info = pas16_write_info; if (pas16_addr != 0) { overrides[0].io_port = pas16_addr; @@ -452,7 +453,7 @@ int __init pas16_detect(struct scsi_host_template * tpnt) instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS); if (instance->irq != SCSI_IRQ_NONE) - if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED, + if (request_irq(instance->irq, pas16_intr, 0, "pas16", instance)) { printk("scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h index a04281cace2..aa528f53c53 100644 --- a/drivers/scsi/pas16.h +++ b/drivers/scsi/pas16.h @@ -129,8 +129,6 @@ static int pas16_bus_reset(Scsi_Cmnd *); #define CAN_QUEUE 32 #endif -#ifndef HOSTS_C - #define NCR5380_implementation_fields \ volatile unsigned short io_port @@ -163,13 +161,13 @@ static int pas16_bus_reset(Scsi_Cmnd *); #define NCR5380_queue_command pas16_queue_command #define NCR5380_abort pas16_abort #define NCR5380_bus_reset pas16_bus_reset -#define NCR5380_proc_info pas16_proc_info +#define NCR5380_show_info pas16_show_info +#define NCR5380_write_info pas16_write_info /* 15 14 12 10 7 5 3 1101 0100 1010 1000 */ #define PAS16_IRQS 0xd4a8 -#endif /* else def HOSTS_C */ #endif /* ndef ASM */ #endif /* PAS16_H */ diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index b61a753eb89..987fbb1b244 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -76,7 +76,7 @@ MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0 static struct scsi_host_template nsp_driver_template = { .proc_name = "nsp_cs", - .proc_info = nsp_proc_info, + .show_info = nsp_show_info, .name = "WorkBit NinjaSCSI-3/32Bi(16bit)", .info = nsp_info, .queuecommand = nsp_queuecommand, @@ -1365,33 +1365,19 @@ static const char *nsp_info(struct Scsi_Host *shpnt) } #undef SPRINTF -#define SPRINTF(args...) \ - do { \ - if(length > (pos - buffer)) { \ - pos += snprintf(pos, length - (pos - buffer) + 1, ## args); \ - nsp_dbg(NSP_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length, length - (pos - buffer));\ - } \ - } while(0) - -static int nsp_proc_info(struct Scsi_Host *host, char *buffer, char **start, - off_t offset, int length, int inout) +#define SPRINTF(args...) 
seq_printf(m, ##args) + +static int nsp_show_info(struct seq_file *m, struct Scsi_Host *host) { int id; - char *pos = buffer; - int thislength; int speed; unsigned long flags; nsp_hw_data *data; int hostno; - if (inout) { - return -EINVAL; - } - hostno = host->host_no; data = (nsp_hw_data *)host->hostdata; - SPRINTF("NinjaSCSI status\n\n"); SPRINTF("Driver version: $Revision: 1.23 $\n"); SPRINTF("SCSI host No.: %d\n", hostno); @@ -1458,19 +1444,7 @@ static int nsp_proc_info(struct Scsi_Host *host, char *buffer, char **start, } SPRINTF("\n"); } - - thislength = pos - (buffer + offset); - - if(thislength < 0) { - *start = NULL; - return 0; - } - - - thislength = min(thislength, length); - *start = buffer + offset; - - return thislength; + return 0; } #undef SPRINTF diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h index 7fc9a9d0a44..afd64f0adc4 100644 --- a/drivers/scsi/pcmcia/nsp_cs.h +++ b/drivers/scsi/pcmcia/nsp_cs.h @@ -292,13 +292,8 @@ static int nsp_cs_config (struct pcmcia_device *link); /* Linux SCSI subsystem specific functions */ static struct Scsi_Host *nsp_detect (struct scsi_host_template *sht); static const char *nsp_info (struct Scsi_Host *shpnt); -static int nsp_proc_info ( - struct Scsi_Host *host, - char *buffer, - char **start, - off_t offset, - int length, - int inout); +static int nsp_show_info (struct seq_file *m, + struct Scsi_Host *host); static int nsp_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); /* Error handler */ diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile index 52f04296171..ce4cd87c7c6 100644 --- a/drivers/scsi/pm8001/Makefile +++ b/drivers/scsi/pm8001/Makefile @@ -4,9 +4,10 @@ # Copyright (C) 2008-2009 USI Co., Ltd. -obj-$(CONFIG_SCSI_PM8001) += pm8001.o -pm8001-y += pm8001_init.o \ +obj-$(CONFIG_SCSI_PM8001) += pm80xx.o +pm80xx-y += pm8001_init.o \ pm8001_sas.o \ pm8001_ctl.o \ - pm8001_hwi.o + pm8001_hwi.o \ + pm80xx_hwi.o diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 45bc197bc22..a368d77b8d4 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. 
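
The nsp32, nsp_cs and pas16 hunks above all make the same conversion: the old ->proc_info callback, which had to reject writes via the inout flag and hand-compute a *start pointer and thislength against buffer/offset/length, is replaced by a ->show_info callback (plus a separate ->write_info where the driver accepted writes, as pas16 does) that simply emits text through seq_printf() and returns 0, with the seq_file core doing all the paging. The unrelated nsp32 hunk swapping udelay(RESET_HOLD_TIME) for mdelay(RESET_HOLD_TIME / 1000) reflects that udelay() is not safe for multi-millisecond busy-waits on some architectures. A minimal sketch of the resulting pattern — the function name and printed fields here are illustrative, not taken from any one of these drivers:

#include <linux/seq_file.h>
#include <scsi/scsi_host.h>

static int example_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	/* seq_printf() buffers and pages internally, so none of the
	 * old offset/length/*start bookkeeping survives the conversion. */
	seq_printf(m, "SCSI host No.: %d\n", host->host_no);
	seq_printf(m, "IO port:       0x%lx\n", host->io_port);
	return 0;	/* success; seq_file handles partial reads */
}

/* Wired up in the scsi_host_template next to .proc_name, replacing the
 * old .proc_info pointer:
 *	.show_info = example_show_info,
 */
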
@@ -58,8 +58,13 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%d\n", - pm8001_ha->main_cfg_tbl.interface_rev); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev); + } else { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev); + } } static DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); @@ -78,11 +83,19 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev)); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev)); + } else { + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev)); + } } static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL); /** @@ -99,8 +112,13 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%d\n", - pm8001_ha->main_cfg_tbl.max_out_io); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io); + } else { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io); + } } static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL); /** @@ -117,8 +135,15 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%04d\n", - (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16)); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16) + ); + } else { + return snprintf(buf, PAGE_SIZE, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16) + ); + } } static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL); /** @@ -136,8 +161,15 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%04d\n", - pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%04d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF + ); + } else { + return snprintf(buf, PAGE_SIZE, "%04d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 
0x0000FFFF + ); + } } static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL); @@ -173,7 +205,14 @@ static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev, struct Scsi_Host *shost = class_to_shost(cdev); struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25; + /* fe000000 means supports SAS2.1 */ + if (pm8001_ha->chip_id == chip_8001) + mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag & + 0xfe000000)>>25; + else + /* fe000000 means supports SAS2.1 */ + mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag & + 0xfe000000)>>25; return show_sas_spec_support_status(mode, buf); } static DEVICE_ATTR(sas_spec_support, S_IRUGO, @@ -270,6 +309,106 @@ static ssize_t pm8001_ctl_aap_log_show(struct device *cdev, } static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL); /** + * pm8001_ctl_ib_queue_log_show - Out bound Queue log + * @cdev:pointer to embedded class device + * @buf: the buffer returned + * A sysfs 'read-only' shost attribute. + */ +static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int offset; + char *str = buf; + int start = 0; +#define IB_MEMMAP(c) \ + (*(u32 *)((u8 *)pm8001_ha-> \ + memoryMap.region[IB].virt_ptr + \ + pm8001_ha->evtlog_ib_offset + (c))) + + for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { + str += sprintf(str, "0x%08x\n", IB_MEMMAP(start)); + start = start + 4; + } + pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET; + if (((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0) + pm8001_ha->evtlog_ib_offset = 0; + + return str - buf; +} + +static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL); +/** + * pm8001_ctl_ob_queue_log_show - Out bound Queue log + * @cdev:pointer to embedded class device + * @buf: the buffer returned + * A sysfs 'read-only' shost attribute. + */ + +static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + int offset; + char *str = buf; + int start = 0; +#define OB_MEMMAP(c) \ + (*(u32 *)((u8 *)pm8001_ha-> \ + memoryMap.region[OB].virt_ptr + \ + pm8001_ha->evtlog_ob_offset + (c))) + + for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { + str += sprintf(str, "0x%08x\n", OB_MEMMAP(start)); + start = start + 4; + } + pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET; + if (((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0) + pm8001_ha->evtlog_ob_offset = 0; + + return str - buf; +} +static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL); +/** + * pm8001_ctl_bios_version_show - Bios version Display + * @cdev:pointer to embedded class device + * @buf:the buffer returned + * A sysfs 'read-only' shost attribute. 
+ */ +static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + char *str = buf; + void *virt_addr; + int bios_index; + DECLARE_COMPLETION_ONSTACK(completion); + struct pm8001_ioctl_payload payload; + + pm8001_ha->nvmd_completion = &completion; + payload.minor_function = 7; + payload.offset = 0; + payload.length = 4096; + payload.func_specific = kzalloc(4096, GFP_KERNEL); + if (!payload.func_specific) + return -ENOMEM; + PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); + wait_for_completion(&completion); + virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; + for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT; + bios_index++) + str += sprintf(str, "%c", + *((u8 *)((u8 *)virt_addr+bios_index))); + kfree(payload.func_specific); + return str - buf; +} +static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL); +/** * pm8001_ctl_aap_log_show - IOP event log * @cdev: pointer to embedded class device * @buf: the buffer returned @@ -305,6 +444,43 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev, } static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL); +/** + ** pm8001_ctl_fatal_log_show - fatal error logging + ** @cdev:pointer to embedded class device + ** @buf: the buffer returned + ** + ** A sysfs 'read-only' shost attribute. + **/ + +static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + ssize_t count; + + count = pm80xx_get_fatal_dump(cdev, attr, buf); + return count; +} + +static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL); + + +/** + ** pm8001_ctl_gsm_log_show - gsm dump collection + ** @cdev:pointer to embedded class device + ** @buf: the buffer returned + **A sysfs 'read-only' shost attribute. 
+ **/ +static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + ssize_t count; + + count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf); + return count; +} + +static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL); + #define FLASH_CMD_NONE 0x00 #define FLASH_CMD_UPDATE 0x01 #define FLASH_CMD_SET_NVMD 0x02 @@ -361,10 +537,11 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) goto out; } payload = (struct pm8001_ioctl_payload *)ioctlbuffer; - memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data, + memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data, pm8001_ha->fw_image->size); payload->length = pm8001_ha->fw_image->size; payload->id = 0; + payload->minor_function = 0x1; pm8001_ha->nvmd_completion = &completion; ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); wait_for_completion(&completion); @@ -411,7 +588,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha) payload->length = 1024*16; payload->id = 0; fwControl = - (struct fw_control_info *)payload->func_specific; + (struct fw_control_info *)&payload->func_specific; fwControl->len = IOCTL_BUF_SIZE; /* IN */ fwControl->size = partitionSize + HEADER_LEN;/* IN */ fwControl->retcode = 0;/* OUT */ @@ -555,7 +732,7 @@ static ssize_t pm8001_show_update_fw(struct device *cdev, flash_error_table[i].reason); } -static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUGO, +static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP, pm8001_show_update_fw, pm8001_store_update_fw); struct device_attribute *pm8001_host_attrs[] = { &dev_attr_interface_rev, @@ -563,12 +740,17 @@ struct device_attribute *pm8001_host_attrs[] = { &dev_attr_update_fw, &dev_attr_aap_log, &dev_attr_iop_log, + &dev_attr_fatal_log, + &dev_attr_gsm_log, &dev_attr_max_out_io, &dev_attr_max_devices, &dev_attr_max_sg_list, &dev_attr_sas_spec_support, &dev_attr_logging_level, &dev_attr_host_sas_address, + &dev_attr_bios_version, + &dev_attr_ib_log, + &dev_attr_ob_log, NULL, }; diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h index 63ad4aa0c42..d0d43a250b9 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.h +++ b/drivers/scsi/pm8001/pm8001_ctl.h @@ -45,6 +45,8 @@ #define HEADER_LEN 28 #define SIZE_OFFSET 16 +#define BIOSOFFSET 56 +#define BIOS_OFFSET_LIMIT 61 #define FLASH_OK 0x000000 #define FAIL_OPEN_BIOS_FILE 0x000100 @@ -53,5 +55,9 @@ #define FAIL_OUT_MEMORY 0x000c00 #define FLASH_IN_PROGRESS 0x001000 +#define IB_OB_READ_TIMES 256 +#define SYSFS_OFFSET 1024 +#define PM80XX_IB_OB_QUEUE_SIZE (32 * 1024) +#define PM8001_IB_OB_QUEUE_SIZE (16 * 1024) #endif /* PM8001_CTL_H_INCLUDED */ diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h index c3d20c8d4ab..74a4bb9af07 100644 --- a/drivers/scsi/pm8001/pm8001_defs.h +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. 
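
Each pm8001_ctl.c show routine earlier in this file repeats the same split: main_cfg_tbl has become a union, so the legacy SPC part (chip_8001) must be read through .pm8001_tbl while the newer 80xx flavors read through .pm80xx_tbl. A hedged sketch of that access pattern — the field and enum names mirror the diff, but the helper itself is illustrative:

/* Illustrative helper condensing the chip_id branch the diff repeats. */
static u32 example_interface_rev(struct pm8001_hba_info *pm8001_ha)
{
	/* chip_8001 is the original SPC; the 8008/8009/8018/8019 and
	 * 8074/8076/8077 flavors added in pm8001_defs.h share the
	 * SPCv/SPCve table layout. */
	if (pm8001_ha->chip_id == chip_8001)
		return pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev;
	return pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev;
}

Two smaller points worth noting about these hunks. The new ib_log/ob_log attributes read IB_OB_READ_TIMES (256) 32-bit words per invocation, i.e. 256 * 4 = 1024 bytes, exactly SYSFS_OFFSET, so each read advances the saved offset by one page of the 32 KB (PM80XX_IB_OB_QUEUE_SIZE) ring before wrapping to zero. And the update_fw mode change from S_IRUGO|S_IWUGO to S_IRUGO|S_IWUSR|S_IWGRP drops the world-writable bit (S_IWUGO includes S_IWOTH); the switch from payload->func_specific to &payload->func_specific likewise suggests func_specific became an in-line array rather than a pointer in the reworked ioctl payload.
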
@@ -43,9 +43,15 @@ enum chip_flavors { chip_8001, + chip_8008, + chip_8009, + chip_8018, + chip_8019, + chip_8074, + chip_8076, + chip_8077 }; -#define USI_MAX_MEMCNT 9 -#define PM8001_MAX_DMA_SG SG_ALL + enum phy_speed { PHY_SPEED_15 = 0x01, PHY_SPEED_30 = 0x02, @@ -69,23 +75,35 @@ enum port_type { #define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */ #define PM8001_MAX_INB_NUM 1 #define PM8001_MAX_OUTB_NUM 1 +#define PM8001_MAX_SPCV_INB_NUM 1 +#define PM8001_MAX_SPCV_OUTB_NUM 4 #define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */ +/* Inbound/Outbound queue size */ +#define IOMB_SIZE_SPC 64 +#define IOMB_SIZE_SPCV 128 + /* unchangeable hardware details */ -#define PM8001_MAX_PHYS 8 /* max. possible phys */ -#define PM8001_MAX_PORTS 8 /* max. possible ports */ -#define PM8001_MAX_DEVICES 1024 /* max supported device */ +#define PM8001_MAX_PHYS 16 /* max. possible phys */ +#define PM8001_MAX_PORTS 16 /* max. possible ports */ +#define PM8001_MAX_DEVICES 2048 /* max supported device */ +#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */ +#define USI_MAX_MEMCNT_BASE 5 +#define IB (USI_MAX_MEMCNT_BASE + 1) +#define CI (IB + PM8001_MAX_SPCV_INB_NUM) +#define OB (CI + PM8001_MAX_SPCV_INB_NUM) +#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM) +#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM) +#define PM8001_MAX_DMA_SG SG_ALL enum memory_region_num { AAP1 = 0x0, /* application acceleration processor */ IOP, /* IO processor */ - CI, /* consumer index */ - PI, /* producer index */ - IB, /* inbound queue */ - OB, /* outbound queue */ NVMD, /* NVM device */ DEV_MEM, /* memory for devices */ CCB_MEM, /* memory for command control block */ + FW_FLASH, /* memory for fw flash update */ + FORENSIC_MEM /* memory for fw forensic data */ }; #define PM8001_EVENT_LOG_SIZE (128 * 1024) diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index b8dd05074ab..a97be015e52 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -50,32 +50,39 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->main_cfg_tbl_addr; - pm8001_ha->main_cfg_tbl.signature = pm8001_mr32(address, 0x00); - pm8001_ha->main_cfg_tbl.interface_rev = pm8001_mr32(address, 0x04); - pm8001_ha->main_cfg_tbl.firmware_rev = pm8001_mr32(address, 0x08); - pm8001_ha->main_cfg_tbl.max_out_io = pm8001_mr32(address, 0x0C); - pm8001_ha->main_cfg_tbl.max_sgl = pm8001_mr32(address, 0x10); - pm8001_ha->main_cfg_tbl.ctrl_cap_flag = pm8001_mr32(address, 0x14); - pm8001_ha->main_cfg_tbl.gst_offset = pm8001_mr32(address, 0x18); - pm8001_ha->main_cfg_tbl.inbound_queue_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.signature = + pm8001_mr32(address, 0x00); + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev = + pm8001_mr32(address, 0x04); + pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev = + pm8001_mr32(address, 0x08); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io = + pm8001_mr32(address, 0x0C); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl = + pm8001_mr32(address, 0x10); + pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag = + pm8001_mr32(address, 0x14); + pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset = + pm8001_mr32(address, 0x18); + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset = pm8001_mr32(address, MAIN_IBQ_OFFSET); - pm8001_ha->main_cfg_tbl.outbound_queue_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset = pm8001_mr32(address, MAIN_OBQ_OFFSET); - pm8001_ha->main_cfg_tbl.hda_mode_flag = + 
pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag = pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET); /* read analog Setting offset from the configuration table */ - pm8001_ha->main_cfg_tbl.anolog_setup_table_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset = pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); /* read Error Dump Offset and Length */ - pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); - pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); - pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); - pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); } @@ -86,31 +93,56 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->general_stat_tbl_addr; - pm8001_ha->gs_tbl.gst_len_mpistate = pm8001_mr32(address, 0x00); - pm8001_ha->gs_tbl.iq_freeze_state0 = pm8001_mr32(address, 0x04); - pm8001_ha->gs_tbl.iq_freeze_state1 = pm8001_mr32(address, 0x08); - pm8001_ha->gs_tbl.msgu_tcnt = pm8001_mr32(address, 0x0C); - pm8001_ha->gs_tbl.iop_tcnt = pm8001_mr32(address, 0x10); - pm8001_ha->gs_tbl.reserved = pm8001_mr32(address, 0x14); - pm8001_ha->gs_tbl.phy_state[0] = pm8001_mr32(address, 0x18); - pm8001_ha->gs_tbl.phy_state[1] = pm8001_mr32(address, 0x1C); - pm8001_ha->gs_tbl.phy_state[2] = pm8001_mr32(address, 0x20); - pm8001_ha->gs_tbl.phy_state[3] = pm8001_mr32(address, 0x24); - pm8001_ha->gs_tbl.phy_state[4] = pm8001_mr32(address, 0x28); - pm8001_ha->gs_tbl.phy_state[5] = pm8001_mr32(address, 0x2C); - pm8001_ha->gs_tbl.phy_state[6] = pm8001_mr32(address, 0x30); - pm8001_ha->gs_tbl.phy_state[7] = pm8001_mr32(address, 0x34); - pm8001_ha->gs_tbl.reserved1 = pm8001_mr32(address, 0x38); - pm8001_ha->gs_tbl.reserved2 = pm8001_mr32(address, 0x3C); - pm8001_ha->gs_tbl.reserved3 = pm8001_mr32(address, 0x40); - pm8001_ha->gs_tbl.recover_err_info[0] = pm8001_mr32(address, 0x44); - pm8001_ha->gs_tbl.recover_err_info[1] = pm8001_mr32(address, 0x48); - pm8001_ha->gs_tbl.recover_err_info[2] = pm8001_mr32(address, 0x4C); - pm8001_ha->gs_tbl.recover_err_info[3] = pm8001_mr32(address, 0x50); - pm8001_ha->gs_tbl.recover_err_info[4] = pm8001_mr32(address, 0x54); - pm8001_ha->gs_tbl.recover_err_info[5] = pm8001_mr32(address, 0x58); - pm8001_ha->gs_tbl.recover_err_info[6] = pm8001_mr32(address, 0x5C); - pm8001_ha->gs_tbl.recover_err_info[7] = pm8001_mr32(address, 0x60); + pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate = + pm8001_mr32(address, 0x00); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0 = + pm8001_mr32(address, 0x04); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1 = + pm8001_mr32(address, 0x08); + pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt = + pm8001_mr32(address, 0x0C); + pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt = + pm8001_mr32(address, 0x10); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd = + pm8001_mr32(address, 0x14); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0] = + pm8001_mr32(address, 0x18); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1] = + pm8001_mr32(address, 0x1C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2] = + pm8001_mr32(address, 
0x20); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3] = + pm8001_mr32(address, 0x24); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4] = + pm8001_mr32(address, 0x28); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5] = + pm8001_mr32(address, 0x2C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6] = + pm8001_mr32(address, 0x30); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7] = + pm8001_mr32(address, 0x34); + pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val = + pm8001_mr32(address, 0x38); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0] = + pm8001_mr32(address, 0x3C); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1] = + pm8001_mr32(address, 0x40); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0] = + pm8001_mr32(address, 0x44); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1] = + pm8001_mr32(address, 0x48); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2] = + pm8001_mr32(address, 0x4C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3] = + pm8001_mr32(address, 0x50); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4] = + pm8001_mr32(address, 0x54); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5] = + pm8001_mr32(address, 0x58); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6] = + pm8001_mr32(address, 0x5C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7] = + pm8001_mr32(address, 0x60); } /** @@ -119,10 +151,9 @@ static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) */ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { - int inbQ_num = 1; int i; void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; - for (i = 0; i < inbQ_num; i++) { + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { u32 offset = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); @@ -137,10 +168,9 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) */ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { - int outbQ_num = 1; int i; void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; - for (i = 0; i < outbQ_num; i++) { + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { u32 offset = i * 0x24; pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); @@ -155,54 +185,57 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) */ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) { - int qn = 1; int i; u32 offsetib, offsetob; void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; - pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd = 0; - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7 = 0; - - pm8001_ha->main_cfg_tbl.upper_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7 = 0; + 
pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0; + + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr = pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; - pm8001_ha->main_cfg_tbl.lower_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr = pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; - pm8001_ha->main_cfg_tbl.event_log_size = PM8001_EVENT_LOG_SIZE; - pm8001_ha->main_cfg_tbl.event_log_option = 0x01; - pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr = pm8001_ha->memoryMap.region[IOP].phys_addr_hi; - pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr = pm8001_ha->memoryMap.region[IOP].phys_addr_lo; - pm8001_ha->main_cfg_tbl.iop_event_log_size = PM8001_EVENT_LOG_SIZE; - pm8001_ha->main_cfg_tbl.iop_event_log_option = 0x01; - pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01; - for (i = 0; i < qn; i++) { + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = - PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); + PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); pm8001_ha->inbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[IB].phys_addr_hi; + pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[IB].phys_addr_lo; + pm8001_ha->memoryMap.region[IB + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr; pm8001_ha->inbnd_q_tbl[i].total_length = - pm8001_ha->memoryMap.region[IB].total_len; + pm8001_ha->memoryMap.region[IB + i].total_len; pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = - pm8001_ha->memoryMap.region[CI].phys_addr_hi; + pm8001_ha->memoryMap.region[CI + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = - pm8001_ha->memoryMap.region[CI].phys_addr_lo; + pm8001_ha->memoryMap.region[CI + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].ci_virt = - pm8001_ha->memoryMap.region[CI].virt_ptr; + pm8001_ha->memoryMap.region[CI + i].virt_ptr; offsetib = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(addressib, @@ -212,25 +245,25 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; } - for (i = 0; i < qn; i++) { + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { pm8001_ha->outbnd_q_tbl[i].element_size_cnt = - PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); + PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); pm8001_ha->outbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[OB].phys_addr_hi; + pm8001_ha->memoryMap.region[OB + 
i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[OB].phys_addr_lo; + pm8001_ha->memoryMap.region[OB + i].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr; pm8001_ha->outbnd_q_tbl[i].total_length = - pm8001_ha->memoryMap.region[OB].total_len; + pm8001_ha->memoryMap.region[OB + i].total_len; pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = - pm8001_ha->memoryMap.region[PI].phys_addr_hi; + pm8001_ha->memoryMap.region[PI + i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = - pm8001_ha->memoryMap.region[PI].phys_addr_lo; + pm8001_ha->memoryMap.region[PI + i].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = - 0 | (10 << 16) | (0 << 24); + 0 | (10 << 16) | (i << 24); pm8001_ha->outbnd_q_tbl[i].pi_virt = - pm8001_ha->memoryMap.region[PI].virt_ptr; + pm8001_ha->memoryMap.region[PI + i].virt_ptr; offsetob = i * 0x24; pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = get_pci_bar_index(pm8001_mr32(addressob, @@ -250,42 +283,51 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->main_cfg_tbl_addr; pm8001_mw32(address, 0x24, - pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd); + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd); pm8001_mw32(address, 0x28, - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3); pm8001_mw32(address, 0x2C, - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7); pm8001_mw32(address, 0x30, - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3); pm8001_mw32(address, 0x34, - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7); pm8001_mw32(address, 0x38, - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid0_3); pm8001_mw32(address, 0x3C, - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid4_7); pm8001_mw32(address, 0x40, - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid0_3); pm8001_mw32(address, 0x44, - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid4_7); pm8001_mw32(address, 0x48, - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_smp_event_pid0_3); pm8001_mw32(address, 0x4C, - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. 
+ outbound_tgt_smp_event_pid4_7); pm8001_mw32(address, 0x50, - pm8001_ha->main_cfg_tbl.upper_event_log_addr); + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr); pm8001_mw32(address, 0x54, - pm8001_ha->main_cfg_tbl.lower_event_log_addr); - pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size); - pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option); + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr); + pm8001_mw32(address, 0x58, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size); + pm8001_mw32(address, 0x5C, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option); pm8001_mw32(address, 0x60, - pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr); + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr); pm8001_mw32(address, 0x64, - pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr); - pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size); + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr); + pm8001_mw32(address, 0x68, + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size); pm8001_mw32(address, 0x6C, - pm8001_ha->main_cfg_tbl.iop_event_log_option); + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option); pm8001_mw32(address, 0x70, - pm8001_ha->main_cfg_tbl.fatal_err_interrupt); + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt); } /** @@ -597,6 +639,19 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) */ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) { + u8 i = 0; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + /* 8081 controllers need BAR shift to access MPI space + * as this is shared with BIOS data */ + if (deviceid == 0x8081 || deviceid == 0x0042) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE)); + return -1; + } + } /* check the firmware status */ if (-1 == check_fw_ready(pm8001_ha)) { PM8001_FAIL_DBG(pm8001_ha, @@ -613,11 +668,16 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) read_outbnd_queue_table(pm8001_ha); /* update main config table ,inbound table and outbound table */ update_main_config_table(pm8001_ha); - update_inbnd_queue_table(pm8001_ha, 0); - update_outbnd_queue_table(pm8001_ha, 0); - mpi_set_phys_g3_with_ssc(pm8001_ha, 0); - /* 7->130ms, 34->500ms, 119->1.5s */ - mpi_set_open_retry_interval_reg(pm8001_ha, 119); + for (i = 0; i < PM8001_MAX_INB_NUM; i++) + update_inbnd_queue_table(pm8001_ha, i); + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) + update_outbnd_queue_table(pm8001_ha, i); + /* 8081 controller donot require these operations */ + if (deviceid != 0x8081 && deviceid != 0x0042) { + mpi_set_phys_g3_with_ssc(pm8001_ha, 0); + /* 7->130ms, 34->500ms, 119->1.5s */ + mpi_set_open_retry_interval_reg(pm8001_ha, 119); + } /* notify firmware update finished and check initialization status */ if (0 == mpi_init_check(pm8001_ha)) { PM8001_INIT_DBG(pm8001_ha, @@ -639,6 +699,16 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) u32 max_wait_count; u32 value; u32 gst_len_mpistate; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + if (deviceid == 0x8081 || deviceid == 0x0042) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE)); + return -1; + } + } init_pci_device_addresses(pm8001_ha); /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the table 
is stop */ @@ -740,14 +810,14 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha) * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all * the FW register status to the originated status. * @pm8001_ha: our hba card information - * @signature: signature in host scratch pad0 register. */ static int -pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature) +pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) { u32 regVal, toggleVal; u32 max_wait_count; u32 regVal1, regVal2, regVal3; + u32 signature = 0x252acbcd; /* for host scratch pad0 */ unsigned long flags; /* step1: Check FW is ready for soft reset */ @@ -1113,7 +1183,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) * pm8001_chip_iounmap - which maped when initialized. * @pm8001_ha: our hba card information */ -static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) { s8 bar, logical = 0; for (bar = 0; bar < 6; bar++) { @@ -1192,7 +1262,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha, * @pm8001_ha: our hba card information */ static void -pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX pm8001_chip_msix_interrupt_enable(pm8001_ha, 0); @@ -1207,7 +1277,7 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha) * @pm8001_ha: our hba card information */ static void -pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX pm8001_chip_msix_interrupt_disable(pm8001_ha, 0); @@ -1218,12 +1288,13 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) } /** - * mpi_msg_free_get- get the free message buffer for transfer inbound queue. + * pm8001_mpi_msg_free_get - get the free message buffer for transfer + * inbound queue. * @circularQ: the inbound queue we want to transfer to HBA. * @messageSize: the message size of this transfer, normally it is 64 bytes * @messagePtr: the pointer to message. */ -static int mpi_msg_free_get(struct inbound_queue_table *circularQ, +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, u16 messageSize, void **messagePtr) { u32 offset, consumer_index; @@ -1231,7 +1302,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, u8 bcCount = 1; /* only support single buffer */ /* Checks is the requested message size can be allocated in this queue*/ - if (messageSize > 64) { + if (messageSize > IOMB_SIZE_SPCV) { *messagePtr = NULL; return -1; } @@ -1245,7 +1316,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, return -1; } /* get memory IOMB buffer address */ - offset = circularQ->producer_idx * 64; + offset = circularQ->producer_idx * messageSize; /* increment to next bcCount element */ circularQ->producer_idx = (circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE; @@ -1257,29 +1328,30 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, } /** - * mpi_build_cmd- build the message queue for transfer, update the PI to FW - * to tell the fw to get this message from IOMB. + * pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to + * FW to tell the fw to get this message from IOMB. * @pm8001_ha: our hba card information * @circularQ: the inbound queue we want to transfer to HBA. 
* @opCode: the operation code represents commands which LLDD and fw recognized. * @payload: the command payload of each operation command. */ -static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, struct inbound_queue_table *circularQ, - u32 opCode, void *payload) + u32 opCode, void *payload, u32 responseQueue) { u32 Header = 0, hpriority = 0, bc = 1, category = 0x02; - u32 responseQueue = 0; void *pMessage; - if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) { + if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size, + &pMessage) < 0) { PM8001_IO_DBG(pm8001_ha, pm8001_printk("No free mpi buffer\n")); return -1; } BUG_ON(!payload); /*Copy to the payload*/ - memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr))); + memcpy(pMessage, payload, (pm8001_ha->iomb_size - + sizeof(struct mpi_msg_hdr))); /*Build the header*/ Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24) @@ -1291,12 +1363,13 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, circularQ->pi_offset, circularQ->producer_idx); PM8001_IO_DBG(pm8001_ha, - pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx, - circularQ->consumer_index)); + pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n", + responseQueue, opCode, circularQ->producer_idx, + circularQ->consumer_index)); return 0; } -static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, struct outbound_queue_table *circularQ, u8 bc) { u32 producer_index; @@ -1305,7 +1378,7 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr)); pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + - circularQ->consumer_idx * 64); + circularQ->consumer_idx * pm8001_ha->iomb_size); if (pOutBoundMsgHeader != msgHeader) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("consumer_idx = %d msgHeader = %p\n", @@ -1336,13 +1409,14 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, } /** - * mpi_msg_consume- get the MPI message from outbound queue message table. + * pm8001_mpi_msg_consume- get the MPI message from outbound queue + * message table. * @pm8001_ha: our hba card information * @circularQ: the outbound queue table. * @messagePtr1: the message contents of this outbound message. * @pBC: the message size. 
*/ -static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, struct outbound_queue_table *circularQ, void **messagePtr1, u8 *pBC) { @@ -1356,7 +1430,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, /*Get the pointer to the circular queue buffer element*/ msgHeader = (struct mpi_msg_hdr *) (circularQ->base_virt + - circularQ->consumer_idx * 64); + circularQ->consumer_idx * pm8001_ha->iomb_size); /* read header */ header_tmp = pm8001_read_32(msgHeader); msgHeader_tmp = cpu_to_le32(header_tmp); @@ -1416,7 +1490,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, return MPI_IO_STATUS_BUSY; } -static void pm8001_work_fn(struct work_struct *work) +void pm8001_work_fn(struct work_struct *work) { struct pm8001_work *pw = container_of(work, struct pm8001_work, work); struct pm8001_device *pm8001_dev; @@ -1431,7 +1505,7 @@ static void pm8001_work_fn(struct work_struct *work) pm8001_dev = pw->data; /* Most stash device structure */ if ((pm8001_dev == NULL) || ((pw->handler != IO_XFER_ERROR_BREAK) - && (pm8001_dev->dev_type == NO_DEVICE))) { + && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) { kfree(pw); return; } @@ -1596,7 +1670,7 @@ static void pm8001_work_fn(struct work_struct *work) } break; case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: dev = pm8001_dev->sas_device; - pm8001_I_T_nexus_reset(dev); + pm8001_I_T_nexus_event_handler(dev); break; case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: dev = pm8001_dev->sas_device; @@ -1614,7 +1688,7 @@ static void pm8001_work_fn(struct work_struct *work) kfree(pw); } -static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, int handler) { struct pm8001_work *pw; @@ -1633,6 +1707,123 @@ static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, return ret; } +static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct task_abort_req task_abort; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_ABORT; + int ret; + + if (!pm8001_ha_dev) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n")); + return; + } + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot " + "allocate task\n")); + return; + } + + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) + return; + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&task_abort, 0, sizeof(task_abort)); + task_abort.abort_all = cpu_to_le32(1); + task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + task_abort.tag = cpu_to_le32(ccb_tag); + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); + +} + +static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + struct sata_start_req sata_cmd; + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct host_to_dev_fis fis; + struct domain_device *dev; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate task !!!\n")); + 
return; + } + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate tag !!!\n")); + return; + } + + /* allocate domain device by ourselves as libsas + * is not going to provide any + */ + dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); + if (!dev) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Domain device cannot be allocated\n")); + sas_free_task(task); + return; + } else { + task->dev = dev; + task->dev->lldd_dev = pm8001_ha_dev; + } + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; + pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; + + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* construct read log FIS */ + memset(&fis, 0, sizeof(struct host_to_dev_fis)); + fis.fis_type = 0x27; + fis.flags = 0x80; + fis.command = ATA_CMD_READ_LOG_EXT; + fis.lbal = 0x10; + fis.sector_count = 0x1; + + sata_cmd.tag = cpu_to_le32(ccb_tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9)); + memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); + + res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); + +} + /** * mpi_ssp_completion- process the event that FW response to the SSP request. * @pm8001_ha: our hba card information @@ -1677,6 +1868,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) if (unlikely(!t || !t->lldd_task || !t->dev)) return; ts = &t->task_status; + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SAS Address of IO Failure Drive:" + "%016llx", SAS_ADDR(t->dev->sas_addr))); + switch (status) { case IO_SUCCESS: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS" @@ -1867,7 +2065,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) break; } PM8001_IO_DBG(pm8001_ha, - pm8001_printk("scsi_status = %x \n ", + pm8001_printk("scsi_status = %x\n ", psspPayload->ssp_resp_iu.status)); spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_STATE_PENDING; @@ -2085,6 +2283,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 param; u32 status; u32 tag; + int i, j; + u8 sata_addr_low[4]; + u32 temp_sata_addr_low; + u8 sata_addr_hi[4]; + u32 temp_sata_addr_hi; struct sata_completion_resp *psataPayload; struct task_status_struct *ts; struct ata_task_resp *resp ; @@ -2096,23 +2299,103 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) status = le32_to_cpu(psataPayload->status); tag = le32_to_cpu(psataPayload->tag); + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("tag null\n")); + return; + } ccb = &pm8001_ha->ccb_info[tag]; param = le32_to_cpu(psataPayload->param); - t = ccb->task; - ts = &t->task_status; - pm8001_dev = ccb->device; - if (status) + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("sata IO status 0x%x\n", status)); - if (unlikely(!t || !t->lldd_task || !t->dev)) + pm8001_printk("ccb null\n")); + return; + } + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task null\n")); + return; + } + + if ((pm8001_dev && !(pm8001_dev->id & 
NCQ_READ_LOG_FLAG)) + && unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); return; + } + ts = &t->task_status; + if (!ts) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ts null\n")); + return; + } + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) { + if (!((t->dev->parent) && + (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) { + for (i = 0 , j = 4; j <= 7 && i <= 3; i++ , j++) + sata_addr_low[i] = pm8001_ha->sas_addr[j]; + for (i = 0 , j = 0; j <= 3 && i <= 3; i++ , j++) + sata_addr_hi[i] = pm8001_ha->sas_addr[j]; + memcpy(&temp_sata_addr_low, sata_addr_low, + sizeof(sata_addr_low)); + memcpy(&temp_sata_addr_hi, sata_addr_hi, + sizeof(sata_addr_hi)); + temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) + |((temp_sata_addr_hi << 8) & + 0xff0000) | + ((temp_sata_addr_hi >> 8) + & 0xff00) | + ((temp_sata_addr_hi << 24) & + 0xff000000)); + temp_sata_addr_low = ((((temp_sata_addr_low >> 24) + & 0xff) | + ((temp_sata_addr_low << 8) + & 0xff0000) | + ((temp_sata_addr_low >> 8) + & 0xff00) | + ((temp_sata_addr_low << 24) + & 0xff000000)) + + pm8001_dev->attached_phy + + 0x10); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SAS Address of IO Failure Drive:" + "%08x%08x", temp_sata_addr_hi, + temp_sata_addr_low)); + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SAS Address of IO Failure Drive:" + "%016llx", SAS_ADDR(t->dev->sas_addr))); + } + } switch (status) { case IO_SUCCESS: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); if (param == 0) { ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_STAT_GOOD; + /* check if response is for SEND READ LOG */ + if (pm8001_dev && + (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { + /* set new bit for abort_all */ + pm8001_dev->id |= NCQ_ABORT_ALL_FLAG; + /* clear bit for read log */ + pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF; + pm8001_send_abort_all(pm8001_ha, pm8001_dev); + /* Free the tag */ + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + return; + } } else { u8 len; ts->resp = SAS_TASK_COMPLETE; @@ -2219,11 +2502,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_QUEUE_FULL; - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - mb();/*in order to force CPU ordering*/ - spin_unlock_irq(&pm8001_ha->lock); - t->task_done(t); - spin_lock_irq(&pm8001_ha->lock); + pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); return; } break; @@ -2239,11 +2518,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_QUEUE_FULL; - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - mb();/*ditto*/ - spin_unlock_irq(&pm8001_ha->lock); - t->task_done(t); - spin_lock_irq(&pm8001_ha->lock); + pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); return; } break; @@ -2267,11 +2542,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_QUEUE_FULL; - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - mb();/* ditto*/ - spin_unlock_irq(&pm8001_ha->lock); - t->task_done(t); - spin_lock_irq(&pm8001_ha->lock); + pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); return; } break; @@ -2334,11 +2605,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) IO_DS_NON_OPERATIONAL); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = 
SAS_QUEUE_FULL; - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - mb();/*ditto*/ - spin_unlock_irq(&pm8001_ha->lock); - t->task_done(t); - spin_lock_irq(&pm8001_ha->lock); + pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); return; } break; @@ -2358,11 +2625,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) IO_DS_IN_ERROR); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_QUEUE_FULL; - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - mb();/*ditto*/ - spin_unlock_irq(&pm8001_ha->lock); - t->task_done(t); - spin_lock_irq(&pm8001_ha->lock); + pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); return; } break; @@ -2391,20 +2654,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) " resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat)); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - } else if (t->uldd_task) { - spin_unlock_irqrestore(&t->task_state_lock, flags); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - mb();/* ditto */ - spin_unlock_irq(&pm8001_ha->lock); - t->task_done(t); - spin_lock_irq(&pm8001_ha->lock); - } else if (!t->uldd_task) { + } else { spin_unlock_irqrestore(&t->task_state_lock, flags); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - mb();/*ditto*/ - spin_unlock_irq(&pm8001_ha->lock); - t->task_done(t); - spin_lock_irq(&pm8001_ha->lock); + pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); } } @@ -2424,6 +2676,29 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) unsigned long flags; ccb = &pm8001_ha->ccb_info[tag]; + + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("No CCB !!!. returning\n")); + } + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SATA EVENT 0x%x\n", event)); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + /* send read log extension */ + if (pm8001_dev) + pm8001_send_read_log(pm8001_ha, pm8001_dev); + return; + } + + ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; pm8001_dev = ccb->device; if (event) @@ -2432,9 +2707,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) if (unlikely(!t || !t->lldd_task || !t->dev)) return; ts = &t->task_status; - PM8001_IO_DBG(pm8001_ha, - pm8001_printk("port_id = %x,device_id = %x\n", - port_id, dev_id)); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, dev_id, tag, event)); switch (event) { case IO_OVERFLOW: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); @@ -2490,11 +2765,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_QUEUE_FULL; - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - mb();/*ditto*/ - spin_unlock_irq(&pm8001_ha->lock); - t->task_done(t); - spin_lock_irq(&pm8001_ha->lock); + pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); return; } break; @@ -2603,20 +2874,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) " resp 0x%x stat 0x%x but aborted by upper layer!\n", t, event, ts->resp, ts->stat)); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - } else if (t->uldd_task) { - spin_unlock_irqrestore(&t->task_state_lock, flags); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - mb();/* ditto */ - spin_unlock_irq(&pm8001_ha->lock); - t->task_done(t); - 
spin_lock_irq(&pm8001_ha->lock); - } else if (!t->uldd_task) { + } else { spin_unlock_irqrestore(&t->task_state_lock, flags); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - mb();/*ditto*/ - spin_unlock_irq(&pm8001_ha->lock); - t->task_done(t); - spin_lock_irq(&pm8001_ha->lock); + pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); } } @@ -2822,8 +3082,8 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) } } -static void -mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) { struct set_dev_state_resp *pPayload = (struct set_dev_state_resp *)(piomb + 4); @@ -2832,8 +3092,8 @@ mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) struct pm8001_device *pm8001_dev = ccb->device; u32 status = le32_to_cpu(pPayload->status); u32 device_id = le32_to_cpu(pPayload->device_id); - u8 pds = le32_to_cpu(pPayload->pds_nds) | PDS_BITS; - u8 nds = le32_to_cpu(pPayload->pds_nds) | NDS_BITS; + u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS; + u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS; PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state " "from 0x%x to 0x%x status = 0x%x!\n", device_id, pds, nds, status)); @@ -2843,8 +3103,7 @@ mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static void -mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct get_nvm_data_resp *pPayload = (struct get_nvm_data_resp *)(piomb + 4); @@ -2863,8 +3122,8 @@ mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static void -mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void +pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct fw_control_ex *fw_control_context; struct get_nvm_data_resp *pPayload = @@ -2925,7 +3184,7 @@ mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct local_phy_ctl_resp *pPayload = (struct local_phy_ctl_resp *)(piomb + 4); @@ -2954,7 +3213,7 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) * while receive a broadcast(change) primitive just tell the sas * layer to discover the changed domain rather than the whole domain. */ -static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) { struct pm8001_phy *phy = &pm8001_ha->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; @@ -2988,7 +3247,7 @@ static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) } /* Get the link rate speed */ -static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) { struct sas_phy *sas_phy = phy->sas_phy.phy; @@ -3025,7 +3284,7 @@ static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame * buffer. 
*/ -static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr) { if (phy->sas_phy.frame_rcvd[0] == 0x34 @@ -3067,7 +3326,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, ((phyId & 0x0F) << 4) | (port_id & 0x0F)); payload.param0 = cpu_to_le32(param0); payload.param1 = cpu_to_le32(param1); - mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); } static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, @@ -3098,6 +3357,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) unsigned long flags; u8 deviceType = pPayload->sas_identify.dev_type; port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPC; PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n", port_id, phy_id)); @@ -3112,19 +3372,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, PHY_NOTIFY_ENABLE_SPINUP); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; case SAS_EDGE_EXPANDER_DEVICE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("expander device.\n")); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; case SAS_FANOUT_EXPANDER_DEVICE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("fanout expander device.\n")); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; default: PM8001_MSG_DBG(pm8001_ha, @@ -3178,8 +3438,9 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d," " phy id = %d\n", port_id, phy_id)); port->port_state = portstate; + phy->phy_state = PHY_STATE_LINK_UP_SPC; port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); phy->phy_type |= PORT_TYPE_SATA; phy->phy_attached = 1; phy->sas_phy.oob_mode = SATA_OOB_MODE; @@ -3189,7 +3450,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) sizeof(struct dev_to_host_fis)); phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; - phy->identify.device_type = SATA_DEV; + phy->identify.device_type = SAS_SATA_DEV; pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); pm8001_bytes_dmaed(pm8001_ha, phy_id); @@ -3260,7 +3521,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * mpi_reg_resp -process register device ID response. + * pm8001_mpi_reg_resp -process register device ID response. * @pm8001_ha: our hba card information * @piomb: IO message buffer * @@ -3269,7 +3530,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) * has assigned, from now,inter-communication with FW is no longer using the * SAS address, use device ID which FW assigned. 
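*
* Subsequent requests then carry that ID instead of the SAS address; a
* minimal sketch (the same pattern the request builders below use):
*
*	payload.device_id = cpu_to_le32(pm8001_dev->device_id);
*	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);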
*/ -static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 status; u32 device_id; @@ -3331,7 +3592,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 status; u32 device_id; @@ -3347,8 +3608,13 @@ static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int -mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +/** + * fw_flash_update_resp - Response from FW for flash update command. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) { u32 status; struct fw_control_ex fw_control_context; @@ -3403,10 +3669,6 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) break; } ccb->fw_control_context->fw_control->retcode = status; - pci_free_consistent(pm8001_ha->pdev, - fw_control_context.len, - fw_control_context.virtAddr, - fw_control_context.phys_addr); complete(pm8001_ha->nvmd_completion); ccb->task = NULL; ccb->ccb_tag = 0xFFFFFFFF; @@ -3414,8 +3676,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int -mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) { u32 status; int i; @@ -3431,8 +3692,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) return 0; } -static int -mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct sas_task *t; struct pm8001_ccb_info *ccb; @@ -3440,19 +3700,29 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 status ; u32 tag, scp; struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; struct task_abort_resp *pPayload = (struct task_abort_resp *)(piomb + 4); status = le32_to_cpu(pPayload->status); tag = le32_to_cpu(pPayload->tag); + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TAG NULL. RETURNING !!!")); + return -1; + } + scp = le32_to_cpu(pPayload->scp); ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; - PM8001_IO_DBG(pm8001_ha, - pm8001_printk(" status = 0x%x\n", status)); - if (t == NULL) + pm8001_dev = ccb->device; /* retrieve device */ + + if (!t) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TASK NULL. 
RETURNING !!!")); return -1; + } ts = &t->task_status; if (status != 0) PM8001_FAIL_DBG(pm8001_ha, @@ -3476,7 +3746,15 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb(); - t->task_done(t); + + if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) { + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + /* clear the flag */ + pm8001_dev->id &= 0xBFFFFFFF; + } else + t->task_done(t); + return 0; } @@ -3727,17 +4005,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_LOCAL_PHY_CNTRL: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); - mpi_local_phy_ctl(pm8001_ha, piomb); + pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); break; case OPC_OUB_DEV_REGIST: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_DEV_REGIST\n")); - mpi_reg_resp(pm8001_ha, piomb); + pm8001_mpi_reg_resp(pm8001_ha, piomb); break; case OPC_OUB_DEREG_DEV: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister the device\n")); - mpi_dereg_resp(pm8001_ha, piomb); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEV_HANDLE: PM8001_MSG_DBG(pm8001_ha, @@ -3775,7 +4053,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_FW_FLASH_UPDATE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); - mpi_fw_flash_update_resp(pm8001_ha, piomb); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); break; case OPC_OUB_GPIO_RESPONSE: PM8001_MSG_DBG(pm8001_ha, @@ -3788,17 +4066,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_GENERAL_EVENT: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); - mpi_general_event(pm8001_ha, piomb); + pm8001_mpi_general_event(pm8001_ha, piomb); break; case OPC_OUB_SSP_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_SATA_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_SAS_DIAG_MODE_START_END: PM8001_MSG_DBG(pm8001_ha, @@ -3823,17 +4101,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_SMP_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_NVMD_DATA: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); - mpi_get_nvmd_resp(pm8001_ha, piomb); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); break; case OPC_OUB_SET_NVMD_DATA: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); - mpi_set_nvmd_resp(pm8001_ha, piomb); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); break; case OPC_OUB_DEVICE_HANDLE_REMOVAL: PM8001_MSG_DBG(pm8001_ha, @@ -3842,7 +4120,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_SET_DEVICE_STATE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); - mpi_set_dev_state_resp(pm8001_ha, piomb); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEVICE_STATE: PM8001_MSG_DBG(pm8001_ha, @@ -3864,7 +4142,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) } } -static int process_oq(struct 
pm8001_hba_info *pm8001_ha) +static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) { struct outbound_queue_table *circularQ; void *pMsg1 = NULL; @@ -3873,14 +4151,15 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha) unsigned long flags; spin_lock_irqsave(&pm8001_ha->lock, flags); - circularQ = &pm8001_ha->outbnd_q_tbl[0]; + circularQ = &pm8001_ha->outbnd_q_tbl[vec]; do { - ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); if (MPI_IO_STATUS_SUCCESS == ret) { /* process the outbound message */ process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); /* free the message from the outbound circular buffer */ - mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc); + pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, + circularQ, bc); } if (MPI_IO_STATUS_BUSY == ret) { /* Update the producer index from SPC */ @@ -3903,7 +4182,7 @@ static const u8 data_dir_flags[] = { [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */ [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */ }; -static void +void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd) { int i; @@ -3978,7 +4257,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha, smp_cmd.long_smp_req.long_resp_size = cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd); - mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0); return 0; err_out_2: @@ -4019,7 +4298,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.ssp_iu.efb_prio_attr |= 0x80; ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3); ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); - memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16); + memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); circularQ = &pm8001_ha->inbnd_q_tbl[0]; /* fill in PRD (scatter/gather) table, if any */ @@ -4042,7 +4322,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.esgl = 0; } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0); return ret; } @@ -4060,6 +4340,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u32 ATAP = 0x0; u32 dir; struct inbound_queue_table *circularQ; + unsigned long flags; u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); circularQ = &pm8001_ha->inbnd_q_tbl[0]; @@ -4080,8 +4361,10 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); } } - if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); ncg_tag = hdr_tag; + } dir = data_dir_flags[task->data_dir] << 8; sata_cmd.tag = cpu_to_le32(tag); sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); @@ -4112,7 +4395,43 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd); + + /* Check for read log for failed drive and return */ + if (sata_cmd.sata_fis.command == 0x2f) { + if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || + (pm8001_ha_dev->id & 
NCQ_ABORT_ALL_FLAG) || + (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { + struct task_status_struct *ts; + + pm8001_ha_dev->id &= 0xDFFFFFFF; + ts = &task->task_status; + + spin_lock_irqsave(&task->task_state_lock, flags); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags & + SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p resp 0x%x " + " stat 0x%x but aborted by upper layer " + "\n", task, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + } else { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free_done(pm8001_ha, task, + ccb, tag); + return 0; + } + } + } + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); return ret; } @@ -4142,12 +4461,12 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | phy_id); - payload.sas_identify.dev_type = SAS_END_DEV; + payload.sas_identify.dev_type = SAS_END_DEVICE; payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; memcpy(payload.sas_identify.sas_addr, pm8001_ha->sas_addr, SAS_ADDR_SIZE); payload.sas_identify.phy_id = phy_id; - ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); return ret; } @@ -4157,7 +4476,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) * @num: the inbound queue number * @phy_id: the phy id which we wanted to start up. */ -static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) { struct phy_stop_req payload; @@ -4169,12 +4488,12 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); payload.phy_id = cpu_to_le32(phy_id); - ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); return ret; } /** - * see comments on mpi_reg_resp. + * see comments on pm8001_mpi_reg_resp. 
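*
* Note: this series also moves to the generic libsas enum
* sas_device_type names; the renames used below are:
*	SATA_DEV    -> SAS_SATA_DEV
*	SAS_END_DEV -> SAS_END_DEVICE
*	EDGE_DEV    -> SAS_EDGE_EXPANDER_DEVICE
*	FANOUT_DEV  -> SAS_FANOUT_EXPANDER_DEVICE
*	NO_DEVICE   -> SAS_PHY_UNUSED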
*/ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u32 flag) @@ -4204,11 +4523,11 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, if (flag == 1) stp_sspsmp_sata = 0x02; /*direct attached sata */ else { - if (pm8001_dev->dev_type == SATA_DEV) + if (pm8001_dev->dev_type == SAS_SATA_DEV) stp_sspsmp_sata = 0x00; /* stp*/ - else if (pm8001_dev->dev_type == SAS_END_DEV || - pm8001_dev->dev_type == EDGE_DEV || - pm8001_dev->dev_type == FANOUT_DEV) + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) stp_sspsmp_sata = 0x01; /*ssp or smp*/ } if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) @@ -4228,14 +4547,14 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, SAS_ADDR_SIZE); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } /** - * see comments on mpi_reg_resp. + * see comments on pm8001_mpi_reg_resp. */ -static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id) { struct dereg_dev_req payload; @@ -4249,7 +4568,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, payload.device_id = cpu_to_le32(device_id); PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister device device_id = %d\n", device_id)); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } @@ -4272,7 +4591,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(1); payload.phyop_phyid = cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } @@ -4296,11 +4615,11 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) * @stat: stat. */ static irqreturn_t -pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) { - pm8001_chip_interrupt_disable(pm8001_ha); - process_oq(pm8001_ha); - pm8001_chip_interrupt_enable(pm8001_ha); + pm8001_chip_interrupt_disable(pm8001_ha, vec); + process_oq(pm8001_ha, vec); + pm8001_chip_interrupt_enable(pm8001_ha, vec); return IRQ_HANDLED; } @@ -4322,7 +4641,7 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, task_abort.device_id = cpu_to_le32(dev_id); task_abort.tag = cpu_to_le32(cmd_tag); } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); return ret; } @@ -4331,16 +4650,17 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, * @task: the task we wanted to aborted. * @flag: the abort flag. 
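*
* The inbound abort opcode follows the device type; in outline (this
* restates the selection logic in the body below):
*
*	if (pm8001_dev->dev_type == SAS_END_DEVICE)
*		opc = OPC_INB_SSP_ABORT;
*	else if (pm8001_dev->dev_type == SAS_SATA_DEV)
*		opc = OPC_INB_SATA_ABORT;
*	else
*		opc = OPC_INB_SMP_ABORT;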
*/ -static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag) { u32 opc, device_id; int rc = TMF_RESP_FUNC_FAILED; - PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag" - " = %x", cmd_tag, task_tag)); - if (pm8001_dev->dev_type == SAS_END_DEV) + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("cmd_tag = %x, abort task tag = 0x%x", + cmd_tag, task_tag)); + if (pm8001_dev->dev_type == SAS_END_DEVICE) opc = OPC_INB_SSP_ABORT; - else if (pm8001_dev->dev_type == SATA_DEV) + else if (pm8001_dev->dev_type == SAS_SATA_DEV) opc = OPC_INB_SATA_ABORT; else opc = OPC_INB_SMP_ABORT;/* SMP */ @@ -4358,7 +4678,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, * @ccb: the ccb information. * @tmf: task management function. */ -static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) { struct sas_task *task = ccb->task; @@ -4375,12 +4695,14 @@ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, sspTMCmd.tmf = cpu_to_le32(tmf->tmf); memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); + if (pm8001_ha->chip_id != chip_8001) + sspTMCmd.ds_ads_m = 0x08; circularQ = &pm8001_ha->inbnd_q_tbl[0]; - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0); return ret; } -static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload) { u32 opc = OPC_INB_GET_NVMD_DATA; @@ -4397,7 +4719,7 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); if (!fw_control_context) return -ENOMEM; - fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0]; + fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific; fw_control_context->len = ioctl_payload->length; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&nvmd_req, 0, sizeof(nvmd_req)); @@ -4453,14 +4775,24 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); break; } + case IOP_RDUMP: { + nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP); + nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); + nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset); + nvmd_req.resp_addr_hi = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); + nvmd_req.resp_addr_lo = + cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); + break; + } default: break; } - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); return rc; } -static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload) { u32 opc = OPC_INB_SET_NVMD_DATA; @@ -4479,7 +4811,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, return -ENOMEM; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, - ioctl_payload->func_specific, + &ioctl_payload->func_specific, ioctl_payload->length); memset(&nvmd_req, 0, sizeof(nvmd_req)); rc = pm8001_tag_alloc(pm8001_ha, &tag); @@ -4536,7 +4868,7 @@ 
static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, default: break; } - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); return rc; } @@ -4545,7 +4877,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, * @pm8001_ha: our hba card information. * @fw_flash_updata_info: firmware flash update param */ -static int +int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, void *fw_flash_updata_info, u32 tag) { @@ -4567,11 +4899,11 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr))); payload.sgl_addr_hi = cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr))); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } -static int +int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, void *payload) { @@ -4581,29 +4913,14 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, int rc; u32 tag; struct pm8001_ccb_info *ccb; - void *buffer = NULL; - dma_addr_t phys_addr; - u32 phys_addr_hi; - u32 phys_addr_lo; + void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr; + dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr; struct pm8001_ioctl_payload *ioctl_payload = payload; fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); if (!fw_control_context) return -ENOMEM; - fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0]; - if (fw_control->len != 0) { - if (pm8001_mem_alloc(pm8001_ha->pdev, - (void **)&buffer, - &phys_addr, - &phys_addr_hi, - &phys_addr_lo, - fw_control->len, 0) != 0) { - PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("Mem alloc failure\n")); - kfree(fw_control_context); - return -ENOMEM; - } - } + fw_control = (struct fw_control_info *)&ioctl_payload->func_specific; memcpy(buffer, fw_control->buffer, fw_control->len); flash_update_info.sgl.addr = cpu_to_le64(phys_addr); flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); @@ -4613,6 +4930,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, flash_update_info.total_image_len = fw_control->size; fw_control_context->fw_control = fw_control; fw_control_context->virtAddr = buffer; + fw_control_context->phys_addr = phys_addr; fw_control_context->len = fw_control->len; rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) { @@ -4627,7 +4945,85 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, return rc; } -static int +ssize_t +pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf) +{ + u32 value, rem, offset = 0, bar = 0; + u32 index, work_offset, dw_length; + u32 shift_value, gsm_base, gsm_dump_offset; + char *direct_data; + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + direct_data = buf; + gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset; + + /* check max is 1 Mbytes */ + if ((length > 0x100000) || (gsm_dump_offset & 3) || + ((gsm_dump_offset + length) > 0x1000000)) + return -EINVAL; + + if (pm8001_ha->chip_id == chip_8001) + bar = 2; + else + bar = 1; + + work_offset = gsm_dump_offset & 0xFFFF0000; + offset = gsm_dump_offset & 0x0000FFFF; + gsm_dump_offset = work_offset; + /* adjust length to dword boundary */ + rem = length & 3; + dw_length = length >> 2; + + for (index = 0; index < 
dw_length; index++) { + if ((work_offset + offset) & 0xFFFF0000) { + if (pm8001_ha->chip_id == chip_8001) + shift_value = ((gsm_dump_offset + offset) & + SHIFT_REG_64K_MASK); + else + shift_value = (((gsm_dump_offset + offset) & + SHIFT_REG_64K_MASK) >> + SHIFT_REG_BIT_SHIFT); + + if (pm8001_ha->chip_id == chip_8001) { + gsm_base = GSM_BASE; + if (-1 == pm8001_bar4_shift(pm8001_ha, + (gsm_base + shift_value))) + return -EIO; + } else { + gsm_base = 0; + if (-1 == pm80xx_bar4_shift(pm8001_ha, + (gsm_base + shift_value))) + return -EIO; + } + gsm_dump_offset = (gsm_dump_offset + offset) & + 0xFFFF0000; + work_offset = 0; + offset = offset & 0x0000FFFF; + } + value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & + 0x0000FFFF); + direct_data += sprintf(direct_data, "%08x ", value); + offset += 4; + } + if (rem != 0) { + value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & + 0x0000FFFF); + /* xfr for non_dw */ + direct_data += sprintf(direct_data, "%08x ", value); + } + /* Shift back to BAR4 original address */ + if (-1 == pm8001_bar4_shift(pm8001_ha, 0)) + return -EIO; + pm8001_ha->fatal_forensic_shift_offset += 1024; + + if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000) + pm8001_ha->fatal_forensic_shift_offset = 0; + return direct_data - buf; +} + +int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u32 state) { @@ -4648,7 +5044,7 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(tag); payload.device_id = cpu_to_le32(pm8001_dev->device_id); payload.nds = cpu_to_le32(state); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } @@ -4673,7 +5069,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha) payload.SSAHOLT = cpu_to_le32(0xd << 25); payload.sata_hol_tmo = cpu_to_le32(80); payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } @@ -4706,4 +5102,3 @@ const struct pm8001_dispatch pm8001_8001_dispatch = { .set_dev_state_req = pm8001_chip_set_dev_state_req, .sas_re_init_req = pm8001_chip_sas_re_initialization, }; - diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h index d437309cb1e..e4867e690c8 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.h +++ b/drivers/scsi/pm8001/pm8001_hwi.h @@ -131,6 +131,12 @@ #define LINKRATE_30 (0x02 << 8) #define LINKRATE_60 (0x04 << 8) +/* for phy state */ + +#define PHY_STATE_LINK_UP_SPC 0x1 + +/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */ +#define GSM_SM_BASE 0x4F0000 struct mpi_msg_hdr{ __le32 header; /* Bits [11:0] - Message operation code */ /* Bits [15:12] - Message Category */ @@ -298,7 +304,7 @@ struct local_phy_ctl_resp { #define OP_BITS 0x0000FF00 -#define ID_BITS 0x0000000F +#define ID_BITS 0x000000FF /* * brief the data structure of PORT Control Command @@ -1025,5 +1031,8 @@ struct set_dev_state_resp { #define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 +#define GSM_BASE 0x4F0000 +#define SHIFT_REG_64K_MASK 0xffff0000 +#define SHIFT_REG_BIT_SHIFT 8 #endif diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 3d5e522e00f..e90c89f1d48 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -1,5 +1,5 @@ /* - * 
PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -44,8 +44,19 @@ static struct scsi_transport_template *pm8001_stt; +/** + * chip info structure to identify chip key functionality as + * encryption available/not, no of ports, hw specific function ref + */ static const struct pm8001_chip_info pm8001_chips[] = { - [chip_8001] = { 8, &pm8001_8001_dispatch,}, + [chip_8001] = {0, 8, &pm8001_8001_dispatch,}, + [chip_8008] = {0, 8, &pm8001_80xx_dispatch,}, + [chip_8009] = {1, 8, &pm8001_80xx_dispatch,}, + [chip_8018] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8019] = {1, 16, &pm8001_80xx_dispatch,}, + [chip_8074] = {0, 8, &pm8001_80xx_dispatch,}, + [chip_8076] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8077] = {0, 16, &pm8001_80xx_dispatch,}, }; static int pm8001_id; @@ -155,37 +166,72 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) } #ifdef PM8001_USE_TASKLET + +/** + * tasklet for 64 msi-x interrupt handler + * @opaque: the passed general host adapter struct + * Note: pm8001_tasklet is common for pm8001 & pm80xx + */ static void pm8001_tasklet(unsigned long opaque) { struct pm8001_hba_info *pm8001_ha; - pm8001_ha = (struct pm8001_hba_info *)opaque; + struct isr_param *irq_vector; + + irq_vector = (struct isr_param *)opaque; + pm8001_ha = irq_vector->drv_inst; if (unlikely(!pm8001_ha)) BUG_ON(1); - PM8001_CHIP_DISP->isr(pm8001_ha); + PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id); } #endif +/** + * pm8001_interrupt_handler_msix - main MSIX interrupt handler. + * It obtains the vector number and calls the equivalent bottom + * half or services directly. + * @opaque: the passed outbound queue/vector. Host structure is + * retrieved from the same. + */ +static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque) +{ + struct isr_param *irq_vector; + struct pm8001_hba_info *pm8001_ha; + irqreturn_t ret = IRQ_HANDLED; + irq_vector = (struct isr_param *)opaque; + pm8001_ha = irq_vector->drv_inst; + + if (unlikely(!pm8001_ha)) + return IRQ_NONE; + if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) + return IRQ_NONE; +#ifdef PM8001_USE_TASKLET + tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]); +#else + ret = PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id); +#endif + return ret; +} + +/** + * pm8001_interrupt_handler_intx - main INTx interrupt handler. + * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure. + */ - /** - * pm8001_interrupt - when HBA originate a interrupt,we should invoke this - * dispatcher to handle each case. - * @irq: irq number. - * @opaque: the passed general host adapter struct - */ -static irqreturn_t pm8001_interrupt(int irq, void *opaque) +static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) { struct pm8001_hba_info *pm8001_ha; irqreturn_t ret = IRQ_HANDLED; - struct sas_ha_struct *sha = opaque; + struct sas_ha_struct *sha = dev_id; pm8001_ha = sha->lldd_ha; if (unlikely(!pm8001_ha)) return IRQ_NONE; if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) return IRQ_NONE; + #ifdef PM8001_USE_TASKLET - tasklet_schedule(&pm8001_ha->tasklet); + tasklet_schedule(&pm8001_ha->tasklet[0]); #else - ret = PM8001_CHIP_DISP->isr(pm8001_ha); + ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0); #endif return ret; } @@ -195,10 +241,14 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque) * @pm8001_ha:our hba structure. 
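*
* A condensed sketch of the queue sizing rule applied per region below
* (hedged: the real code fills memoryMap.region[IB+i] and [OB+i]):
*
*	if (ent->driver_data != chip_8001)
*		element_size = 128;	(SPCv/SPCve: 128-byte IOMBs)
*	else
*		element_size = 64;	(original SPC: 64-byte IOMBs)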
* */ -static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) +static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, + const struct pci_device_id *ent) { int i; spin_lock_init(&pm8001_ha->lock); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("pm8001_alloc: PHY:%x\n", + pm8001_ha->chip->n_phy)); for (i = 0; i < pm8001_ha->chip->n_phy; i++) { pm8001_phy_init(pm8001_ha, i); pm8001_ha->port[i].wide_port_phymap = 0; @@ -222,30 +272,57 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE; pm8001_ha->memoryMap.region[IOP].alignment = 32; - /* MPI Memory region 3 for consumer Index of inbound queues */ - pm8001_ha->memoryMap.region[CI].num_elements = 1; - pm8001_ha->memoryMap.region[CI].element_size = 4; - pm8001_ha->memoryMap.region[CI].total_len = 4; - pm8001_ha->memoryMap.region[CI].alignment = 4; - - /* MPI Memory region 4 for producer Index of outbound queues */ - pm8001_ha->memoryMap.region[PI].num_elements = 1; - pm8001_ha->memoryMap.region[PI].element_size = 4; - pm8001_ha->memoryMap.region[PI].total_len = 4; - pm8001_ha->memoryMap.region[PI].alignment = 4; - - /* MPI Memory region 5 inbound queues */ - pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[IB].element_size = 64; - pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64; - pm8001_ha->memoryMap.region[IB].alignment = 64; - - /* MPI Memory region 6 outbound queues */ - pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[OB].element_size = 64; - pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64; - pm8001_ha->memoryMap.region[OB].alignment = 64; + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + /* MPI Memory region 3 for consumer Index of inbound queues */ + pm8001_ha->memoryMap.region[CI+i].num_elements = 1; + pm8001_ha->memoryMap.region[CI+i].element_size = 4; + pm8001_ha->memoryMap.region[CI+i].total_len = 4; + pm8001_ha->memoryMap.region[CI+i].alignment = 4; + + if ((ent->driver_data) != chip_8001) { + /* MPI Memory region 5 inbound queues */ + pm8001_ha->memoryMap.region[IB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[IB+i].element_size = 128; + pm8001_ha->memoryMap.region[IB+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[IB+i].alignment = 128; + } else { + pm8001_ha->memoryMap.region[IB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[IB+i].element_size = 64; + pm8001_ha->memoryMap.region[IB+i].total_len = + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[IB+i].alignment = 64; + } + } + + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + /* MPI Memory region 4 for producer Index of outbound queues */ + pm8001_ha->memoryMap.region[PI+i].num_elements = 1; + pm8001_ha->memoryMap.region[PI+i].element_size = 4; + pm8001_ha->memoryMap.region[PI+i].total_len = 4; + pm8001_ha->memoryMap.region[PI+i].alignment = 4; + + if (ent->driver_data != chip_8001) { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[OB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[OB+i].element_size = 128; + pm8001_ha->memoryMap.region[OB+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[OB+i].alignment = 128; + } else { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[OB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[OB+i].element_size = 64; + pm8001_ha->memoryMap.region[OB+i].total_len 
= + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[OB+i].alignment = 64; + } + } /* Memory region write DMA*/ pm8001_ha->memoryMap.region[NVMD].num_elements = 1; pm8001_ha->memoryMap.region[NVMD].element_size = 4096; @@ -264,6 +341,13 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB * sizeof(struct pm8001_ccb_info); + /* Memory region for fw flash */ + pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096; + + pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1; + pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000; + pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000; + pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000; for (i = 0; i < USI_MAX_MEMCNT; i++) { if (pm8001_mem_alloc(pm8001_ha->pdev, &pm8001_ha->memoryMap.region[i].virt_ptr, @@ -281,7 +365,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr; for (i = 0; i < PM8001_MAX_DEVICES; i++) { - pm8001_ha->devices[i].dev_type = NO_DEVICE; + pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED; pm8001_ha->devices[i].id = i; pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; pm8001_ha->devices[i].running_req = 0; @@ -339,9 +423,12 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) ioremap(pm8001_ha->io_mem[logicalBar].membase, pm8001_ha->io_mem[logicalBar].memsize); PM8001_INIT_DBG(pm8001_ha, - pm8001_printk("PCI: bar %d, logicalBar %d " - "virt_addr=%lx,len=%d\n", bar, logicalBar, - (unsigned long) + pm8001_printk("PCI: bar %d, logicalBar %d ", + bar, logicalBar)); + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "base addr %llx virt_addr=%llx len=%d\n", + (u64)pm8001_ha->io_mem[logicalBar].membase, + (u64)(unsigned long) pm8001_ha->io_mem[logicalBar].memvirtaddr, pm8001_ha->io_mem[logicalBar].memsize)); } else { @@ -361,12 +448,13 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) * @shost: scsi host struct which has been initialized before. 
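*
* On SPCv controllers with MSI-X, one tasklet is initialised per
* vector; the pattern used in the body below is:
*
*	for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
*		tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
*			(unsigned long)&(pm8001_ha->irq_vector[j]));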
*/ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, - u32 chip_id, - struct Scsi_Host *shost) + const struct pci_device_id *ent, + struct Scsi_Host *shost) + { struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); - + int j; pm8001_ha = sha->lldd_ha; if (!pm8001_ha) @@ -374,7 +462,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, pm8001_ha->pdev = pdev; pm8001_ha->dev = &pdev->dev; - pm8001_ha->chip_id = chip_id; + pm8001_ha->chip_id = ent->driver_data; pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id]; pm8001_ha->irq = pdev->irq; pm8001_ha->sas = sha; @@ -382,12 +470,24 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, pm8001_ha->id = pm8001_id++; pm8001_ha->logging_level = 0x01; sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); + /* IOMB size is 128 for 8088/89 controllers */ + if (pm8001_ha->chip_id != chip_8001) + pm8001_ha->iomb_size = IOMB_SIZE_SPCV; + else + pm8001_ha->iomb_size = IOMB_SIZE_SPC; + #ifdef PM8001_USE_TASKLET - tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); + /* Tasklet for non msi-x interrupt handler */ + if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) + tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[0])); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[j])); #endif pm8001_ioremap(pm8001_ha); - if (!pm8001_alloc(pm8001_ha)) + if (!pm8001_alloc(pm8001_ha, ent)) return pm8001_ha; pm8001_free(pm8001_ha); return NULL; @@ -512,21 +612,53 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost, */ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) { - u8 i; + u8 i, j; #ifdef PM8001_READ_VPD + /* For new SPC controllers WWN is stored in flash vpd + * For SPC/SPCve controllers WWN is stored in EEPROM + * For Older SPC WWN is stored in NVMD + */ DECLARE_COMPLETION_ONSTACK(completion); struct pm8001_ioctl_payload payload; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); pm8001_ha->nvmd_completion = &completion; - payload.minor_function = 0; - payload.length = 128; - payload.func_specific = kzalloc(128, GFP_KERNEL); + + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081 || deviceid == 0x0042) { + payload.minor_function = 4; + payload.length = 4096; + } else { + payload.minor_function = 0; + payload.length = 128; + } + } else { + payload.minor_function = 1; + payload.length = 4096; + } + payload.offset = 0; + payload.func_specific = kzalloc(payload.length, GFP_KERNEL); PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); wait_for_completion(&completion); + + for (i = 0, j = 0; i <= 7; i++, j++) { + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081) + pm8001_ha->sas_addr[j] = + payload.func_specific[0x704 + i]; + else if (deviceid == 0x0042) + pm8001_ha->sas_addr[j] = + payload.func_specific[0x010 + i]; + } else + pm8001_ha->sas_addr[j] = + payload.func_specific[0x804 + i]; + } + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { - memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr, - SAS_ADDR_SIZE); + memcpy(&pm8001_ha->phy[i].dev_sas_addr, + pm8001_ha->sas_addr, SAS_ADDR_SIZE); PM8001_INIT_DBG(pm8001_ha, - pm8001_printk("phy %d sas_addr = %016llx \n", i, + pm8001_printk("phy %d sas_addr = %016llx\n", i, pm8001_ha->phy[i].dev_sas_addr)); } #else @@ -541,37 +673,84 @@ static void 
pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) #endif } +/* + * pm8001_get_phy_settings_info : Read phy setting values. + * @pm8001_ha : our hba. + */ +static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) +{ + +#ifdef PM8001_READ_VPD + /*OPTION ROM FLASH read for the SPC cards */ + DECLARE_COMPLETION_ONSTACK(completion); + struct pm8001_ioctl_payload payload; + + pm8001_ha->nvmd_completion = &completion; + /* SAS ADDRESS read from flash / EEPROM */ + payload.minor_function = 6; + payload.offset = 0; + payload.length = 4096; + payload.func_specific = kzalloc(4096, GFP_KERNEL); + if (!payload.func_specific) + return -ENOMEM; + /* Read phy setting values from flash */ + PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); + wait_for_completion(&completion); + pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific); + kfree(payload.func_specific); +#endif + return 0; +} + #ifdef PM8001_USE_MSIX /** * pm8001_setup_msix - enable MSI-X interrupt * @chip_info: our ha struct. * @irq_handler: irq_handler */ -static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, - irq_handler_t irq_handler) +static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) { u32 i = 0, j = 0; - u32 number_of_intr = 1; + u32 number_of_intr; int flag = 0; u32 max_entry; int rc; + static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3]; + + /* SPCv controllers supports 64 msi-x */ + if (pm8001_ha->chip_id == chip_8001) { + number_of_intr = 1; + } else { + number_of_intr = PM8001_MAX_MSIX_VEC; + flag &= ~IRQF_SHARED; + } + max_entry = sizeof(pm8001_ha->msix_entries) / sizeof(pm8001_ha->msix_entries[0]); - flag |= IRQF_DISABLED; for (i = 0; i < max_entry ; i++) pm8001_ha->msix_entries[i].entry = i; rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries, number_of_intr); pm8001_ha->number_of_intr = number_of_intr; if (!rc) { + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "pci_enable_msix request ret:%d no of intr %d\n", + rc, pm8001_ha->number_of_intr)); + + for (i = 0; i < number_of_intr; i++) { + snprintf(intr_drvname[i], sizeof(intr_drvname[0]), + DRV_NAME"%d", i); + pm8001_ha->irq_vector[i].irq_id = i; + pm8001_ha->irq_vector[i].drv_inst = pm8001_ha; + if (request_irq(pm8001_ha->msix_entries[i].vector, - irq_handler, flag, DRV_NAME, - SHOST_TO_SAS_HA(pm8001_ha->shost))) { + pm8001_interrupt_handler_msix, flag, + intr_drvname[i], &(pm8001_ha->irq_vector[i]))) { for (j = 0; j < i; j++) free_irq( pm8001_ha->msix_entries[j].vector, - SHOST_TO_SAS_HA(pm8001_ha->shost)); + &(pm8001_ha->irq_vector[i])); pci_disable_msix(pm8001_ha->pdev); break; } @@ -588,22 +767,24 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) { struct pci_dev *pdev; - irq_handler_t irq_handler = pm8001_interrupt; int rc; pdev = pm8001_ha->pdev; #ifdef PM8001_USE_MSIX - if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) - return pm8001_setup_msix(pm8001_ha, irq_handler); - else + if (pdev->msix_cap) + return pm8001_setup_msix(pm8001_ha); + else { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("MSIX not supported!!!\n")); goto intx; + } #endif intx: /* initialize the INT-X interrupt */ - rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, - SHOST_TO_SAS_HA(pm8001_ha->shost)); + rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED, + DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost)); return rc; } @@ -621,12 +802,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev, { unsigned int rc; u32 
pci_reg; + u8 i = 0; struct pm8001_hba_info *pm8001_ha; struct Scsi_Host *shost = NULL; const struct pm8001_chip_info *chip; dev_printk(KERN_INFO, &pdev->dev, - "pm8001: driver version %s\n", DRV_VERSION); + "pm80xx: driver version %s\n", DRV_VERSION); rc = pci_enable_device(pdev); if (rc) goto err_out_enable; @@ -665,26 +847,47 @@ static int pm8001_pci_probe(struct pci_dev *pdev, goto err_out_free; } pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); - pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost); + /* ent->driver variable is used to differentiate between controllers */ + pm8001_ha = pm8001_pci_alloc(pdev, ent, shost); if (!pm8001_ha) { rc = -ENOMEM; goto err_out_free; } list_add_tail(&pm8001_ha->list, &hba_list); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); - if (rc) + if (rc) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "chip_init failed [ret: %d]\n", rc)); goto err_out_ha_free; + } rc = scsi_add_host(shost, &pdev->dev); if (rc) goto err_out_ha_free; rc = pm8001_request_irq(pm8001_ha); - if (rc) + if (rc) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "pm8001_request_irq failed [ret: %d]\n", rc)); goto err_out_shost; + } + + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + /* setup thermal configuration. */ + pm80xx_set_thermal_config(pm8001_ha); + } - PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); pm8001_init_sas_add(pm8001_ha); + /* phy setting support for motherboard controller */ + if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 && + pdev->subsystem_vendor != 0) { + rc = pm8001_get_phy_settings_info(pm8001_ha); + if (rc) + goto err_out_shost; + } pm8001_post_sas_ha_init(shost, chip); rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); if (rc) @@ -712,27 +915,32 @@ static void pm8001_pci_remove(struct pci_dev *pdev) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; - int i; + int i, j; pm8001_ha = sha->lldd_ha; - pci_set_drvdata(pdev, NULL); sas_unregister_ha(sha); sas_remove_host(pm8001_ha->shost); list_del(&pm8001_ha->list); scsi_remove_host(pm8001_ha->shost); - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) - free_irq(pm8001_ha->msix_entries[i].vector, sha); + free_irq(pm8001_ha->msix_entries[i].vector, + &(pm8001_ha->irq_vector[i])); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); #endif #ifdef PM8001_USE_TASKLET - tasklet_kill(&pm8001_ha->tasklet); + /* For non-msix and msix interrupts */ + if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) + tasklet_kill(&pm8001_ha->tasklet[0]); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_kill(&pm8001_ha->tasklet[j]); #endif pm8001_free(pm8001_ha); kfree(sha->sas_phy); @@ -753,29 +961,34 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; - int i , pos; + int i, j; u32 device_state; pm8001_ha = sha->lldd_ha; flush_workqueue(pm8001_wq); scsi_block_requests(pm8001_ha->shost); - pos = 
pci_find_capability(pdev, PCI_CAP_ID_PM); - if (pos == 0) { - printk(KERN_ERR " PCI PM not supported\n"); + if (!pdev->pm_cap) { + dev_err(&pdev->dev, " PCI PM not supported\n"); return -ENODEV; } - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) - free_irq(pm8001_ha->msix_entries[i].vector, sha); + free_irq(pm8001_ha->msix_entries[i].vector, + &(pm8001_ha->irq_vector[i])); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); #endif #ifdef PM8001_USE_TASKLET - tasklet_kill(&pm8001_ha->tasklet); + /* For non-msix and msix interrupts */ + if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) + tasklet_kill(&pm8001_ha->tasklet[0]); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_kill(&pm8001_ha->tasklet[j]); #endif device_state = pci_choose_state(pdev, state); pm8001_printk("pdev=0x%p, slot=%s, entering " @@ -798,6 +1011,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev) struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; int rc; + u8 i = 0, j; u32 device_state; pm8001_ha = sha->lldd_ha; device_state = pdev->current_state; @@ -820,19 +1034,37 @@ static int pm8001_pci_resume(struct pci_dev *pdev) if (rc) goto err_out_disable; - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + /* chip soft rst only for spc */ + if (pm8001_ha->chip_id == chip_8001) { + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip soft reset successful\n")); + } rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); if (rc) goto err_out_disable; - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); + + /* disable all the interrupt bits */ + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + rc = pm8001_request_irq(pm8001_ha); if (rc) goto err_out_disable; - #ifdef PM8001_USE_TASKLET - tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); - #endif - PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); +#ifdef PM8001_USE_TASKLET + /* Tasklet for non msi-x interrupt handler */ + if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) + tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[0])); + else + for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) + tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet, + (unsigned long)&(pm8001_ha->irq_vector[j])); +#endif + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + } scsi_unblock_requests(pm8001_ha->shost); return 0; @@ -843,14 +1075,66 @@ err_out_enable: return rc; } +/* update of pci device, vendor id and driver data with + * unique value for each of the controller + */ static struct pci_device_id pm8001_pci_table[] = { - { - PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 - }, - { - PCI_DEVICE(0x117c, 0x0042), - .driver_data = chip_8001 - }, + { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 }, + { PCI_VDEVICE(ATTO, 0x0042), chip_8001 }, + /* Support for SPC/SPCv/SPCve controllers */ + { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 }, + { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 }, + { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 }, + { PCI_VDEVICE(PMC_Sierra, 
0x8018), chip_8018 }, + { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 }, + { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 }, + { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 }, + { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 }, + { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 }, + { PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 }, + { PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 }, + { PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 }, + { PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 }, + { PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 }, + { PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8076, + PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8077, + PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8074, + PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 }, {} /* terminate list */ }; @@ -870,7 +1154,7 @@ static int __init pm8001_init(void) { int rc = -ENOMEM; - pm8001_wq = alloc_workqueue("pm8001", 0, 0); + pm8001_wq = alloc_workqueue("pm80xx", 0, 0); if (!pm8001_wq) goto err; @@ -902,7 +1186,12 @@ module_init(pm8001_init); module_exit(pm8001_exit); MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); -MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver"); +MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>"); +MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>"); +MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>"); +MODULE_DESCRIPTION( + "PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 " + "SAS/SATA controller driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pm8001_pci_table); diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index b961112395d..8a44bc92bc7 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. 
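/*
* The hunks below gate the BAR4 register-window shift on the original
* SPC (chip_8001); a minimal sketch of the pattern (offset is
* illustrative):
*
*	if (pm8001_ha->chip_id == chip_8001) {
*		if (-1 == pm8001_bar4_shift(pm8001_ha, offset))
*			return -EINVAL;
*	}
*	(read the shifted window, e.g. via pm8001_cr32)
*	if (pm8001_ha->chip_id == chip_8001)
*		pm8001_bar4_shift(pm8001_ha, 0);
*/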
@@ -68,7 +68,7 @@ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag) clear_bit(tag, bitmap); } -static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) { pm8001_tag_clear(pm8001_ha, tag); } @@ -212,10 +212,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_GET_EVENTS: spin_lock_irqsave(&pm8001_ha->lock, flags); - if (-1 == pm8001_bar4_shift(pm8001_ha, + if (pm8001_ha->chip_id == chip_8001) { + if (-1 == pm8001_bar4_shift(pm8001_ha, (phy_id < 4) ? 0x30000 : 0x40000)) { - spin_unlock_irqrestore(&pm8001_ha->lock, flags); - return -EINVAL; + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return -EINVAL; + } } { struct sas_phy *phy = sas_phy->phy; @@ -228,7 +230,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, phy->loss_of_dword_sync_count = qp[3]; phy->phy_reset_problem_count = qp[4]; } - pm8001_bar4_shift(pm8001_ha, 0); + if (pm8001_ha->chip_id == chip_8001) + pm8001_bar4_shift(pm8001_ha, 0); spin_unlock_irqrestore(&pm8001_ha->lock, flags); return 0; default: @@ -249,7 +252,9 @@ void pm8001_scan_start(struct Scsi_Host *shost) struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); pm8001_ha = sha->lldd_ha; - PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); + /* SAS_RE_INITIALIZATION not available in SPCv/ve */ + if (pm8001_ha->chip_id == chip_8001) + PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); for (i = 0; i < pm8001_ha->chip->n_phy; ++i) PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); } @@ -352,7 +357,7 @@ static int sas_find_local_port_id(struct domain_device *dev) * @tmf: the task management IU */ #define DEV_IS_GONE(pm8001_dev) \ - ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))) + ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))) static int pm8001_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) { @@ -370,7 +375,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num, struct task_status_struct *tsm = &t->task_status; tsm->resp = SAS_TASK_UNDELIVERED; tsm->stat = SAS_PHY_DOWN; - if (dev->dev_type != SATA_DEV) + if (dev->dev_type != SAS_SATA_DEV) t->task_done(t); return 0; } @@ -429,6 +434,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num, ccb->n_elem = n_elem; ccb->ccb_tag = tag; ccb->task = t; + ccb->device = pm8001_dev; switch (t->task_proto) { case SAS_PROTOCOL_SMP: rc = pm8001_task_prep_smp(pm8001_ha, ccb); @@ -442,7 +448,6 @@ static int pm8001_task_exec(struct sas_task *task, const int num, break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: rc = pm8001_task_prep_ata(pm8001_ha, ccb); break; default: @@ -548,7 +553,7 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) { u32 dev; for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { - if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) { + if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) { pm8001_ha->devices[dev].id = dev; return &pm8001_ha->devices[dev]; } @@ -560,13 +565,31 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) } return NULL; } +/** + * pm8001_find_dev - find a matching pm8001_device + * @pm8001_ha: our hba card information + */ +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id) +{ + u32 dev; + for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { + if 
(pm8001_ha->devices[dev].device_id == device_id) + return &pm8001_ha->devices[dev]; + } + if (dev == PM8001_MAX_DEVICES) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING " + "DEVICE FOUND !!!\n")); + } + return NULL; +} static void pm8001_free_dev(struct pm8001_device *pm8001_dev) { u32 id = pm8001_dev->id; memset(pm8001_dev, 0, sizeof(*pm8001_dev)); pm8001_dev->id = id; - pm8001_dev->dev_type = NO_DEVICE; + pm8001_dev->dev_type = SAS_PHY_UNUSED; pm8001_dev->device_id = PM8001_MAX_DEVICES; pm8001_dev->sas_device = NULL; } @@ -624,7 +647,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) res = -1; } } else { - if (dev->dev_type == SATA_DEV) { + if (dev->dev_type == SAS_SATA_DEV) { pm8001_device->attached_phy = dev->rphy->identify.phy_identifier; flag = 1; /* directly sata*/ @@ -634,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); spin_unlock_irqrestore(&pm8001_ha->lock, flags); wait_for_completion(&completion); - if (dev->dev_type == SAS_END_DEV) + if (dev->dev_type == SAS_END_DEVICE) msleep(50); pm8001_ha->flags = PM8001F_RUN_TIME; return 0; @@ -648,7 +671,7 @@ int pm8001_dev_found(struct domain_device *dev) return pm8001_dev_found_notify(dev); } -static void pm8001_task_done(struct sas_task *task) +void pm8001_task_done(struct sas_task *task) { if (!del_timer(&task->slow_task->timer)) return; @@ -681,6 +704,8 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, int res, retry; struct sas_task *task = NULL; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + struct pm8001_device *pm8001_dev = dev->lldd_dev; + DECLARE_COMPLETION_ONSTACK(completion_setstate); for (retry = 0; retry < 3; retry++) { task = sas_alloc_slow_task(GFP_KERNEL); @@ -706,6 +731,12 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, goto ex_err; } wait_for_completion(&task->slow_task->completion); + if (pm8001_ha->chip_id != chip_8001) { + pm8001_dev->setds_completion = &completion_setstate; + PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, + pm8001_dev, 0x01); + wait_for_completion(&completion_setstate); + } res = -TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. */ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { @@ -835,13 +866,11 @@ ex_err: static void pm8001_dev_gone_notify(struct domain_device *dev) { unsigned long flags = 0; - u32 tag; struct pm8001_hba_info *pm8001_ha; struct pm8001_device *pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); spin_lock_irqsave(&pm8001_ha->lock, flags); - pm8001_tag_alloc(pm8001_ha, &tag); if (pm8001_dev) { u32 device_id = pm8001_dev->device_id; @@ -904,7 +933,7 @@ void pm8001_open_reject_retry( struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i]; pm8001_dev = ccb->device; - if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)) + if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)) continue; if (!device_to_close) { uintptr_t d = (uintptr_t)pm8001_dev @@ -995,6 +1024,72 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev) return rc; } +/* +* This function handles the IT_NEXUS_XXX event or completion +* status code for SSP/SATA/SMP I/O requests. 
+*/ +int pm8001_I_T_nexus_event_handler(struct domain_device *dev) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_device *pm8001_dev; + struct pm8001_hba_info *pm8001_ha; + struct sas_phy *phy; + u32 device_id = 0; + + if (!dev || !dev->lldd_dev) + return -1; + + pm8001_dev = dev->lldd_dev; + device_id = pm8001_dev->device_id; + pm8001_ha = pm8001_find_ha_by_dev(dev); + + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("I_T_Nexus handler invoked !!")); + + phy = sas_get_local_phy(dev); + + if (dev_is_sata(dev)) { + DECLARE_COMPLETION_ONSTACK(completion_setstate); + if (scsi_is_sas_phy_local(phy)) { + rc = 0; + goto out; + } + /* send internal ssp/sata/smp abort command to FW */ + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , + dev, 1, 0); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + pm8001_dev->setds_completion = &completion_setstate; + + wait_for_completion(&completion_setstate); + } else { + /* send internal ssp/sata/smp abort command to FW */ + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , + dev, 1, 0); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + } + PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n", + pm8001_dev->device_id, rc)); +out: + sas_put_local_phy(phy); + + return rc; +} /* mandatory SAM-3, the task reset the specified LUN*/ int pm8001_lu_reset(struct domain_device *dev, u8 *lun) { @@ -1002,15 +1097,17 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun) struct pm8001_tmf_task tmf_task; struct pm8001_device *pm8001_dev = dev->lldd_dev; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); + DECLARE_COMPLETION_ONSTACK(completion_setstate); if (dev_is_sata(dev)) { struct sas_phy *phy = sas_get_local_phy(dev); rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , dev, 1, 0); rc = sas_phy_reset(phy, 1); sas_put_local_phy(phy); + pm8001_dev->setds_completion = &completion_setstate; rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, pm8001_dev, 0x01); - msleep(2000); + wait_for_completion(&completion_setstate); } else { tmf_task.tmf = TMF_LU_RESET; rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 11008205aeb..1ee06f21803 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -57,8 +57,8 @@ #include <linux/atomic.h> #include "pm8001_defs.h" -#define DRV_NAME "pm8001" -#define DRV_VERSION "0.1.36" +#define DRV_NAME "pm80xx" +#define DRV_VERSION "0.1.37" #define PM8001_FAIL_LOGGING 0x01 /* Error message logging */ #define PM8001_INIT_LOGGING 0x02 /* driver init logging */ #define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ @@ -66,8 +66,8 @@ #define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/ #define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ #define PM8001_MSG_LOGGING 0x40 /* misc message logging */ -#define pm8001_printk(format, arg...) printk(KERN_INFO "%s %d:" format,\ - __func__, __LINE__, ## arg) +#define pm8001_printk(format, arg...) 
printk(KERN_INFO "pm80xx %s %d:" \ + format, __func__, __LINE__, ## arg) #define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \ do { \ if (unlikely(HBA->logging_level & LEVEL)) \ @@ -103,11 +103,15 @@ do { \ #define PM8001_READ_VPD -#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV)) +#define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) +#define IS_SPCV_12G(dev) ((dev->device == 0X8074) \ + || (dev->device == 0X8076) \ + || (dev->device == 0X8077)) #define PM8001_NAME_LENGTH 32/* generic length of strings */ extern struct list_head hba_list; extern const struct pm8001_dispatch pm8001_8001_dispatch; +extern const struct pm8001_dispatch pm8001_80xx_dispatch; struct pm8001_hba_info; struct pm8001_ccb_info; @@ -128,18 +132,73 @@ struct pm8001_ioctl_payload { u8 *func_specific; }; +#define MPI_FATAL_ERROR_TABLE_OFFSET_MASK 0xFFFFFF +#define MPI_FATAL_ERROR_TABLE_SIZE(value) ((0xFF000000 & value) >> SHIFT24) +#define MPI_FATAL_EDUMP_TABLE_LO_OFFSET 0x00 /* HNFBUFL */ +#define MPI_FATAL_EDUMP_TABLE_HI_OFFSET 0x04 /* HNFBUFH */ +#define MPI_FATAL_EDUMP_TABLE_LENGTH 0x08 /* HNFBLEN */ +#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */ +#define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */ +#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */ +#define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1 +#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0 +#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0 +#define MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED 0x1 +#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA 0x2 +#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE 0x3 +#define TYPE_GSM_SPACE 1 +#define TYPE_QUEUE 2 +#define TYPE_FATAL 3 +#define TYPE_NON_FATAL 4 +#define TYPE_INBOUND 1 +#define TYPE_OUTBOUND 2 +struct forensic_data { + u32 data_type; + union { + struct { + u32 direct_len; + u32 direct_offset; + void *direct_data; + } gsm_buf; + struct { + u16 queue_type; + u16 queue_index; + u32 direct_len; + void *direct_data; + } queue_buf; + struct { + u32 direct_len; + u32 direct_offset; + u32 read_len; + void *direct_data; + } data_buf; + }; +}; + +/* bit31-26 - mask bar */ +#define SCRATCH_PAD0_BAR_MASK 0xFC000000 +/* bit25-0 - offset mask */ +#define SCRATCH_PAD0_OFFSET_MASK 0x03FFFFFF +/* if AAP error state */ +#define SCRATCH_PAD0_AAPERR_MASK 0xFFFFFFFF +/* Inbound doorbell bit7 */ +#define SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP 0x80 +/* Inbound doorbell bit7 SPCV */ +#define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO 0x80 +#define MAIN_MERRDCTO_MERRDCES 0xA0/* DWORD 0x28) */ + struct pm8001_dispatch { char *name; int (*chip_init)(struct pm8001_hba_info *pm8001_ha); - int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature); + int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha); void (*chip_rst)(struct pm8001_hba_info *pm8001_ha); int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha); void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha); - irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha); + irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec); u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha); - int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha); - void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha); - void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha); + int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec); void 
(*make_prd)(struct scatterlist *scatter, int nr, void *prd); int (*smp_req)(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb); @@ -173,6 +232,7 @@ struct pm8001_dispatch { }; struct pm8001_chip_info { + u32 encrypt; u32 n_phy; const struct pm8001_dispatch *dispatch; }; @@ -204,7 +264,7 @@ struct pm8001_phy { }; struct pm8001_device { - enum sas_dev_type dev_type; + enum sas_device_type dev_type; struct domain_device *sas_device; u32 attached_phy; u32 id; @@ -256,7 +316,20 @@ struct mpi_mem_req { struct mpi_mem region[USI_MAX_MEMCNT]; }; -struct main_cfg_table { +struct encrypt { + u32 cipher_mode; + u32 sec_mode; + u32 status; + u32 flag; +}; + +struct sas_phy_attribute_table { + u32 phystart1_16[16]; + u32 outbound_hw_event_pid1_16[16]; +}; + +union main_cfg_table { + struct { u32 signature; u32 interface_rev; u32 firmware_rev; @@ -292,19 +365,70 @@ struct main_cfg_table { u32 fatal_err_dump_length1; u32 hda_mode_flag; u32 anolog_setup_table_offset; + u32 rsvd[4]; + } pm8001_tbl; + + struct { + u32 signature; + u32 interface_rev; + u32 firmware_rev; + u32 max_out_io; + u32 max_sgl; + u32 ctrl_cap_flag; + u32 gst_offset; + u32 inbound_queue_offset; + u32 outbound_queue_offset; + u32 inbound_q_nppd_hppd; + u32 rsvd[8]; + u32 crc_core_dump; + u32 rsvd1; + u32 upper_event_log_addr; + u32 lower_event_log_addr; + u32 event_log_size; + u32 event_log_severity; + u32 upper_pcs_event_log_addr; + u32 lower_pcs_event_log_addr; + u32 pcs_event_log_size; + u32 pcs_event_log_severity; + u32 fatal_err_interrupt; + u32 fatal_err_dump_offset0; + u32 fatal_err_dump_length0; + u32 fatal_err_dump_offset1; + u32 fatal_err_dump_length1; + u32 gpio_led_mapping; + u32 analog_setup_table_offset; + u32 int_vec_table_offset; + u32 phy_attr_table_offset; + u32 port_recovery_timer; + u32 interrupt_reassertion_delay; + u32 fatal_n_non_fatal_dump; /* 0x28 */ + } pm80xx_tbl; }; -struct general_status_table { + +union general_status_table { + struct { u32 gst_len_mpistate; u32 iq_freeze_state0; u32 iq_freeze_state1; u32 msgu_tcnt; u32 iop_tcnt; - u32 reserved; + u32 rsvd; u32 phy_state[8]; - u32 reserved1; - u32 reserved2; - u32 reserved3; + u32 gpio_input_val; + u32 rsvd1[2]; u32 recover_err_info[8]; + } pm8001_tbl; + struct { + u32 gst_len_mpistate; + u32 iq_freeze_state0; + u32 iq_freeze_state1; + u32 msgu_tcnt; + u32 iop_tcnt; + u32 rsvd[9]; + u32 gpio_input_val; + u32 rsvd1[2]; + u32 recover_err_info[8]; + } pm80xx_tbl; }; struct inbound_queue_table { u32 element_pri_size_cnt; @@ -342,6 +466,10 @@ struct pm8001_hba_memspace { u64 membase; u32 memsize; }; +struct isr_param { + struct pm8001_hba_info *drv_inst; + u32 irq_id; +}; struct pm8001_hba_info { char name[PM8001_NAME_LENGTH]; struct list_head list; @@ -351,15 +479,29 @@ struct pm8001_hba_info { struct device *dev; struct pm8001_hba_memspace io_mem[6]; struct mpi_mem_req memoryMap; + struct encrypt encrypt_info; /* support encryption */ + struct forensic_data forensic_info; + u32 fatal_bar_loc; + u32 forensic_last_offset; + u32 fatal_forensic_shift_offset; + u32 forensic_fatal_step; + u32 evtlog_ib_offset; + u32 evtlog_ob_offset; void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/ void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/ void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/ void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/ - struct main_cfg_table main_cfg_tbl; - struct general_status_table gs_tbl; - struct inbound_queue_table 
inbnd_q_tbl[PM8001_MAX_INB_NUM]; - struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM]; + void __iomem *pspa_q_tbl_addr; + /*MPI SAS PHY attributes Queue Config Table Addr*/ + void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */ + void __iomem *fatal_tbl_addr; /*MPI IVT Table Addr */ + union main_cfg_table main_cfg_tbl; + union general_status_table gs_tbl; + struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM]; + struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM]; + struct sas_phy_attribute_table phy_attr_table; + /* MPI SAS PHY attributes */ u8 sas_addr[SAS_ADDR_SIZE]; struct sas_ha_struct *sas;/* SCSI/SAS glue */ struct Scsi_Host *shost; @@ -372,18 +514,22 @@ struct pm8001_hba_info { struct pm8001_port port[PM8001_MAX_PHYS]; u32 id; u32 irq; + u32 iomb_size; /* SPC and SPCV IOMB size */ struct pm8001_device *devices; struct pm8001_ccb_info *ccb_info; #ifdef PM8001_USE_MSIX - struct msix_entry msix_entries[16];/*for msi-x interrupt*/ + struct msix_entry msix_entries[PM8001_MAX_MSIX_VEC]; + /*for msi-x interrupt*/ int number_of_intr;/*will be used in remove()*/ #endif #ifdef PM8001_USE_TASKLET - struct tasklet_struct tasklet; + struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC]; #endif u32 logging_level; u32 fw_status; + u32 smp_exp_mode; const struct firmware *fw_image; + struct isr_param irq_vector[PM8001_MAX_MSIX_VEC]; }; struct pm8001_work { @@ -419,6 +565,9 @@ struct pm8001_fw_image_header { #define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10 #define FLASH_UPDATE_DISABLED 0x11 +#define NCQ_READ_LOG_FLAG 0x80000000 +#define NCQ_ABORT_ALL_FLAG 0x40000000 +#define NCQ_2ND_RLE_FLAG 0x20000000 /** * brief param structure for firmware flash update. */ @@ -484,6 +633,7 @@ int pm8001_dev_found(struct domain_device *dev); void pm8001_dev_gone(struct domain_device *dev); int pm8001_lu_reset(struct domain_device *dev, u8 *lun); int pm8001_I_T_nexus_reset(struct domain_device *dev); +int pm8001_I_T_nexus_event_handler(struct domain_device *dev); int pm8001_query_task(struct sas_task *task); void pm8001_open_reject_retry( struct pm8001_hba_info *pm8001_ha, @@ -493,10 +643,82 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, u32 mem_size, u32 align); -int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha); +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, + struct inbound_queue_table *circularQ, + u32 opCode, void *payload, u32 responseQueue); +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, + u16 messageSize, void **messagePtr); +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, + struct outbound_queue_table *circularQ, u8 bc); +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, + void **messagePtr1, u8 *pBC); +int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 state); +int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, + void *payload); +int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, + void *fw_flash_updata_info, u32 tag); +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload); +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload); +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, + struct pm8001_tmf_task *tmf); 
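The interrupt-path changes here thread a vector number through the whole dispatch table (isr, isr_process_oq, interrupt_enable and interrupt_disable all gain a u8 vec argument), and the new struct isr_param gives each MSI-X vector its own cookie pairing the HBA instance with the vector id, presumably handed to request_irq() so one handler body can service any vector. A small user-space model of that cookie pattern follows; struct hba, MAX_VEC and the plain function call standing in for an interrupt are illustrative assumptions, not the driver's code.

#include <stdio.h>

#define MAX_VEC 4			/* stand-in for PM8001_MAX_MSIX_VEC */

struct hba;				/* modeled controller instance */

struct isr_param {			/* mirrors the new struct isr_param */
	struct hba *drv_inst;
	unsigned int irq_id;
};

struct hba {
	const char *name;
	struct isr_param irq_vector[MAX_VEC];
};

/*
 * Models an MSI-X handler: the registered cookie is one isr_param, so a
 * single handler can recover both the HBA and the vector it serves, and
 * from the vector pick the matching outbound queue.
 */
static void isr(void *cookie)
{
	struct isr_param *p = cookie;

	printf("%s: servicing outbound queue of vector %u\n",
	       p->drv_inst->name, p->irq_id);
}

int main(void)
{
	struct hba ha = { .name = "pm80xx" };
	unsigned int i;

	for (i = 0; i < MAX_VEC; i++) {
		ha.irq_vector[i].drv_inst = &ha;
		ha.irq_vector[i].irq_id = i;
		isr(&ha.irq_vector[i]);	/* the driver registers this cookie */
	}
	return 0;
}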
+int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, + u8 flag, u32 task_tag, u32 cmd_tag); +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id); +void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd); +void pm8001_work_fn(struct work_struct *work); +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, + void *data, int handler); +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate); +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr); +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i); +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb); +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +struct sas_task *pm8001_alloc_task(void); +void pm8001_task_done(struct sas_task *task); +void pm8001_free_task(struct sas_task *task); +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag); +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id); +int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha); +int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); +void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, + u32 length, u8 *buf); +int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); +ssize_t pm80xx_get_fatal_dump(struct device *cdev, + struct device_attribute *attr, char *buf); +ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf); /* ctl shared API */ extern struct device_attribute *pm8001_host_attrs[]; +static inline void +pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha, + struct sas_task *task, struct pm8001_ccb_info *ccb, + u32 ccb_idx) +{ + pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx); + smp_mb(); /*in order to force CPU ordering*/ + spin_unlock(&pm8001_ha->lock); + task->task_done(task); + spin_lock(&pm8001_ha->lock); +} + #endif diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c new file mode 100644 index 00000000000..d70587f9618 --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -0,0 +1,4529 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 PMC-Sierra, Inc., + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. 
+ * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + #include <linux/slab.h> + #include "pm8001_sas.h" + #include "pm80xx_hwi.h" + #include "pm8001_chips.h" + #include "pm8001_ctl.h" + +#define SMP_DIRECT 1 +#define SMP_INDIRECT 2 + + +int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value) +{ + u32 reg_val; + unsigned long start; + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value); + /* confirm the setting is written */ + start = jiffies + HZ; /* 1 sec */ + do { + reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER); + } while ((reg_val != shift_value) && time_before(jiffies, start)); + if (reg_val != shift_value) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER" + " = 0x%x\n", reg_val)); + return -1; + } + return 0; +} + +void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset, + const void *destination, + u32 dw_count, u32 bus_base_number) +{ + u32 index, value, offset; + u32 *destination1; + destination1 = (u32 *)destination; + + for (index = 0; index < dw_count; index += 4, destination1++) { + offset = (soffset + index / 4); + if (offset < (64 * 1024)) { + value = pm8001_cr32(pm8001_ha, bus_base_number, offset); + *destination1 = cpu_to_le32(value); + } + } + return; +} + +ssize_t pm80xx_get_fatal_dump(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr; + u32 accum_len , reg_val, index, *temp; + unsigned long start; + u8 *direct_data; + char *fatal_error_data = buf; + + pm8001_ha->forensic_info.data_buf.direct_data = buf; + if (pm8001_ha->chip_id == chip_8001) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "Not supported for SPC controller"); + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; + } + if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("forensic_info TYPE_NON_FATAL..............\n")); + direct_data = (u8 *)fatal_error_data; + pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL; + pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET; + pm8001_ha->forensic_info.data_buf.read_len = 
0; + + pm8001_ha->forensic_info.data_buf.direct_data = direct_data; + + /* start to get data */ + /* Program the MEMBASE II Shifting Register with 0x00.*/ + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, + pm8001_ha->fatal_forensic_shift_offset); + pm8001_ha->forensic_last_offset = 0; + pm8001_ha->forensic_fatal_step = 0; + pm8001_ha->fatal_bar_loc = 0; + } + + /* Read until accum_len is retrieved */ + accum_len = pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); + PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n", + accum_len)); + if (accum_len == 0xFFFFFFFF) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Possible PCI issue 0x%x not expected\n", + accum_len)); + return -EIO; + } + if (accum_len == 0 || accum_len >= 0x100000) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 0xFFFFFFFF); + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; + } + temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr; + if (pm8001_ha->forensic_fatal_step == 0) { +moreData: + if (pm8001_ha->forensic_info.data_buf.direct_data) { + /* Data is in bar, copy to host memory */ + pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc, + pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr, + pm8001_ha->forensic_info.data_buf.direct_len , + 1); + } + pm8001_ha->fatal_bar_loc += + pm8001_ha->forensic_info.data_buf.direct_len; + pm8001_ha->forensic_info.data_buf.direct_offset += + pm8001_ha->forensic_info.data_buf.direct_len; + pm8001_ha->forensic_last_offset += + pm8001_ha->forensic_info.data_buf.direct_len; + pm8001_ha->forensic_info.data_buf.read_len = + pm8001_ha->forensic_info.data_buf.direct_len; + + if (pm8001_ha->forensic_last_offset >= accum_len) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 3); + for (index = 0; index < (SYSFS_OFFSET / 4); index++) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); + } + + pm8001_ha->fatal_bar_loc = 0; + pm8001_ha->forensic_fatal_step = 1; + pm8001_ha->fatal_forensic_shift_offset = 0; + pm8001_ha->forensic_last_offset = 0; + return (char *)pm8001_ha-> + forensic_info.data_buf.direct_data - + (char *)buf; + } + if (pm8001_ha->fatal_bar_loc < (64 * 1024)) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", 2); + for (index = 0; index < (SYSFS_OFFSET / 4); index++) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); + } + return (char *)pm8001_ha-> + forensic_info.data_buf.direct_data - + (char *)buf; + } + + /* Increment the MEMBASE II Shifting Register value by 0x100.*/ + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 2); + for (index = 0; index < 256; index++) { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); + } + pm8001_ha->fatal_forensic_shift_offset += 0x100; + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, + pm8001_ha->fatal_forensic_shift_offset); + pm8001_ha->fatal_bar_loc = 0; + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; + } + if (pm8001_ha->forensic_fatal_step == 1) { + pm8001_ha->fatal_forensic_shift_offset = 
0; + /* Read 64K of the debug data. */ + pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, + pm8001_ha->fatal_forensic_shift_offset); + pm8001_mw32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_HANDSHAKE, + MPI_FATAL_EDUMP_HANDSHAKE_RDY); + + /* Poll FDDHSHK until clear */ + start = jiffies + (2 * HZ); /* 2 sec */ + + do { + reg_val = pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_HANDSHAKE); + } while ((reg_val) && time_before(jiffies, start)); + + if (reg_val != 0) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER" + " = 0x%x\n", reg_val)); + return -EIO; + } + + /* Read the next 64K of the debug data. */ + pm8001_ha->forensic_fatal_step = 0; + if (pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_STATUS) != + MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) { + pm8001_mw32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0); + goto moreData; + } else { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf(pm8001_ha-> + forensic_info.data_buf.direct_data, + "%08x ", 4); + pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF; + pm8001_ha->forensic_info.data_buf.direct_len = 0; + pm8001_ha->forensic_info.data_buf.direct_offset = 0; + pm8001_ha->forensic_info.data_buf.read_len = 0; + } + } + + return (char *)pm8001_ha->forensic_info.data_buf.direct_data - + (char *)buf; +} + +/** + * read_main_config_table - read the configure table and save it. + * @pm8001_ha: our hba card information + */ +static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature = + pm8001_mr32(address, MAIN_SIGNATURE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev = + pm8001_mr32(address, MAIN_INTERFACE_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev = + pm8001_mr32(address, MAIN_FW_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io = + pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl = + pm8001_mr32(address, MAIN_MAX_SGL_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag = + pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset = + pm8001_mr32(address, MAIN_GST_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset = + pm8001_mr32(address, MAIN_IBQ_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset = + pm8001_mr32(address, MAIN_OBQ_OFFSET); + + /* read Error Dump Offset and Length */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); + + /* read GPIO LED settings from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping = + pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET); + + /* read analog Setting offset from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset = + pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset = + pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset = + 
pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET); +} + +/** + * read_general_status_table - read the general status table and save it. + * @pm8001_ha: our hba card information + */ +static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->general_stat_tbl_addr; + pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate = + pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt = + pm8001_mr32(address, GST_MSGUTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt = + pm8001_mr32(address, GST_IOPTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val = + pm8001_mr32(address, GST_GPIO_INPUT_VAL); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] = + pm8001_mr32(address, GST_RERRINFO_OFFSET0); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] = + pm8001_mr32(address, GST_RERRINFO_OFFSET1); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] = + pm8001_mr32(address, GST_RERRINFO_OFFSET2); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] = + pm8001_mr32(address, GST_RERRINFO_OFFSET3); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] = + pm8001_mr32(address, GST_RERRINFO_OFFSET4); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] = + pm8001_mr32(address, GST_RERRINFO_OFFSET5); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] = + pm8001_mr32(address, GST_RERRINFO_OFFSET6); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] = + pm8001_mr32(address, GST_RERRINFO_OFFSET7); +} +/** + * read_phy_attr_table - read the phy attribute table and save it. + * @pm8001_ha: our hba card information + */ +static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->pspa_q_tbl_addr; + pm8001_ha->phy_attr_table.phystart1_16[0] = + pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[1] = + pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[2] = + pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[3] = + pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[4] = + pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[5] = + pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[6] = + pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[7] = + pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[8] = + pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[9] = + pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[10] = + pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[11] = + pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[12] = + pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[13] = + pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[14] = + pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[15] = + pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET); + + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET); + 
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET); + +} + +/** + * read_inbnd_queue_table - read the inbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + u32 offset = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + IB_PIPCI_BAR))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET)); + } +} + +/** + * read_outbnd_queue_table - read the outbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + u32 offset = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + OB_CIPCI_BAR))); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET)); + } +} + +/** + * init_default_table_values - init the default table. 
+ * @pm8001_ha: our hba card information + */ +static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) +{ + int i; + u32 offsetib, offsetob; + void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; + void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; + + /* Disable end to end CRC checking */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16); + + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = + PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30); + pm8001_ha->inbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[IB + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr; + pm8001_ha->inbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[IB + i].total_len; + pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = + pm8001_ha->memoryMap.region[CI + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = + pm8001_ha->memoryMap.region[CI + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].ci_virt = + pm8001_ha->memoryMap.region[CI + i].virt_ptr; + offsetib = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(addressib, + (offsetib + 0x14))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(addressib, (offsetib + 0x18)); + pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; + pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; + } + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + pm8001_ha->outbnd_q_tbl[i].element_size_cnt = + PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30); + pm8001_ha->outbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[OB + i].phys_addr_lo; + pm8001_ha->outbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr; + pm8001_ha->outbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[OB + i].total_len; + pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = + pm8001_ha->memoryMap.region[PI + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = + pm8001_ha->memoryMap.region[PI + i].phys_addr_lo; + /* interrupt vector based on oq */ + pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24); + pm8001_ha->outbnd_q_tbl[i].pi_virt = + pm8001_ha->memoryMap.region[PI + i].virt_ptr; + offsetob = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(addressob, + offsetob + 0x14)); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(addressob, (offsetob + 0x18)); + pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0; + 
pm8001_ha->outbnd_q_tbl[i].producer_index = 0; + } +} + +/** + * update_main_config_table - update the main default table to the HBA. + * @pm8001_ha: our hba card information + */ +static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size); + pm8001_mw32(address, MAIN_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity); + pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt); + pm8001_mw32(address, MAIN_EVENT_CRC_CHECK, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump); + + /* SPCv specific */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF; + /* Set GPIOLED to 0x2 for LED indicator */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000; + pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping); + + pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer); + pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay); +} + +/** + * update_inbnd_queue_table - update the inbound queue table to the HBA. + * @pm8001_ha: our hba card information + */ +static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + u16 offset = number * 0x20; + pm8001_mw32(address, offset + IB_PROPERITY_OFFSET, + pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt); + pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); +} + +/** + * update_outbnd_queue_table - update the outbound queue table to the HBA. 
+ * @pm8001_ha: our hba card information + */ +static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + u16 offset = number * 0x24; + pm8001_mw32(address, offset + OB_PROPERITY_OFFSET, + pm8001_ha->outbnd_q_tbl[number].element_size_cnt); + pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); + pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET, + pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay); +} + +/** + * mpi_init_check - check firmware initialization status. + * @pm8001_ha: our hba card information + */ +static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + + /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the + table is updated */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE); + /* wait until Inbound DoorBell Clear Register toggled */ + if (IS_SPCV_12G(pm8001_ha->pdev)) { + max_wait_count = 4 * 1000 * 1000;/* 4 sec */ + } else { + max_wait_count = 2 * 1000 * 1000;/* 2 sec */ + } + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_UPDATE; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) + return -1; + /* check the MPI-State for initialization up to 100 ms*/ + max_wait_count = 100 * 1000;/* 100 msec */ + do { + udelay(1); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + } while ((GST_MPI_STATE_INIT != + (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count)); + if (!max_wait_count) + return -1; + + /* check MPI Initialization error */ + gst_len_mpistate = gst_len_mpistate >> 16; + if (0x0000 != gst_len_mpistate) + return -1; + + return 0; +} + +/** + * check_fw_ready - The LLDD checks if the FW is ready; if not, return an error. 
+ * @pm8001_ha: our hba card information + */ +static int check_fw_ready(struct pm8001_hba_info *pm8001_ha) +{ + u32 value; + u32 max_wait_count; + u32 max_wait_time; + int ret = 0; + + /* reset / PCIe ready */ + max_wait_time = max_wait_count = 100 * 1000; /* 100 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while ((value == 0xFFFFFFFF) && (--max_wait_count)); + + /* check ila status */ + max_wait_time = max_wait_count = 1000 * 1000; /* 1000 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_ILA_READY) != + SCRATCH_PAD_ILA_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" ila ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check RAAE status */ + max_wait_time = max_wait_count = 1800 * 1000; /* 1800 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_RAAE_READY) != + SCRATCH_PAD_RAAE_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" raae ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check iop0 status */ + max_wait_time = max_wait_count = 600 * 1000; /* 600 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) && + (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" iop0 ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check iop1 status only for 16 port controllers */ + if ((pm8001_ha->chip_id != chip_8008) && + (pm8001_ha->chip_id != chip_8009)) { + /* 200 milli sec */ + max_wait_time = max_wait_count = 200 * 1000; + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_IOP1_READY) != + SCRATCH_PAD_IOP1_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "iop1 ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + } + + return ret; +} + +static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *base_addr; + u32 value; + u32 offset; + u32 pcibar; + u32 pcilogic; + + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */ + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n", + offset, value)); + pcilogic = (value & 0xFC000000) >> 26; + pcibar = get_pci_bar_index(pcilogic); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar)); + pm8001_ha->main_cfg_tbl_addr = base_addr = + pm8001_ha->io_mem[pcibar].memvirtaddr + offset; + pm8001_ha->general_stat_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) & + 0xFFFFFF); + pm8001_ha->inbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) & + 0xFFFFFF); + pm8001_ha->outbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) & + 0xFFFFFF); + pm8001_ha->ivt_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) & + 0xFFFFFF); + pm8001_ha->pspa_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) & + 0xFFFFFF); + pm8001_ha->fatal_tbl_addr = + base_addr + 
(pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) & + 0xFFFFFF); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GST OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("INBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("OBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("IVT OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PSPA OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x90))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - main cfg %p general status %p\n", + pm8001_ha->main_cfg_tbl_addr, + pm8001_ha->general_stat_tbl_addr)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - inbnd %p obnd %p\n", + pm8001_ha->inbnd_q_tbl_addr, + pm8001_ha->outbnd_q_tbl_addr)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - pspa %p ivt %p\n", + pm8001_ha->pspa_q_tbl_addr, + pm8001_ha->ivt_tbl_addr)); +} + +/** + * pm80xx_set_thermal_config - support the thermal configuration + * @pm8001_ha: our hba card information. + */ +int +pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + struct inbound_queue_table *circularQ; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return -1; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) | + (THERMAL_ENABLE << 8) | THERMAL_OP_CODE; + payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + return rc; + +} + +/** +* pm80xx_set_sas_protocol_timer_config - support the SAS Protocol +* Timer configuration page +* @pm8001_ha: our hba card information. 
+*/ +static int +pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + struct inbound_queue_table *circularQ; + SASProtocolTimerConfig_t SASConfigPage; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_tag_alloc(pm8001_ha, &tag); + + if (rc) + return -1; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + + SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE; + SASConfigPage.MST_MSI = 3 << 15; + SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO; + SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) | + (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER; + SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME; + + if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF) + SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF; + + + SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) | + SAS_OPNRJT_RTRY_INTVL; + SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16) + | SAS_COPNRJT_RTRY_TMO; + SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16) + | SAS_COPNRJT_RTRY_THR; + SASConfigPage.MAX_AIP = SAS_MAX_AIP; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.pageCode " + "0x%08x\n", SASConfigPage.pageCode)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.MST_MSI " + " 0x%08x\n", SASConfigPage.MST_MSI)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO " + " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_FRM_TMO " + " 0x%08x\n", SASConfigPage.STP_FRM_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_IDLE_TMO " + " 0x%08x\n", SASConfigPage.STP_IDLE_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL " + " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO " + " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR " + " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR)); + PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP " + " 0x%08x\n", SASConfigPage.MAX_AIP)); + + memcpy(&payload.cfg_pg, &SASConfigPage, + sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + + return rc; +} + +/** + * pm80xx_get_encrypt_info - Check for encryption + * @pm8001_ha: our hba card information. 
+ */ +static int +pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) +{ + u32 scratch3_value; + int ret; + + /* Read encryption status from SCRATCH PAD 3 */ + scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + + if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_READY) { + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + pm8001_ha->encrypt_info.status = 0; + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X." + "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) == + SCRATCH_PAD3_ENC_DISABLED) { + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n", + scratch3_value)); + pm8001_ha->encrypt_info.status = 0xFFFFFFFF; + pm8001_ha->encrypt_info.cipher_mode = 0; + pm8001_ha->encrypt_info.sec_mode = 0; + return 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_DIS_ERR) { + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X." + "Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = -1; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_ENA_ERR) { + + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X." + "Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = -1; + } + return ret; +} + +/** + * pm80xx_encrypt_update - update flash with encryption information + * @pm8001_ha: our hba card information. 
+ */
+static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
+{
+	struct kek_mgmt_req payload;
+	struct inbound_queue_table *circularQ;
+	int rc;
+	u32 tag;
+	u32 opc = OPC_INB_KEK_MANAGEMENT;
+
+	memset(&payload, 0, sizeof(struct kek_mgmt_req));
+	rc = pm8001_tag_alloc(pm8001_ha, &tag);
+	if (rc)
+		return -1;
+
+	circularQ = &pm8001_ha->inbnd_q_tbl[0];
+	payload.tag = cpu_to_le32(tag);
+	/* Currently only one key is used. New KEK index is 1.
+	 * Current KEK index is 1. Store KEK to NVRAM is 1.
+	 */
+	payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
+					KEK_MGMT_SUBOP_KEYCARDUPDATE);
+
+	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+	return rc;
+}
+
+/**
+ * pm80xx_chip_init - the main init function that initializes the whole PM80xx chip.
+ * @pm8001_ha: our hba card information
+ */
+static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
+{
+	int ret;
+	u8 i = 0;
+
+	/* check the firmware status */
+	if (-1 == check_fw_ready(pm8001_ha)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Firmware is not ready!\n"));
+		return -EBUSY;
+	}
+
+	/* Initialize pci space address eg: mpi offset */
+	init_pci_device_addresses(pm8001_ha);
+	init_default_table_values(pm8001_ha);
+	read_main_config_table(pm8001_ha);
+	read_general_status_table(pm8001_ha);
+	read_inbnd_queue_table(pm8001_ha);
+	read_outbnd_queue_table(pm8001_ha);
+	read_phy_attr_table(pm8001_ha);
+
+	/* update main config table, inbound table and outbound table */
+	update_main_config_table(pm8001_ha);
+	for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++)
+		update_inbnd_queue_table(pm8001_ha, i);
+	for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++)
+		update_outbnd_queue_table(pm8001_ha, i);
+
+	/* notify firmware update finished and check initialization status */
+	if (0 == mpi_init_check(pm8001_ha)) {
+		PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("MPI initialize successful!\n"));
+	} else
+		return -EBUSY;
+
+	/* send SAS protocol timer configuration page to FW */
+	ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
+
+	/* Check for encryption */
+	if (pm8001_ha->chip->encrypt) {
+		PM8001_INIT_DBG(pm8001_ha,
+			pm8001_printk("Checking for encryption\n"));
+		ret = pm80xx_get_encrypt_info(pm8001_ha);
+		if (ret == -1) {
+			PM8001_INIT_DBG(pm8001_ha,
+				pm8001_printk("Encryption error!\n"));
+			if (pm8001_ha->encrypt_info.status == 0x81) {
+				PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+					"Encryption enabled with error. "
+ "Saving encryption key to flash\n")); + pm80xx_encrypt_update(pm8001_ha); + } + } + } + return 0; +} + +static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + init_pci_device_addresses(pm8001_ha); + /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the + table is stop */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET); + + /* wait until Inbound DoorBell Clear Register toggled */ + if (IS_SPCV_12G(pm8001_ha->pdev)) { + max_wait_count = 4 * 1000 * 1000;/* 4 sec */ + } else { + max_wait_count = 2 * 1000 * 1000;/* 2 sec */ + } + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_RESET; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TIMEOUT:IBDB value/=%x\n", value)); + return -1; + } + + /* check the MPI-State for termination in progress */ + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */ + do { + udelay(1); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + if (GST_MPI_STATE_UNINIT == + (gst_len_mpistate & GST_MPI_STATE_MASK)) + break; + } while (--max_wait_count); + if (!max_wait_count) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TIME OUT MPI State = 0x%x\n", + gst_len_mpistate & GST_MPI_STATE_MASK)); + return -1; + } + + return 0; +} + +/** + * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all + * the FW register status to the originated status. + * @pm8001_ha: our hba card information + */ + +static int +pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 regval; + u32 bootloader_state; + u32 ibutton0, ibutton1; + + /* Check if MPI is in ready state to reset */ + if (mpi_uninit_check(pm8001_ha) != 0) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MPI state is not ready\n")); + return -1; + } + + /* checked for reset register normal state; 0x0 */ + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("reset register before write : 0x%x\n", regval)); + + pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE); + mdelay(500); + + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("reset register after write 0x%x\n", regval)); + + if ((regval & SPCv_SOFT_RESET_READ_MASK) == + SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" soft reset successful [regval: 0x%x]\n", + regval)); + } else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" soft reset failed [regval: 0x%x]\n", + regval)); + + /* check bootloader is successfully executed or in HDA mode */ + bootloader_state = + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) & + SCRATCH_PAD1_BOOTSTATE_MASK; + + if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode SEEPROM\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode Bootstrap Pin\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode soft reset\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state-HDA 
+		}
+		return -EBUSY;
+	}
+
+	/* check the firmware status after reset */
+	if (-1 == check_fw_ready(pm8001_ha)) {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("Firmware is not ready!\n"));
+		/* check iButton feature support for motherboard controller */
+		if (pm8001_ha->pdev->subsystem_vendor !=
+			PCI_VENDOR_ID_ADAPTEC2 &&
+			pm8001_ha->pdev->subsystem_vendor != 0) {
+			ibutton0 = pm8001_cr32(pm8001_ha, 0,
+					MSGU_HOST_SCRATCH_PAD_6);
+			ibutton1 = pm8001_cr32(pm8001_ha, 0,
+					MSGU_HOST_SCRATCH_PAD_7);
+			if (!ibutton0 && !ibutton1) {
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("iButton Feature is"
+						" not Available!!!\n"));
+				return -EBUSY;
+			}
+			if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) {
+				PM8001_FAIL_DBG(pm8001_ha,
+					pm8001_printk("CRC Check for iButton"
+						" Feature Failed!!!\n"));
+				return -EBUSY;
+			}
+		}
+	}
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("SPCv soft reset Complete\n"));
+	return 0;
+}
+
+static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
+{
+	u32 i;
+
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("chip reset start\n"));
+
+	/* do SPCv chip reset. */
+	pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11);
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("SPCv soft reset Complete\n"));
+
+	/* Check whether this delay is really required */
+	/* delay 10 usec */
+	udelay(10);
+
+	/* wait for 20 msec until the firmware gets reloaded */
+	i = 20;
+	do {
+		mdelay(1);
+	} while ((--i) != 0);
+
+	PM8001_INIT_DBG(pm8001_ha,
+		pm8001_printk("chip reset finished\n"));
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_enable - enable PM80xx chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+{
+	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+	pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_disable - disable PM80xx chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+{
+	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
+}
+
+/**
+ * pm80xx_chip_interrupt_enable - enable PM80xx chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+	u32 mask;
+	mask = (u32)(1 << vec);
+
+	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+	return;
+#endif
+	pm80xx_chip_intx_interrupt_enable(pm8001_ha);
+}
+
+/**
+ * pm80xx_chip_interrupt_disable - disable PM80xx chip interrupt
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+	u32 mask;
+	if (vec == 0xFF)
+		mask = 0xFFFFFFFF;
+	else
+		mask = (u32)(1 << vec);
+	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+	return;
+#endif
+	pm80xx_chip_intx_interrupt_disable(pm8001_ha);
+}
+
+static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+	struct pm8001_device *pm8001_ha_dev)
+{
+	int res;
+	u32 ccb_tag;
+	struct pm8001_ccb_info *ccb;
+	struct sas_task *task = NULL;
+	struct task_abort_req task_abort;
+	struct inbound_queue_table *circularQ;
+	u32 opc = OPC_INB_SATA_ABORT;
+	int ret;
+
+	if (!pm8001_ha_dev) {
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+		return;
+	}
+
+	task = sas_alloc_slow_task(GFP_ATOMIC);
+
+	if (!task) {
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
+			"allocate task\n"));
task\n")); + return; + } + + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) + return; + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&task_abort, 0, sizeof(task_abort)); + task_abort.abort_all = cpu_to_le32(1); + task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + task_abort.tag = cpu_to_le32(ccb_tag); + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); + +} + +static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + struct sata_start_req sata_cmd; + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct host_to_dev_fis fis; + struct domain_device *dev; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate task !!!\n")); + return; + } + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate tag !!!\n")); + return; + } + + /* allocate domain device by ourselves as libsas + * is not going to provide any + */ + dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); + if (!dev) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Domain device cannot be allocated\n")); + sas_free_task(task); + return; + } else { + task->dev = dev; + task->dev->lldd_dev = pm8001_ha_dev; + } + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; + pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; + + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* construct read log FIS */ + memset(&fis, 0, sizeof(struct host_to_dev_fis)); + fis.fis_type = 0x27; + fis.flags = 0x80; + fis.command = ATA_CMD_READ_LOG_EXT; + fis.lbal = 0x10; + fis.sector_count = 0x1; + + sata_cmd.tag = cpu_to_le32(ccb_tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9)); + memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); + + res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); + +} + +/** + * mpi_ssp_completion- process the event that FW response to the SSP request. + * @pm8001_ha: our hba card information + * @piomb: the message contents of this outbound message. + * + * When FW has completed a ssp request for example a IO request, after it has + * filled the SG data with the data, it will trigger this event represent + * that he has finished the job,please check the coresponding buffer. + * So we will tell the caller who maybe waiting the result to tell upper layer + * that the task has been finished. 
+ */ +static void +mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 param; + u32 tag; + struct ssp_completion_resp *psspPayload; + struct task_status_struct *ts; + struct ssp_response_iu *iu; + struct pm8001_device *pm8001_dev; + psspPayload = (struct ssp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psspPayload->status); + tag = le32_to_cpu(psspPayload->tag); + ccb = &pm8001_ha->ccb_info[tag]; + if ((status == IO_ABORTED) && ccb->open_retry) { + /* Being completed by another */ + ccb->open_retry = 0; + return; + } + pm8001_dev = ccb->device; + param = le32_to_cpu(psspPayload->param); + t = ccb->task; + + if (status && status != IO_UNDERFLOW) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("sas IO status 0x%x\n", status)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SAS Address of IO Failure Drive" + ":%016llx", SAS_ADDR(t->dev->sas_addr))); + + switch (status) { + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_SUCCESS ,param = 0x%x\n", + param)); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + } else { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + iu = &psspPayload->ssp_resp_iu; + sas_ssp_task_response(pm8001_ha->dev, t, iu); + } + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB Tag\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_UNDERFLOW: + /* SSP Completion with error */ + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_UNDERFLOW ,param = 0x%x\n", + param)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + /* Force the midlayer to retry */ + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case 
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_DMA: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_DMA\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_DS_NON_OPERATIONAL); + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_TM_TAG_NOT_FOUND: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_TM_TAG_NOT_FOUND\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_SSP_EXT_IU_ZERO_LEN_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + /* not allowed 
	   case. Therefore, return failed status */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		break;
+	}
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("scsi_status = 0x%x\n",
+		psspPayload->ssp_resp_iu.status));
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+			"task 0x%p done with io_status 0x%x resp 0x%x "
+			"stat 0x%x but aborted by upper layer!\n",
+			t, status, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/* in order to force CPU ordering */
+		t->task_done(t);
+	}
+}
+
+/* See the comments for mpi_ssp_completion */
+static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct sas_task *t;
+	unsigned long flags;
+	struct task_status_struct *ts;
+	struct pm8001_ccb_info *ccb;
+	struct pm8001_device *pm8001_dev;
+	struct ssp_event_resp *psspPayload =
+		(struct ssp_event_resp *)(piomb + 4);
+	u32 event = le32_to_cpu(psspPayload->event);
+	u32 tag = le32_to_cpu(psspPayload->tag);
+	u32 port_id = le32_to_cpu(psspPayload->port_id);
+
+	ccb = &pm8001_ha->ccb_info[tag];
+	t = ccb->task;
+	pm8001_dev = ccb->device;
+	if (event)
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("sas IO status 0x%x\n", event));
+	if (unlikely(!t || !t->lldd_task || !t->dev))
+		return;
+	ts = &t->task_status;
+	PM8001_IO_DBG(pm8001_ha,
+		pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
+			port_id, tag, event));
+	switch (event) {
+	case IO_OVERFLOW:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		ts->residual = 0;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_XFER_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+		pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
+		return;
+	case IO_XFER_ERROR_PHY_NOT_READY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_EPROTO;
+		break;
+	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+	case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+ ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT); + return; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_INTERNAL_CRC_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_CMD_FRAME_ISSUED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n")); + return; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", event)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "task 0x%p done with event 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + u32 param; + u32 status; + u32 tag; + int i, j; + u8 sata_addr_low[4]; + u32 temp_sata_addr_low, temp_sata_addr_hi; + u8 sata_addr_hi[4]; + struct sata_completion_resp *psataPayload; + struct task_status_struct *ts; + struct ata_task_resp *resp ; + u32 *sata_resp; + struct pm8001_device *pm8001_dev; + unsigned long flags; + + psataPayload = (struct sata_completion_resp *)(piomb + 4); + status = le32_to_cpu(psataPayload->status); + tag = le32_to_cpu(psataPayload->tag); + + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("tag null\n")); + return; + } + ccb = &pm8001_ha->ccb_info[tag]; + param = le32_to_cpu(psataPayload->param); + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ccb null\n")); + return; + } + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task null\n")); + return; + } + + if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) + && unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + + ts = &t->task_status; + if (!ts) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ts null\n")); + return; + } + /* Print sas address of IO failed device */ + if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && + (status != IO_UNDERFLOW)) { + if (!((t->dev->parent) && + (DEV_IS_EXPANDER(t->dev->parent->dev_type)))) { + for (i = 0 , j = 4; i <= 3 && j <= 7; i++ , j++) + sata_addr_low[i] = pm8001_ha->sas_addr[j]; + for (i = 0 , j = 0; i <= 3 && j <= 3; i++ , j++) + sata_addr_hi[i] = pm8001_ha->sas_addr[j]; + memcpy(&temp_sata_addr_low, sata_addr_low, + sizeof(sata_addr_low)); + memcpy(&temp_sata_addr_hi, sata_addr_hi, + sizeof(sata_addr_hi)); + temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) + |((temp_sata_addr_hi << 8) & + 0xff0000) | + ((temp_sata_addr_hi >> 8) + & 0xff00) | + ((temp_sata_addr_hi << 24) & + 0xff000000)); + temp_sata_addr_low = ((((temp_sata_addr_low >> 24) + & 0xff) | + ((temp_sata_addr_low << 8) + & 0xff0000) | + ((temp_sata_addr_low >> 8) + & 0xff00) | + ((temp_sata_addr_low << 24) + & 0xff000000)) + + pm8001_dev->attached_phy + + 0x10); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SAS Address of IO Failure Drive:" + "%08x%08x", temp_sata_addr_hi, + temp_sata_addr_low)); + + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SAS Address of IO Failure Drive:" + "%016llx", SAS_ADDR(t->dev->sas_addr))); + } + } + switch (status) { + 
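	/*
+	 * Status-to-sas_task mapping (a summary of the cases below, not
+	 * extra behaviour): each case sets ts->resp/ts->stat; the "device
+	 * not responding" flavours also hand the event to
+	 * pm8001_handle_event() and complete the task early.
+	 */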
+	case IO_SUCCESS:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+		if (param == 0) {
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAM_STAT_GOOD;
+			/* check if response is for SEND READ LOG */
+			if (pm8001_dev &&
+				(pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
+				/* set new bit for abort_all */
+				pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
+				/* clear bit for read log */
+				pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+				pm80xx_send_abort_all(pm8001_ha, pm8001_dev);
+				/* Free the tag */
+				pm8001_tag_free(pm8001_ha, tag);
+				sas_free_task(t);
+				return;
+			}
+		} else {
+			u8 len;
+			ts->resp = SAS_TASK_COMPLETE;
+			ts->stat = SAS_PROTO_RESPONSE;
+			ts->residual = param;
+			PM8001_IO_DBG(pm8001_ha,
+				pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
+					param));
+			sata_resp = &psataPayload->sata_resp[0];
+			resp = (struct ata_task_resp *)ts->buf;
+			if (t->ata_task.dma_xfer == 0 &&
+				t->data_dir == PCI_DMA_FROMDEVICE) {
+				len = sizeof(struct pio_setup_fis);
+				PM8001_IO_DBG(pm8001_ha,
+					pm8001_printk("PIO read len = %d\n", len));
+			} else if (t->ata_task.use_ncq) {
+				len = sizeof(struct set_dev_bits_fis);
+				PM8001_IO_DBG(pm8001_ha,
+					pm8001_printk("FPDMA len = %d\n", len));
+			} else {
+				len = sizeof(struct dev_to_host_fis);
+				PM8001_IO_DBG(pm8001_ha,
+					pm8001_printk("other len = %d\n", len));
+			}
+			if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
+				resp->frame_len = len;
+				memcpy(&resp->ending_fis[0], sata_resp, len);
+				ts->buf_valid_size = sizeof(*resp);
+			} else
+				PM8001_IO_DBG(pm8001_ha,
+					pm8001_printk("response too large\n"));
+		}
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_ABORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_ABORTED IOMB Tag\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_ABORTED_TASK;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	/* the following cases are still TODO */
+	case IO_UNDERFLOW:
+		/* SATA Completion with error */
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_UNDERFLOW param = %d\n", param));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_UNDERRUN;
+		ts->residual = param;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_NO_DEVICE:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_NO_DEVICE\n"));
+		ts->resp = SAS_TASK_UNDELIVERED;
+		ts->stat = SAS_PHY_DOWN;
+		break;
+	case IO_XFER_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_INTERRUPTED;
+		break;
+	case IO_XFER_ERROR_PHY_NOT_READY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
+	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+			"IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_EPROTO;
+		break;
+	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+		break;
+	case IO_OPEN_CNX_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+		break;
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+	case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+	case
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); + return; + } + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); + return; + } + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_DMA: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_DMA\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_XFER_ERROR_SATA_LINK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_NON_OPERATIONAL); + ts->resp = 
SAS_TASK_UNDELIVERED;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+			return;
+		}
+		break;
+	case IO_DS_IN_RECOVERY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_IN_RECOVERY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	case IO_DS_IN_ERROR:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_DS_IN_ERROR\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		if (!t->uldd_task) {
+			pm8001_handle_event(pm8001_ha, pm8001_dev,
+				IO_DS_IN_ERROR);
+			ts->resp = SAS_TASK_UNDELIVERED;
+			ts->stat = SAS_QUEUE_FULL;
+			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+			return;
+		}
+		break;
+	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;	/* don't fall through into the default handling */
+	default:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("Unknown status 0x%x\n", status));
+		/* not allowed case. Therefore, return failed status */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DEV_NO_RESPONSE;
+		break;
+	}
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("task 0x%p done with io_status 0x%x"
+				" resp 0x%x stat 0x%x but aborted by upper layer!\n",
+				t, status, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+	}
+}
+
+/* See the comments for mpi_ssp_completion */
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct sas_task *t;
+	struct task_status_struct *ts;
+	struct pm8001_ccb_info *ccb;
+	struct pm8001_device *pm8001_dev;
+	struct sata_event_resp *psataPayload =
+		(struct sata_event_resp *)(piomb + 4);
+	u32 event = le32_to_cpu(psataPayload->event);
+	u32 tag = le32_to_cpu(psataPayload->tag);
+	u32 port_id = le32_to_cpu(psataPayload->port_id);
+	u32 dev_id = le32_to_cpu(psataPayload->device_id);
+	unsigned long flags;
+
+	ccb = &pm8001_ha->ccb_info[tag];
+
+	if (ccb) {
+		t = ccb->task;
+		pm8001_dev = ccb->device;
+	} else {
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("No CCB!!! returning\n"));
returning\n")); + return; + } + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SATA EVENT 0x%x\n", event)); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + /* send read log extension */ + if (pm8001_dev) + pm80xx_send_read_log(pm8001_ha, pm8001_dev); + return; + } + + if (unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + + ts = &t->task_status; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, tag, event)); + switch (event) { + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + 
pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_PEER_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_CMD_FRAME_ISSUED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n")); + break; + case IO_XFER_PIO_SETUP_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_INTERNAL_CRC_ERROR: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_DMA_ACTIVATE_TIMEOUT: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_XFR_DMA_ACTIVATE_TIMEOUT\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", event)); + /* not allowed case. 
	   Therefore, return failed status */
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_TO;
+		break;
+	}
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("task 0x%p done with io_status 0x%x"
+				" resp 0x%x stat 0x%x but aborted by upper layer!\n",
+				t, event, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+	}
+}
+
+/* See the comments for mpi_ssp_completion */
+static void
+mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	u32 param, i;
+	struct sas_task *t;
+	struct pm8001_ccb_info *ccb;
+	unsigned long flags;
+	u32 status;
+	u32 tag;
+	struct smp_completion_resp *psmpPayload;
+	struct task_status_struct *ts;
+	struct pm8001_device *pm8001_dev;
+	char *pdma_respaddr = NULL;
+
+	psmpPayload = (struct smp_completion_resp *)(piomb + 4);
+	status = le32_to_cpu(psmpPayload->status);
+	tag = le32_to_cpu(psmpPayload->tag);
+
+	ccb = &pm8001_ha->ccb_info[tag];
+	param = le32_to_cpu(psmpPayload->param);
+	t = ccb->task;
+	ts = &t->task_status;
+	pm8001_dev = ccb->device;
+	if (status)
+		PM8001_FAIL_DBG(pm8001_ha,
+			pm8001_printk("smp IO status 0x%x\n", status));
+	if (unlikely(!t || !t->lldd_task || !t->dev))
+		return;
+
+	switch (status) {
+
+	case IO_SUCCESS:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAM_STAT_GOOD;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+			PM8001_IO_DBG(pm8001_ha,
+				pm8001_printk("DIRECT RESPONSE Length:%d\n",
+					param));
+			pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
+						((u64)sg_dma_address
+						(&t->smp_task.smp_resp))));
+			for (i = 0; i < param; i++) {
+				*(pdma_respaddr+i) = psmpPayload->_r_a[i];
+				PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+					"SMP Byte%d DMA data 0x%x psmp 0x%x\n",
+					i, *(pdma_respaddr+i),
+					psmpPayload->_r_a[i]));
+			}
+		}
+		break;
+	case IO_ABORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_ABORTED IOMB\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_ABORTED_TASK;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_OVERFLOW:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_DATA_OVERRUN;
+		ts->residual = 0;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		break;
+	case IO_NO_DEVICE:
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_PHY_DOWN;
+		break;
+	case IO_ERROR_HW_TIMEOUT:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAM_STAT_BUSY;
+		break;
+	case IO_XFER_ERROR_BREAK:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAM_STAT_BUSY;
+		break;
+	case IO_XFER_ERROR_PHY_NOT_READY:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAM_STAT_BUSY;
+		break;
+	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat =
SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk(\ + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_RX_FRAME: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_RX_FRAME\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_ERROR_INTERNAL_SMP_RESOURCE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + /* not allowed case. 
	   Therefore, return failed status */
+		break;
+	}
+	spin_lock_irqsave(&t->task_state_lock, flags);
+	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+	t->task_state_flags |= SAS_TASK_STATE_DONE;
+	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+			"task 0x%p done with io_status 0x%x resp 0x%x"
+			" stat 0x%x but aborted by upper layer!\n",
+			t, status, ts->resp, ts->stat));
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+	} else {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/* in order to force CPU ordering */
+		t->task_done(t);
+	}
+}
+
+/**
+ * pm80xx_hw_event_ack_req - for the PM80xx, some events need to be
+ * acknowledged to the FW.
+ * @pm8001_ha: our hba card information
+ * @Qnum: the outbound queue message number.
+ * @SEA: source of event to ack
+ * @port_id: port id.
+ * @phyId: phy id.
+ * @param0: parameter 0.
+ * @param1: parameter 1.
+ */
+static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
+	u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
+{
+	struct hw_event_ack_req payload;
+	u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
+	struct inbound_queue_table *circularQ;
+
+	memset((u8 *)&payload, 0, sizeof(payload));
+	circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
+	payload.tag = cpu_to_le32(1);
+	payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
+		((phyId & 0xFF) << 24) | (port_id & 0xFF));
+	payload.param0 = cpu_to_le32(param0);
+	payload.param1 = cpu_to_le32(param1);
+	pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+}
+
+static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+	u32 phyId, u32 phy_op);
+
+/**
+ * hw_event_sas_phy_up - FW tells us a SAS phy is up.
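+ *
+ * A sketch of the per-u32 field decode used below (bit positions follow
+ * the masks and shifts in this handler):
+ *
+ *	link_rate = lr_status_evt_portid >> 28;			/* bits 31:28 */
+ *	port_id   = lr_status_evt_portid & 0xFF;		/* bits 7:0 */
+ *	phy_id    = (phyid_npip_portstate >> 16) & 0xFF;	/* bits 23:16 */
+ *	portstate = phyid_npip_portstate & 0xF;			/* bits 3:0 */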
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+	struct hw_event_resp *pPayload =
+		(struct hw_event_resp *)(piomb + 4);
+	u32 lr_status_evt_portid =
+		le32_to_cpu(pPayload->lr_status_evt_portid);
+	u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+
+	u8 link_rate =
+		(u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+	u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+	u8 phy_id =
+		(u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+	u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
+	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	unsigned long flags;
+	u8 deviceType = pPayload->sas_identify.dev_type;
+
+	port->port_state = portstate;
+	phy->phy_state = PHY_STATE_LINK_UP_SPCV;
+	PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+		"portid:%d; phyid:%d; linkrate:%d; "
+		"portstate:%x; devicetype:%x\n",
+		port_id, phy_id, link_rate, portstate, deviceType));
+
+	switch (deviceType) {
+	case SAS_PHY_UNUSED:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("device type: no device.\n"));
+		break;
+	case SAS_END_DEVICE:
+		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+		pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
+			PHY_NOTIFY_ENABLE_SPINUP);
+		port->port_attached = 1;
+		pm8001_get_lrate_mode(phy, link_rate);
+		break;
+	case SAS_EDGE_EXPANDER_DEVICE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("expander device.\n"));
+		port->port_attached = 1;
+		pm8001_get_lrate_mode(phy, link_rate);
+		break;
+	case SAS_FANOUT_EXPANDER_DEVICE:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("fanout expander device.\n"));
+		port->port_attached = 1;
+		pm8001_get_lrate_mode(phy, link_rate);
+		break;
+	default:
+		PM8001_MSG_DBG(pm8001_ha,
+			pm8001_printk("unknown device type (%x)\n", deviceType));
+		break;
+	}
+	phy->phy_type |= PORT_TYPE_SAS;
+	phy->identify.device_type = deviceType;
+	phy->phy_attached = 1;
+	if (phy->identify.device_type == SAS_END_DEVICE)
+		phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
+	else if (phy->identify.device_type != SAS_PHY_UNUSED)
+		phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
+	phy->sas_phy.oob_mode = SAS_OOB_MODE;
+	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+	memcpy(phy->frame_rcvd, &pPayload->sas_identify,
+		sizeof(struct sas_identify_frame)-4);
+	phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
+	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+	if (pm8001_ha->flags == PM8001F_RUN_TIME)
+		mdelay(200);	/* delay a moment to wait for the disk to spin up */
+	pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_sata_phy_up - FW tells us a SATA phy is up.
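+ *
+ * This mirrors hw_event_sas_phy_up, except that the phy is put into
+ * SATA_OOB_MODE and the frame stashed for libsas is the device-to-host
+ * FIS rather than a SAS IDENTIFY frame.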
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+ struct hw_event_resp *pPayload =
+ (struct hw_event_resp *)(piomb + 4);
+ u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+ u32 lr_status_evt_portid =
+ le32_to_cpu(pPayload->lr_status_evt_portid);
+ u8 link_rate =
+ (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+ u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+ u8 phy_id =
+ (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+ u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+ struct pm8001_port *port = &pm8001_ha->port[port_id];
+ struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ unsigned long flags;
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "port id %d, phy id %d link_rate %d portstate 0x%x\n",
+ port_id, phy_id, link_rate, portstate));
+
+ port->port_state = portstate;
+ phy->phy_state = PHY_STATE_LINK_UP_SPCV;
+ port->port_attached = 1;
+ pm8001_get_lrate_mode(phy, link_rate);
+ phy->phy_type |= PORT_TYPE_SATA;
+ phy->phy_attached = 1;
+ phy->sas_phy.oob_mode = SATA_OOB_MODE;
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+ sizeof(struct dev_to_host_fis));
+ phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+ phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+ phy->identify.device_type = SAS_SATA_DEV;
+ pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+ spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+ pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
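Both phy-up handlers decode the same two little-endian dwords of struct hw_event_resp by hand. A standalone sketch of that extraction (not part of the patch; struct and function names here are hypothetical), using exactly the masks and shifts the handlers apply:

#include <stdint.h>
#include <stdio.h>

/* Field layout implied by the shifts above:
 * lr_status_evt_portid:  [31:28] link rate, [27:24] status,
 *                        [23:8] event type, [7:0] port id
 * phyid_npip_portstate:  [23:16] phy id, [3:0] port state */
struct hw_event_fields {
	uint8_t link_rate;
	uint8_t status;
	uint16_t event;
	uint8_t port_id;
	uint8_t phy_id;
	uint8_t portstate;
};

static struct hw_event_fields decode_hw_event(uint32_t lr_status_evt_portid,
					      uint32_t phyid_npip_portstate)
{
	struct hw_event_fields f = {
		.link_rate = (lr_status_evt_portid & 0xF0000000) >> 28,
		.status    = (lr_status_evt_portid & 0x0F000000) >> 24,
		.event     = (lr_status_evt_portid & 0x00FFFF00) >> 8,
		.port_id   = lr_status_evt_portid & 0x000000FF,
		.phy_id    = (phyid_npip_portstate & 0x00FF0000) >> 16,
		.portstate = phyid_npip_portstate & 0x0000000F,
	};
	return f;
}

int main(void)
{
	struct hw_event_fields f = decode_hw_event(0x90000102, 0x00050001);

	printf("rate %d port %d phy %d state %d\n",
	       f.link_rate, f.port_id, f.phy_id, f.portstate);
	return 0;
}

+/**
+ * hw_event_phy_down - we should notify libsas that the phy is down.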
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + port->port_state = portstate; + phy->phy_type = 0; + phy->identify.device_type = 0; + phy->phy_attached = 0; + memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE); + switch (portstate) { + case PORT_VALID: + break; + case PORT_INVALID: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" PortInvalid portID %d\n", port_id)); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Last phy Down and port invalid\n")); + port->port_attached = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + break; + case PORT_IN_RESET: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Port In Reset portID %d\n", port_id)); + break; + case PORT_NOT_ESTABLISHED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n")); + port->port_attached = 0; + break; + case PORT_LOSTCOMM: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and PORT_LOSTCOMM\n")); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Last phy Down and port invalid\n")); + port->port_attached = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + break; + default: + port->port_attached = 0; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and(default) = 0x%x\n", + portstate)); + break; + + } +} + +static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_start_resp *pPayload = + (struct phy_start_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phy_id = + le32_to_cpu(pPayload->phyid); + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n", + status, phy_id)); + if (status == 0) { + phy->phy_state = 1; + if (pm8001_ha->flags == PM8001F_RUN_TIME) + complete(phy->enable_completion); + } + return 0; + +} + +/** + * mpi_thermal_hw_event -The hw event has come. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct thermal_hw_event *pPayload = + (struct thermal_hw_event *)(piomb + 4); + + u32 thermal_event = le32_to_cpu(pPayload->thermal_event); + u32 rht_lht = le32_to_cpu(pPayload->rht_lht); + + if (thermal_event & 0x40) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Local high temperature violated!\n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Measured local high temperature %d\n", + ((rht_lht & 0xFF00) >> 8))); + } + if (thermal_event & 0x10) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Remote high temperature violated!\n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Measured remote high temperature %d\n", + ((rht_lht & 0xFF000000) >> 24))); + } + return 0; +} + +/** + * mpi_hw_event -The hw event has come. 
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + unsigned long flags; + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u16 eventType = + (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8); + u8 status = + (u8)((lr_status_evt_portid & 0x0F000000) >> 24); + + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n", + port_id, phy_id, eventType, status)); + + switch (eventType) { + + case HW_EVENT_SAS_PHY_UP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_START_STATUS\n")); + hw_event_sas_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_PHY_UP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_SATA_PHY_UP\n")); + hw_event_sata_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_SPINUP_HOLD: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n")); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD); + break; + case HW_EVENT_PHY_DOWN: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_DOWN\n")); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL); + phy->phy_attached = 0; + phy->phy_state = 0; + hw_event_phy_down(pm8001_ha, piomb); + break; + case HW_EVENT_PORT_INVALID: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_INVALID\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + /* the broadcast change primitive received, tell the LIBSAS this event + to revalidate the sas domain*/ + case HW_EVENT_BROADCAST_CHANGE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE, + port_id, phy_id, 1, 0); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_PHY_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_ERROR\n")); + sas_phy_disconnected(&phy->sas_phy); + phy->phy_attached = 0; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR); + break; + case HW_EVENT_BROADCAST_EXP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_EXP\n")); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_LINK_ERR_INVALID_DWORD: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_DISPARITY_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 
0, + HW_EVENT_LINK_ERR_DISPARITY_ERROR, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_CODE_VIOLATION: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_CODE_VIOLATION, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_MALFUNCTION: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_MALFUNCTION\n")); + break; + case HW_EVENT_BROADCAST_SES: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_SES\n")); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_SES; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_INBOUND_CRC_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_INBOUND_CRC_ERROR, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_HARD_RESET_RECEIVED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n")); + sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET); + break; + case HW_EVENT_ID_FRAME_TIMEOUT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_PHY_RESET_FAILED, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RESET_TIMER_TMO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RECOVERY_TIMER_TMO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_PORT_RECOVERY_TIMER_TMO, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RECOVER: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RECOVER\n")); + break; + case HW_EVENT_PORT_RESET_COMPLETE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n")); + break; + case EVENT_BROADCAST_ASYNCH_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n")); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("Unknown event type 0x%x\n", eventType)); + break; + } + return 0; +} + +/** + * mpi_phy_stop_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message 
buffer + */ +static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_stop_resp *pPayload = + (struct phy_stop_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phyid = + le32_to_cpu(pPayload->phyid); + struct pm8001_phy *phy = &pm8001_ha->phy[phyid]; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("phy:0x%x status:0x%x\n", + phyid, status)); + if (status == 0) + phy->phy_state = 0; + return 0; +} + +/** + * mpi_set_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct set_ctrl_cfg_resp *pPayload = + (struct set_ctrl_cfg_resp *)(piomb + 4); + u32 status = le32_to_cpu(pPayload->status); + u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd); + + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n", + status, err_qlfr_pgcd)); + + return 0; +} + +/** + * mpi_get_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_get_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_flash_op_ext_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_set_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + u8 page_code; + struct set_phy_profile_resp *pPayload = + (struct set_phy_profile_resp *)(piomb + 4); + u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid); + u32 status = le32_to_cpu(pPayload->status); + + page_code = (u8)((ppc_phyid & 0xFF00) >> 8); + if (status) { + /* status is FAILED */ + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("PhyProfile command failed with status " + "0x%08X \n", status)); + return -1; + } else { + if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Invalid page code 0x%X\n", + page_code)); + return -1; + } + } + return 0; +} + +/** + * mpi_kek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4); + + u32 status = le32_to_cpu(pPayload->status); + u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop); + u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr); + + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "KEK MGMT RESP. 
Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n", + status, kidx_new_curr_ksop, err_qlfr)); + + return 0; +} + +/** + * mpi_dek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * ssp_coalesced_comp_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * process_one_iomb - process one outbound Queue memory block + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + __le32 pHeader = *(__le32 *)piomb; + u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF); + + switch (opc) { + case OPC_OUB_ECHO: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n")); + break; + case OPC_OUB_HW_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_HW_EVENT\n")); + mpi_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_THERM_HW_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_THERMAL_EVENT\n")); + mpi_thermal_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_COMP\n")); + mpi_ssp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SMP_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SMP_COMP\n")); + mpi_smp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_LOCAL_PHY_CNTRL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); + pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_REGIST: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEV_REGIST\n")); + pm8001_mpi_reg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEREG_DEV: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("unregister the device\n")); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEV_HANDLE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n")); + break; + case OPC_OUB_SATA_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_COMP\n")); + mpi_sata_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_EVENT\n")); + mpi_sata_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_EVENT\n")); + mpi_ssp_event(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_HANDLE_ARRIV: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n")); + /*This is for target*/ + break; + case OPC_OUB_SSP_RECV_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n")); + /*This is for target*/ + break; + case OPC_OUB_FW_FLASH_UPDATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GPIO_RESPONSE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GPIO_RESPONSE\n")); + break; + case OPC_OUB_GPIO_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GPIO_EVENT\n")); + break; + case OPC_OUB_GENERAL_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); + pm8001_mpi_general_event(pm8001_ha, piomb); + 
break; + case OPC_OUB_SSP_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SAS_DIAG_MODE_START_END: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n")); + break; + case OPC_OUB_SAS_DIAG_EXECUTE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n")); + break; + case OPC_OUB_GET_TIME_STAMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_TIME_STAMP\n")); + break; + case OPC_OUB_SAS_HW_EVENT_ACK: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n")); + break; + case OPC_OUB_PORT_CONTROL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_PORT_CONTROL\n")); + break; + case OPC_OUB_SMP_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_NVMD_DATA: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_NVMD_DATA: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEVICE_HANDLE_REMOVAL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n")); + break; + case OPC_OUB_SET_DEVICE_STATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEVICE_STATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n")); + break; + case OPC_OUB_SET_DEV_INFO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_DEV_INFO\n")); + break; + /* spcv specifc commands */ + case OPC_OUB_PHY_START_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_PHY_START_RESP opcode:%x\n", opc)); + mpi_phy_start_resp(pm8001_ha, piomb); + break; + case OPC_OUB_PHY_STOP_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc)); + mpi_phy_stop_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_CONTROLLER_CONFIG: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc)); + mpi_set_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_CONTROLLER_CONFIG: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc)); + mpi_get_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_PHY_PROFILE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc)); + mpi_get_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_FLASH_OP_EXT: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc)); + mpi_flash_op_ext_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_PHY_PROFILE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc)); + mpi_set_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_KEK_MANAGEMENT_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc)); + mpi_kek_management_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEK_MANAGEMENT_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc)); + 
mpi_dek_management_resp(pm8001_ha, piomb);
+ break;
+ case OPC_OUB_SSP_COALESCED_COMP_RESP:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc));
+ ssp_coalesced_comp_resp(pm8001_ha, piomb);
+ break;
+ default:
+ PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+ "Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
+ break;
+ }
+}
+
+static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+ struct outbound_queue_table *circularQ;
+ void *pMsg1 = NULL;
+ u8 uninitialized_var(bc);
+ u32 ret = MPI_IO_STATUS_FAIL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
+ circularQ = &pm8001_ha->outbnd_q_tbl[vec];
+ do {
+ ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+ if (MPI_IO_STATUS_SUCCESS == ret) {
+ /* process the outbound message */
+ process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
+ /* free the message from the outbound circular buffer */
+ pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
+ circularQ, bc);
+ }
+ if (MPI_IO_STATUS_BUSY == ret) {
+ /* Update the producer index from SPC */
+ circularQ->producer_index =
+ cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
+ if (le32_to_cpu(circularQ->producer_index) ==
+ circularQ->consumer_idx)
+ /* OQ is empty */
+ break;
+ }
+ } while (1);
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ return ret;
+}
+
+/* PCI_DMA_... to our direction translation. */
+static const u8 data_dir_flags[] = {
+ [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+ [PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */
+ [PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */
+ [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
+};
+
+static void build_smp_cmd(u32 deviceID, __le32 hTag,
+ struct smp_req *psmp_cmd, int mode, int length)
+{
+ psmp_cmd->tag = hTag;
+ psmp_cmd->device_id = cpu_to_le32(deviceID);
+ if (mode == SMP_DIRECT) {
+ length = length - 4; /* subtract crc */
+ psmp_cmd->len_ip_ir = cpu_to_le32(length << 16);
+ } else {
+ psmp_cmd->len_ip_ir = cpu_to_le32(1 | (1 << 1));
+ }
+}
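process_oq() is a single-consumer ring drain: it consumes while its snapshot of the producer index runs ahead of the consumer index, and when the snapshot runs dry it re-reads the producer index from the memory location the controller updates before declaring the queue empty. A compilable sketch of just that loop shape, with hypothetical names rather than the driver's API (not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define OQ_DEPTH 8	/* hypothetical queue depth */

struct oq {
	uint32_t entries[OQ_DEPTH];
	uint32_t hw_pi;		/* producer index the controller writes */
	uint32_t cached_pi;	/* driver's last snapshot of hw_pi */
	uint32_t ci;		/* consumer index, driver owned */
};

/* stands in for pm8001_read_32(circularQ->pi_virt) */
static uint32_t read_hw_pi(const struct oq *q)
{
	return q->hw_pi;
}

static void drain_oq(struct oq *q)
{
	for (;;) {
		if (q->ci != q->cached_pi) {
			printf("consume IOMB 0x%x\n",
			       q->entries[q->ci % OQ_DEPTH]);
			q->ci++;	/* real driver also frees the slot */
			continue;
		}
		/* snapshot ran dry: refresh from the hardware copy */
		q->cached_pi = read_hw_pi(q);
		if (q->ci == q->cached_pi)
			break;		/* OQ really is empty */
	}
}

int main(void)
{
	/* three queued IOMBs: HW_EVENT, SSP_COMP, SATA_COMP opcodes */
	struct oq q = { .entries = { 0x700, 0x005, 0x00d }, .hw_pi = 3 };

	drain_oq(&q);
	return 0;
}

The driver does the same under pm8001_ha->lock and additionally frees each consumed slot back to the circular buffer via pm8001_mpi_msg_free_set().

+/**
+ * pm80xx_chip_smp_req - send an SMP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.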
+ */ +static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + int elem, rc; + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct scatterlist *sg_req, *sg_resp; + u32 req_len, resp_len; + struct smp_req smp_cmd; + u32 opc; + struct inbound_queue_table *circularQ; + char *preq_dma_addr = NULL; + __le64 tmp_addr; + u32 i, length; + + memset(&smp_cmd, 0, sizeof(smp_cmd)); + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = sg_dma_len(sg_resp); + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_2; + } + + opc = OPC_INB_SMP_REQUEST; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + smp_cmd.tag = cpu_to_le32(ccb->ccb_tag); + + length = sg_req->length; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP Frame Length %d\n", sg_req->length)); + if (!(length - 8)) + pm8001_ha->smp_exp_mode = SMP_DIRECT; + else + pm8001_ha->smp_exp_mode = SMP_INDIRECT; + + + tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); + preq_dma_addr = (char *)phys_to_virt(tmp_addr); + + /* INDIRECT MODE command settings. Use DMA */ + if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP REQUEST INDIRECT MODE\n")); + /* for SPCv indirect mode. Place the top 4 bytes of + * SMP Request header here. */ + for (i = 0; i < 4; i++) + smp_cmd.smp_req16[i] = *(preq_dma_addr + i); + /* exclude top 4 bytes for SMP req header */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req) + 4); + /* exclude 4 bytes for SMP req header and CRC */ + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32((u32)sg_dma_len + (&task->smp_task.smp_resp)-4); + } else { /* DIRECT MODE */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req)); + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32 + ((u32)sg_dma_len(&task->smp_task.smp_resp)-4); + } + if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP REQUEST DIRECT MODE\n")); + for (i = 0; i < length; i++) + if (i < 16) { + smp_cmd.smp_req16[i] = *(preq_dma_addr+i); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req16[i], + *(preq_dma_addr))); + } else { + smp_cmd.smp_req[i] = *(preq_dma_addr+i); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req[i], + *(preq_dma_addr))); + } + } + + build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, + &smp_cmd, pm8001_ha->smp_exp_mode, length); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0); + return 0; + +err_out_2: + dma_unmap_sg(pm8001_ha->dev, 
&ccb->task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); +err_out: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); + return rc; +} + +static int check_enc_sas_cmd(struct sas_task *task) +{ + u8 cmd = task->ssp_task.cmd->cmnd[0]; + + if (cmd == READ_10 || cmd == WRITE_10 || cmd == WRITE_VERIFY) + return 1; + else + return 0; +} + +static int check_enc_sat_cmd(struct sas_task *task) +{ + int ret = 0; + switch (task->ata_task.fis.command) { + case ATA_CMD_FPDMA_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_READ: + case ATA_CMD_FPDMA_WRITE: + case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE: + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + ret = 1; + break; + default: + ret = 0; + break; + } + return ret; +} + +/** + * pm80xx_chip_ssp_io_req - send a SSP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. + */ +static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct ssp_ini_io_start_req ssp_cmd; + u32 tag = ccb->ccb_tag; + int ret; + u64 phys_addr, start_addr, end_addr; + u32 end_addr_high, end_addr_low; + struct inbound_queue_table *circularQ; + u32 q_index; + u32 opc = OPC_INB_SSPINIIOSTART; + memset(&ssp_cmd, 0, sizeof(ssp_cmd)); + memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); + /* data address domain added for spcv; set to 0 by host, + * used internally by controller + * 0 for SAS 1.1 and SAS 2.0 compatible TLR + */ + ssp_cmd.dad_dir_m_tlr = + cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0); + ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); + ssp_cmd.tag = cpu_to_le32(tag); + if (task->ssp_task.enable_first_burst) + ssp_cmd.ssp_iu.efb_prio_attr |= 0x80; + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3); + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); + memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, + task->ssp_task.cmd->cmd_len); + q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM; + circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Encryption enabled.Sending Encrypt SAS command 0x%x\n", + task->ssp_task.cmd->cmnd[0])); + opc = OPC_INB_SSP_INI_DIF_ENC_IO; + /* enable encryption. 
0 for SAS 1.1 and SAS 2.0 compatible TLR*/ + ssp_cmd.dad_dir_m_tlr = cpu_to_le32 + ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.enc_esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + /* Check 4G Boundary */ + start_addr = cpu_to_le64(dma_addr); + end_addr = (start_addr + ssp_cmd.enc_len) - 1; + end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); + end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); + if (end_addr_high != ssp_cmd.enc_addr_high) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("The sg list address " + "start_addr=0x%016llx data_len=0x%x " + "end_addr_high=0x%08x end_addr_low=" + "0x%08x has crossed 4G boundary\n", + start_addr, ssp_cmd.enc_len, + end_addr_high, end_addr_low)); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, + buf_prd[0]); + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.enc_esgl = cpu_to_le32(1<<31); + } + } else if (task->num_scatter == 0) { + ssp_cmd.enc_addr_low = 0; + ssp_cmd.enc_addr_high = 0; + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + } + /* XTS mode. All other fields are 0 */ + ssp_cmd.key_cmode = 0x6 << 4; + /* set tweak values. 
Should be the start lba */ + ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) | + (task->ssp_task.cmd->cmnd[3] << 16) | + (task->ssp_task.cmd->cmnd[4] << 8) | + (task->ssp_task.cmd->cmnd[5])); + } else { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Sending Normal SAS command 0x%x inb q %x\n", + task->ssp_task.cmd->cmnd[0], q_index)); + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, ccb->n_elem, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + ssp_cmd.addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + /* Check 4G Boundary */ + start_addr = cpu_to_le64(dma_addr); + end_addr = (start_addr + ssp_cmd.len) - 1; + end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); + end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); + if (end_addr_high != ssp_cmd.addr_high) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("The sg list address " + "start_addr=0x%016llx data_len=0x%x " + "end_addr_high=0x%08x end_addr_low=" + "0x%08x has crossed 4G boundary\n", + start_addr, ssp_cmd.len, + end_addr_high, end_addr_low)); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, + buf_prd[0]); + ssp_cmd.addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } + } else if (task->num_scatter == 0) { + ssp_cmd.addr_low = 0; + ssp_cmd.addr_high = 0; + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } + } + q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM; + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, + &ssp_cmd, q_index); + return ret; +} + +static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; + u32 tag = ccb->ccb_tag; + int ret; + u32 q_index; + struct sata_start_req sata_cmd; + u32 hdr_tag, ncg_tag = 0; + u64 phys_addr, start_addr, end_addr; + u32 end_addr_high, end_addr_low; + u32 ATAP = 0x0; + u32 dir; + struct inbound_queue_table *circularQ; + unsigned long flags; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + memset(&sata_cmd, 0, sizeof(sata_cmd)); + q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM; + circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; + + if (task->data_dir == PCI_DMA_NONE) { + ATAP = 0x04; /* no data*/ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n")); + } else if (likely(!task->ata_task.device_control_reg_update)) { + if (task->ata_task.dma_xfer) { + ATAP = 0x06; /* DMA */ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n")); + } else { + ATAP = 0x05; /* PIO*/ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n")); + } + if (task->ata_task.use_ncq && + dev->sata_dev.command_set != ATAPI_COMMAND_SET) { + ATAP = 0x07; /* FPDMA */ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); + } + } + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + 
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + ncg_tag = hdr_tag; + } + dir = data_dir_flags[task->data_dir] << 8; + sata_cmd.tag = cpu_to_le32(tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); + + sata_cmd.sata_fis = task->ata_task.fis; + if (likely(!task->ata_task.device_control_reg_update)) + sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */ + sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */ + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n", + sata_cmd.sata_fis.command)); + opc = OPC_INB_SATA_DIF_ENC_IO; + + /* set encryption bit */ + sata_cmd.ncqtag_atap_dir_m_dad = + cpu_to_le32(((ncg_tag & 0xff)<<16)| + ((ATAP & 0x3f) << 10) | 0x20 | dir); + /* dad (bit 0-1) is 0 */ + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + sata_cmd.enc_addr_low = lower_32_bits(phys_addr); + sata_cmd.enc_addr_high = upper_32_bits(phys_addr); + sata_cmd.enc_esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.enc_addr_low = lower_32_bits(dma_addr); + sata_cmd.enc_addr_high = upper_32_bits(dma_addr); + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + /* Check 4G Boundary */ + start_addr = cpu_to_le64(dma_addr); + end_addr = (start_addr + sata_cmd.enc_len) - 1; + end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); + end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); + if (end_addr_high != sata_cmd.enc_addr_high) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("The sg list address " + "start_addr=0x%016llx data_len=0x%x " + "end_addr_high=0x%08x end_addr_low" + "=0x%08x has crossed 4G boundary\n", + start_addr, sata_cmd.enc_len, + end_addr_high, end_addr_low)); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, + buf_prd[0]); + sata_cmd.enc_addr_low = + lower_32_bits(phys_addr); + sata_cmd.enc_addr_high = + upper_32_bits(phys_addr); + sata_cmd.enc_esgl = + cpu_to_le32(1 << 31); + } + } else if (task->num_scatter == 0) { + sata_cmd.enc_addr_low = 0; + sata_cmd.enc_addr_high = 0; + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + } + /* XTS mode. All other fields are 0 */ + sata_cmd.key_index_mode = 0x6 << 4; + /* set tweak values. 
Should be the start lba */ + sata_cmd.twk_val0 = + cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) | + (sata_cmd.sata_fis.lbah << 16) | + (sata_cmd.sata_fis.lbam << 8) | + (sata_cmd.sata_fis.lbal)); + sata_cmd.twk_val1 = + cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) | + (sata_cmd.sata_fis.lbam_exp)); + } else { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Sending Normal SATA command 0x%x inb %x\n", + sata_cmd.sata_fis.command, q_index)); + /* dad (bit 0-1) is 0 */ + sata_cmd.ncqtag_atap_dir_m_dad = + cpu_to_le32(((ncg_tag & 0xff)<<16) | + ((ATAP & 0x3f) << 10) | dir); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.addr_low = lower_32_bits(dma_addr); + sata_cmd.addr_high = upper_32_bits(dma_addr); + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + /* Check 4G Boundary */ + start_addr = cpu_to_le64(dma_addr); + end_addr = (start_addr + sata_cmd.len) - 1; + end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); + end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); + if (end_addr_high != sata_cmd.addr_high) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("The sg list address " + "start_addr=0x%016llx data_len=0x%x" + "end_addr_high=0x%08x end_addr_low=" + "0x%08x has crossed 4G boundary\n", + start_addr, sata_cmd.len, + end_addr_high, end_addr_low)); + pm8001_chip_make_sg(task->scatter, 1, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, + buf_prd[0]); + sata_cmd.addr_low = + lower_32_bits(phys_addr); + sata_cmd.addr_high = + upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1 << 31); + } + } else if (task->num_scatter == 0) { + sata_cmd.addr_low = 0; + sata_cmd.addr_high = 0; + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } + /* scsi cdb */ + sata_cmd.atapi_scsi_cdb[0] = + cpu_to_le32(((task->ata_task.atapi_packet[0]) | + (task->ata_task.atapi_packet[1] << 8) | + (task->ata_task.atapi_packet[2] << 16) | + (task->ata_task.atapi_packet[3] << 24))); + sata_cmd.atapi_scsi_cdb[1] = + cpu_to_le32(((task->ata_task.atapi_packet[4]) | + (task->ata_task.atapi_packet[5] << 8) | + (task->ata_task.atapi_packet[6] << 16) | + (task->ata_task.atapi_packet[7] << 24))); + sata_cmd.atapi_scsi_cdb[2] = + cpu_to_le32(((task->ata_task.atapi_packet[8]) | + (task->ata_task.atapi_packet[9] << 8) | + (task->ata_task.atapi_packet[10] << 16) | + (task->ata_task.atapi_packet[11] << 24))); + sata_cmd.atapi_scsi_cdb[3] = + cpu_to_le32(((task->ata_task.atapi_packet[12]) | + (task->ata_task.atapi_packet[13] << 8) | + (task->ata_task.atapi_packet[14] << 16) | + (task->ata_task.atapi_packet[15] << 24))); + } + + /* Check for read log for failed drive and return */ + if (sata_cmd.sata_fis.command == 0x2f) { + if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || + (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || + (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { + struct task_status_struct *ts; + + pm8001_ha_dev->id &= 0xDFFFFFFF; + ts = &task->task_status; + + spin_lock_irqsave(&task->task_state_lock, flags); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + task->task_state_flags &= 
~SAS_TASK_STATE_PENDING;
+ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ if (unlikely((task->task_state_flags &
+ SAS_TASK_STATE_ABORTED))) {
+ spin_unlock_irqrestore(&task->task_state_lock,
+ flags);
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("task 0x%p resp 0x%x "
+ " stat 0x%x but aborted by upper layer "
+ "\n", task, ts->resp, ts->stat));
+ pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ return 0;
+ } else {
+ spin_unlock_irqrestore(&task->task_state_lock,
+ flags);
+ pm8001_ccb_task_free_done(pm8001_ha, task,
+ ccb, tag);
+ return 0;
+ }
+ }
+ }
+ q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+ &sata_cmd, q_index);
+ return ret;
+}
+
+/**
+ * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we want to start up.
+ */
+static int
+pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+{
+ struct phy_start_req payload;
+ struct inbound_queue_table *circularQ;
+ int ret;
+ u32 tag = 0x01;
+ u32 opcode = OPC_INB_PHYSTART;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ memset(&payload, 0, sizeof(payload));
+ payload.tag = cpu_to_le32(tag);
+
+ PM8001_INIT_DBG(pm8001_ha,
+ pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
+ /*
+ ** [0:7] PHY Identifier
+ ** [8:11] link rate 1.5G, 3G, 6G
+ ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode
+ ** [14] 0b disable spin up hold; 1b enable spin up hold
+ ** [15] 0b no change in current PHY analog setup; 1b enable using SPAST
+ */
+ if (!IS_SPCV_12G(pm8001_ha->pdev))
+ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+ LINKMODE_AUTO | LINKRATE_15 |
+ LINKRATE_30 | LINKRATE_60 | phy_id);
+ else
+ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+ LINKMODE_AUTO | LINKRATE_15 |
+ LINKRATE_30 | LINKRATE_60 | LINKRATE_120 |
+ phy_id);
+
+ /* SSC Disable and SAS Analog ST configuration */
+ /**
+ payload.ase_sh_lm_slr_phyid =
+ cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE |
+ LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
+ phy_id);
+ Have to add "SAS PHY Analog Setup SPASTI 1 Byte" based on need
+ **/
+
+ payload.sas_identify.dev_type = SAS_END_DEVICE;
+ payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+ memcpy(payload.sas_identify.sas_addr,
+ pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+ payload.sas_identify.phy_id = phy_id;
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ return ret;
+}
+
+/**
+ * pm80xx_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we want to stop.
+ */
+static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+ u8 phy_id)
+{
+ struct phy_stop_req payload;
+ struct inbound_queue_table *circularQ;
+ int ret;
+ u32 tag = 0x01;
+ u32 opcode = OPC_INB_PHYSTOP;
+ circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ memset(&payload, 0, sizeof(payload));
+ payload.tag = cpu_to_le32(tag);
+ payload.phy_id = cpu_to_le32(phy_id);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+ return ret;
+}
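The PHY_START dword is the plain OR of the spin-hold, link-mode and link-rate fields with the phy id, per the bit map commented above; the only 12G-specific difference is the extra LINKRATE_120 bit. A standalone sketch (not part of the patch) using the values these macros carry in pm80xx_hwi.h:

#include <stdint.h>
#include <stdio.h>

/* Values copied from the LINKRATE / LINKMODE / SPINHOLD defines in
 * pm80xx_hwi.h; the dword layout is the one commented in
 * pm80xx_chip_phy_start_req above. */
#define SPINHOLD_DISABLE	(0x00 << 14)
#define LINKMODE_AUTO		(0x03 << 12)
#define LINKRATE_15		(0x01 << 8)
#define LINKRATE_30		(0x02 << 8)
#define LINKRATE_60		(0x06 << 8)
#define LINKRATE_120		(0x08 << 8)

static uint32_t phy_start_dword(uint8_t phy_id, int is_12g)
{
	uint32_t v = SPINHOLD_DISABLE | LINKMODE_AUTO |
		     LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | phy_id;

	if (is_12g)	/* SPCv 12G parts advertise 12Gb/s as well */
		v |= LINKRATE_120;
	return v;
}

int main(void)
{
	printf("phy 0, 6G part:  0x%08x\n", phy_start_dword(0, 0)); /* 0x3700 */
	printf("phy 3, 12G part: 0x%08x\n", phy_start_dword(3, 1)); /* 0x3f03 */
	return 0;
}

+/**
+ * see comments on pm8001_mpi_reg_resp.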
+ */ +static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 flag) +{ + struct reg_dev_req payload; + u32 opc; + u32 stp_sspsmp_sata = 0x4; + struct inbound_queue_table *circularQ; + u32 linkrate, phy_id; + int rc, tag = 0xdeadbeef; + struct pm8001_ccb_info *ccb; + u8 retryFlag = 0x1; + u16 firstBurstSize = 0; + u16 ITNT = 2000; + struct domain_device *dev = pm8001_dev->sas_device; + struct domain_device *parent_dev = dev->parent; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&payload, 0, sizeof(payload)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + ccb = &pm8001_ha->ccb_info[tag]; + ccb->device = pm8001_dev; + ccb->ccb_tag = tag; + payload.tag = cpu_to_le32(tag); + + if (flag == 1) { + stp_sspsmp_sata = 0x02; /*direct attached sata */ + } else { + if (pm8001_dev->dev_type == SAS_SATA_DEV) + stp_sspsmp_sata = 0x00; /* stp*/ + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) + stp_sspsmp_sata = 0x01; /*ssp or smp*/ + } + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) + phy_id = parent_dev->ex_dev.ex_phy->phy_id; + else + phy_id = pm8001_dev->attached_phy; + + opc = OPC_INB_REG_DEV; + + linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ? + pm8001_dev->sas_device->linkrate : dev->port->linkrate; + + payload.phyid_portid = + cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) | + ((phy_id & 0xFF) << 8)); + + payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) | + ((linkrate & 0x0F) << 24) | + ((stp_sspsmp_sata & 0x03) << 28)); + payload.firstburstsize_ITNexustimeout = + cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); + + memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, + SAS_ADDR_SIZE); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + + return rc; +} + +/** + * pm80xx_chip_phy_ctl_req - support the local phy operation + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to operate + * @phy_op: + */ +static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op) +{ + struct local_phy_ctl_req payload; + struct inbound_queue_table *circularQ; + int ret; + u32 opc = OPC_INB_LOCAL_PHY_CONTROL; + memset(&payload, 0, sizeof(payload)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(1); + payload.phyop_phyid = + cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF)); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + return ret; +} + +static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) +{ + u32 value; +#ifdef PM8001_USE_MSIX + return 1; +#endif + value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR); + if (value) + return 1; + return 0; + +} + +/** + * pm8001_chip_isr - PM8001 isr handler. + * @pm8001_ha: our hba card information. + * @irq: irq number. + * @stat: stat. 
+ */ +static irqreturn_t +pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + pm80xx_chip_interrupt_disable(pm8001_ha, vec); + process_oq(pm8001_ha, vec); + pm80xx_chip_interrupt_enable(pm8001_ha, vec); + return IRQ_HANDLED; +} + +void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha, + u32 operation, u32 phyid, u32 length, u32 *buf) +{ + u32 tag , i, j = 0; + int rc; + struct set_phy_profile_req payload; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SET_PHY_PROFILE; + + memset(&payload, 0, sizeof(payload)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Invalid tag\n")); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid & 0xFF)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk(" phy profile command for phy %x ,length is %d\n", + payload.ppc_phyid, length)); + for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) { + payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i)); + j++; + } + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); +} + +void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, + u32 length, u8 *buf) +{ + u32 page_code, i; + + page_code = SAS_PHY_ANALOG_SETTINGS_PAGE; + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { + mpi_set_phy_profile_req(pm8001_ha, + SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf); + length = length + PHY_DWORD_LENGTH; + } + PM8001_INIT_DBG(pm8001_ha, pm8001_printk("phy settings completed\n")); +} +const struct pm8001_dispatch pm8001_80xx_dispatch = { + .name = "pmc80xx", + .chip_init = pm80xx_chip_init, + .chip_soft_rst = pm80xx_chip_soft_rst, + .chip_rst = pm80xx_hw_chip_rst, + .chip_iounmap = pm8001_chip_iounmap, + .isr = pm80xx_chip_isr, + .is_our_interupt = pm80xx_chip_is_our_interupt, + .isr_process_oq = process_oq, + .interrupt_enable = pm80xx_chip_interrupt_enable, + .interrupt_disable = pm80xx_chip_interrupt_disable, + .make_prd = pm8001_chip_make_sg, + .smp_req = pm80xx_chip_smp_req, + .ssp_io_req = pm80xx_chip_ssp_io_req, + .sata_req = pm80xx_chip_sata_req, + .phy_start_req = pm80xx_chip_phy_start_req, + .phy_stop_req = pm80xx_chip_phy_stop_req, + .reg_dev_req = pm80xx_chip_reg_dev_req, + .dereg_dev_req = pm8001_chip_dereg_dev_req, + .phy_ctl_req = pm80xx_chip_phy_ctl_req, + .task_abort = pm8001_chip_abort_task, + .ssp_tm_req = pm8001_chip_ssp_tm_req, + .get_nvmd_req = pm8001_chip_get_nvmd_req, + .set_nvmd_req = pm8001_chip_set_nvmd_req, + .fw_flash_update_req = pm8001_chip_fw_flash_update_req, + .set_dev_state_req = pm8001_chip_set_dev_state_req, +}; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h new file mode 100644 index 00000000000..9970a385795 --- /dev/null +++ b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -0,0 +1,1532 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + +#ifndef _PMC8001_REG_H_ +#define _PMC8001_REG_H_ + +#include <linux/types.h> +#include <scsi/libsas.h> + +/* for Request Opcode of IOMB */ +#define OPC_INB_ECHO 1 /* 0x000 */ +#define OPC_INB_PHYSTART 4 /* 0x004 */ +#define OPC_INB_PHYSTOP 5 /* 0x005 */ +#define OPC_INB_SSPINIIOSTART 6 /* 0x006 */ +#define OPC_INB_SSPINITMSTART 7 /* 0x007 */ +/* 0x8 RESV IN SPCv */ +#define OPC_INB_RSVD 8 /* 0x008 */ +#define OPC_INB_DEV_HANDLE_ACCEPT 9 /* 0x009 */ +#define OPC_INB_SSPTGTIOSTART 10 /* 0x00A */ +#define OPC_INB_SSPTGTRSPSTART 11 /* 0x00B */ +/* 0xC, 0xD, 0xE removed in SPCv */ +#define OPC_INB_SSP_ABORT 15 /* 0x00F */ +#define OPC_INB_DEREG_DEV_HANDLE 16 /* 0x010 */ +#define OPC_INB_GET_DEV_HANDLE 17 /* 0x011 */ +#define OPC_INB_SMP_REQUEST 18 /* 0x012 */ +/* 0x13 SMP_RESPONSE is removed in SPCv */ +#define OPC_INB_SMP_ABORT 20 /* 0x014 */ +/* 0x16 RESV IN SPCv */ +#define OPC_INB_RSVD1 22 /* 0x016 */ +#define OPC_INB_SATA_HOST_OPSTART 23 /* 0x017 */ +#define OPC_INB_SATA_ABORT 24 /* 0x018 */ +#define OPC_INB_LOCAL_PHY_CONTROL 25 /* 0x019 */ +/* 0x1A RESV IN SPCv */ +#define OPC_INB_RSVD2 26 /* 0x01A */ +#define OPC_INB_FW_FLASH_UPDATE 32 /* 0x020 */ +#define OPC_INB_GPIO 34 /* 0x022 */ +#define OPC_INB_SAS_DIAG_MODE_START_END 35 /* 0x023 */ +#define OPC_INB_SAS_DIAG_EXECUTE 36 /* 0x024 */ +/* 0x25 RESV IN SPCv */ +#define OPC_INB_RSVD3 37 /* 0x025 */ +#define OPC_INB_GET_TIME_STAMP 38 /* 0x026 */ +#define OPC_INB_PORT_CONTROL 39 /* 0x027 */ +#define OPC_INB_GET_NVMD_DATA 40 /* 0x028 */ +#define OPC_INB_SET_NVMD_DATA 41 /* 0x029 */ +#define OPC_INB_SET_DEVICE_STATE 42 /* 0x02A */ +#define OPC_INB_GET_DEVICE_STATE 43 /* 0x02B */ +#define OPC_INB_SET_DEV_INFO 44 /* 0x02C */ +/* 0x2D RESV IN SPCv */ +#define OPC_INB_RSVD4 45 /* 0x02D */ +#define OPC_INB_SGPIO_REGISTER 46 /* 0x02E */ +#define OPC_INB_PCIE_DIAG_EXEC 47 /* 0x02F */ +#define OPC_INB_SET_CONTROLLER_CONFIG 48 /* 0x030 */ +#define OPC_INB_GET_CONTROLLER_CONFIG 49 /* 0x031 */ +#define OPC_INB_REG_DEV 50 /* 0x032 */ +#define OPC_INB_SAS_HW_EVENT_ACK 51 /* 0x033 */ 
+#define OPC_INB_GET_DEVICE_INFO 52 /* 0x034 */ +#define OPC_INB_GET_PHY_PROFILE 53 /* 0x035 */ +#define OPC_INB_FLASH_OP_EXT 54 /* 0x036 */ +#define OPC_INB_SET_PHY_PROFILE 55 /* 0x037 */ +#define OPC_INB_KEK_MANAGEMENT 256 /* 0x100 */ +#define OPC_INB_DEK_MANAGEMENT 257 /* 0x101 */ +#define OPC_INB_SSP_INI_DIF_ENC_IO 258 /* 0x102 */ +#define OPC_INB_SATA_DIF_ENC_IO 259 /* 0x103 */ + +/* for Response Opcode of IOMB */ +#define OPC_OUB_ECHO 1 /* 0x001 */ +#define OPC_OUB_RSVD 4 /* 0x004 */ +#define OPC_OUB_SSP_COMP 5 /* 0x005 */ +#define OPC_OUB_SMP_COMP 6 /* 0x006 */ +#define OPC_OUB_LOCAL_PHY_CNTRL 7 /* 0x007 */ +#define OPC_OUB_RSVD1 10 /* 0x00A */ +#define OPC_OUB_DEREG_DEV 11 /* 0x00B */ +#define OPC_OUB_GET_DEV_HANDLE 12 /* 0x00C */ +#define OPC_OUB_SATA_COMP 13 /* 0x00D */ +#define OPC_OUB_SATA_EVENT 14 /* 0x00E */ +#define OPC_OUB_SSP_EVENT 15 /* 0x00F */ +#define OPC_OUB_RSVD2 16 /* 0x010 */ +/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/ +#define OPC_OUB_SSP_RECV_EVENT 18 /* 0x012 */ +#define OPC_OUB_RSVD3 19 /* 0x013 */ +#define OPC_OUB_FW_FLASH_UPDATE 20 /* 0x014 */ +#define OPC_OUB_GPIO_RESPONSE 22 /* 0x016 */ +#define OPC_OUB_GPIO_EVENT 23 /* 0x017 */ +#define OPC_OUB_GENERAL_EVENT 24 /* 0x018 */ +#define OPC_OUB_SSP_ABORT_RSP 26 /* 0x01A */ +#define OPC_OUB_SATA_ABORT_RSP 27 /* 0x01B */ +#define OPC_OUB_SAS_DIAG_MODE_START_END 28 /* 0x01C */ +#define OPC_OUB_SAS_DIAG_EXECUTE 29 /* 0x01D */ +#define OPC_OUB_GET_TIME_STAMP 30 /* 0x01E */ +#define OPC_OUB_RSVD4 31 /* 0x01F */ +#define OPC_OUB_PORT_CONTROL 32 /* 0x020 */ +#define OPC_OUB_SKIP_ENTRY 33 /* 0x021 */ +#define OPC_OUB_SMP_ABORT_RSP 34 /* 0x022 */ +#define OPC_OUB_GET_NVMD_DATA 35 /* 0x023 */ +#define OPC_OUB_SET_NVMD_DATA 36 /* 0x024 */ +#define OPC_OUB_DEVICE_HANDLE_REMOVAL 37 /* 0x025 */ +#define OPC_OUB_SET_DEVICE_STATE 38 /* 0x026 */ +#define OPC_OUB_GET_DEVICE_STATE 39 /* 0x027 */ +#define OPC_OUB_SET_DEV_INFO 40 /* 0x028 */ +#define OPC_OUB_RSVD5 41 /* 0x029 */ +#define OPC_OUB_HW_EVENT 1792 /* 0x700 */ +#define OPC_OUB_DEV_HANDLE_ARRIV 1824 /* 0x720 */ +#define OPC_OUB_THERM_HW_EVENT 1840 /* 0x730 */ +#define OPC_OUB_SGPIO_RESP 2094 /* 0x82E */ +#define OPC_OUB_PCIE_DIAG_EXECUTE 2095 /* 0x82F */ +#define OPC_OUB_DEV_REGIST 2098 /* 0x832 */ +#define OPC_OUB_SAS_HW_EVENT_ACK 2099 /* 0x833 */ +#define OPC_OUB_GET_DEVICE_INFO 2100 /* 0x834 */ +/* spcv specific commands */ +#define OPC_OUB_PHY_START_RESP 2052 /* 0x804 */ +#define OPC_OUB_PHY_STOP_RESP 2053 /* 0x805 */ +#define OPC_OUB_SET_CONTROLLER_CONFIG 2096 /* 0x830 */ +#define OPC_OUB_GET_CONTROLLER_CONFIG 2097 /* 0x831 */ +#define OPC_OUB_GET_PHY_PROFILE 2101 /* 0x835 */ +#define OPC_OUB_FLASH_OP_EXT 2102 /* 0x836 */ +#define OPC_OUB_SET_PHY_PROFILE 2103 /* 0x837 */ +#define OPC_OUB_KEK_MANAGEMENT_RESP 2304 /* 0x900 */ +#define OPC_OUB_DEK_MANAGEMENT_RESP 2305 /* 0x901 */ +#define OPC_OUB_SSP_COALESCED_COMP_RESP 2306 /* 0x902 */ + +/* for phy start*/ +#define SSC_DISABLE_15 (0x01 << 16) +#define SSC_DISABLE_30 (0x02 << 16) +#define SSC_DISABLE_60 (0x04 << 16) +#define SAS_ASE (0x01 << 15) +#define SPINHOLD_DISABLE (0x00 << 14) +#define SPINHOLD_ENABLE (0x01 << 14) +#define LINKMODE_SAS (0x01 << 12) +#define LINKMODE_DSATA (0x02 << 12) +#define LINKMODE_AUTO (0x03 << 12) +#define LINKRATE_15 (0x01 << 8) +#define LINKRATE_30 (0x02 << 8) +#define LINKRATE_60 (0x06 << 8) +#define LINKRATE_120 (0x08 << 8) + +/* phy_profile */ +#define SAS_PHY_ANALOG_SETTINGS_PAGE 0x04 +#define PHY_DWORD_LENGTH 0xC + +/* Thermal related */ +#define THERMAL_ENABLE 0x1 
+#define THERMAL_LOG_ENABLE 0x1 +#define THERMAL_OP_CODE 0x6 +#define LTEMPHIL 70 +#define RTEMPHIL 100 + +/* Encryption info */ +#define SCRATCH_PAD3_ENC_DISABLED 0x00000000 +#define SCRATCH_PAD3_ENC_DIS_ERR 0x00000001 +#define SCRATCH_PAD3_ENC_ENA_ERR 0x00000002 +#define SCRATCH_PAD3_ENC_READY 0x00000003 +#define SCRATCH_PAD3_ENC_MASK SCRATCH_PAD3_ENC_READY + +#define SCRATCH_PAD3_XTS_ENABLED (1 << 14) +#define SCRATCH_PAD3_SMA_ENABLED (1 << 4) +#define SCRATCH_PAD3_SMB_ENABLED (1 << 5) +#define SCRATCH_PAD3_SMF_ENABLED 0 +#define SCRATCH_PAD3_SM_MASK 0x000000F0 +#define SCRATCH_PAD3_ERR_CODE 0x00FF0000 + +#define SEC_MODE_SMF 0x0 +#define SEC_MODE_SMA 0x100 +#define SEC_MODE_SMB 0x200 +#define CIPHER_MODE_ECB 0x00000001 +#define CIPHER_MODE_XTS 0x00000002 +#define KEK_MGMT_SUBOP_KEYCARDUPDATE 0x4 + +/* SAS protocol timer configuration page */ +#define SAS_PROTOCOL_TIMER_CONFIG_PAGE 0x04 +#define STP_MCT_TMO 32 +#define SSP_MCT_TMO 32 +#define SAS_MAX_OPEN_TIME 5 +#define SMP_MAX_CONN_TIMER 0xFF +#define STP_FRM_TIMER 0 +#define STP_IDLE_TIME 5 /* 5 us; controller default */ +#define SAS_MFD 0 +#define SAS_OPNRJT_RTRY_INTVL 2 +#define SAS_DOPNRJT_RTRY_TMO 128 +#define SAS_COPNRJT_RTRY_TMO 128 + +/* for phy state */ +#define PHY_STATE_LINK_UP_SPCV 0x2 +/* + Making ORR bigger than IT NEXUS LOSS, which is 2000000us = 2 seconds. + Assuming a bigger value of 3 seconds, 3000000/128 = 23437.5, where 128 + is DOPNRJT_RTRY_TMO +*/ +#define SAS_DOPNRJT_RTRY_THR 23438 +#define SAS_COPNRJT_RTRY_THR 23438 +#define SAS_MAX_AIP 0x200000 +#define IT_NEXUS_TIMEOUT 0x7D0 +#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30) + +struct mpi_msg_hdr { + __le32 header; /* Bits [11:0] - Message operation code */ + /* Bits [15:12] - Message Category */ + /* Bits [21:16] - Outbound queue ID for the + operation completion message */ + /* Bits [23:22] - Reserved */ + /* Bits [28:24] - Buffer Count, indicates how + many buffers are allocated for the message */ + /* Bits [30:29] - Reserved */ + /* Bits [31] - Message Valid bit */ +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Start Command + * used to enable the phy (128 bytes) + */ +struct phy_start_req { + __le32 tag; + __le32 ase_sh_lm_slr_phyid; + struct sas_identify_frame sas_identify; /* 28 Bytes */ + __le32 spasti; + u32 reserved[21]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Stop Command + * used to disable the phy (128 bytes) + */ +struct phy_stop_req { + __le32 tag; + __le32 phy_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* set device bits fis - device to host */ +struct set_dev_bits_fis { + u8 fis_type; /* 0xA1 */ + u8 n_i_pmport; + /* b7 : n Bit. Notification bit. If set device needs attention. */ + /* b6 : i Bit. Interrupt Bit */ + /* b5-b4: reserved2 */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u32 _r_a; +} __attribute__ ((packed)); +/* PIO setup FIS - device to host */ +struct pio_setup_fis { + u8 fis_type; /* 0x5f */ + u8 i_d_pmPort; + /* b7 : reserved */ + /* b6 : i bit. Interrupt bit */ + /* b5 : d bit. data transfer direction.
set to 1 for device to host + xfer */ + /* b4 : reserved */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 _r_a; + u8 sector_count; + u8 sector_count_exp; + u8 _r_b; + u8 e_status; + u8 _r_c[2]; + u8 transfer_count; +} __attribute__ ((packed)); + +/* + * brief the data structure of SATA Completion Response + * use to describe the sata task response (64 bytes) + */ +struct sata_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u32 sata_resp[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SAS HW Event Notification + * use to alert the host about the hardware event(64 bytes) + */ +/* updated outbound struct for spcv */ + +struct hw_event_resp { + __le32 lr_status_evt_portid; + __le32 evt_param; + __le32 phyid_npip_portstate; + struct sas_identify_frame sas_identify; + struct dev_to_host_fis sata_fis; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure for thermal event notification + */ + +struct thermal_hw_event { + __le32 thermal_event; + __le32 rht_lht; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of REGISTER DEVICE Command + * use to describe MPI REGISTER DEVICE Command (64 bytes) + */ + +struct reg_dev_req { + __le32 tag; + __le32 phyid_portid; + __le32 dtype_dlr_mcn_ir_retry; + __le32 firstburstsize_ITNexustimeout; + u8 sas_addr[SAS_ADDR_SIZE]; + __le32 upper_device_id; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEREGISTER DEVICE Command + * use to request spc to remove all internal resources associated + * with the device id (64 bytes) + */ + +struct dereg_dev_req { + __le32 tag; + __le32 device_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEVICE_REGISTRATION Response + * use to notify the completion of the device registration (64 bytes) + */ +struct dev_reg_resp { + __le32 tag; + __le32 status; + __le32 device_id; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of Local PHY Control Command + * use to issue PHY CONTROL to local phy (64 bytes) + */ +struct local_phy_ctl_req { + __le32 tag; + __le32 phyop_phyid; + u32 reserved1[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Local Phy Control Response + * use to describe MPI Local Phy Control Response (64 bytes) + */ + struct local_phy_ctl_resp { + __le32 tag; + __le32 phyop_phyid; + __le32 status; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +#define OP_BITS 0x0000FF00 +#define ID_BITS 0x000000FF + +/* + * brief the data structure of PORT Control Command + * use to control port properties (64 bytes) + */ + +struct port_ctl_req { + __le32 tag; + __le32 portop_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of HW Event Ack Command + * use to acknowledge receive HW event (64 bytes) + */ +struct hw_event_ack_req { + __le32 tag; + __le32 phyid_sea_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY_START Response Command + * indicates the completion of PHY_START command (64 bytes) + */ +struct phy_start_resp { + __le32 tag; + __le32 status; + __le32 phyid; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of 
PHY_STOP Response Command + * indicates the completion of PHY_STOP command (64 bytes) + */ +struct phy_stop_resp { + __le32 tag; + __le32 status; + __le32 phyid; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP Completion Response + * use to indicate a SSP Completion (n bytes) + */ +struct ssp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + __le32 ssptag_rescv_rescpad; + struct ssp_response_iu ssp_resp_iu; + __le32 residual_count; +} __attribute__((packed, aligned(4))); + +#define SSP_RESCV_BIT 0x00010000 + +/* + * brief the data structure of SATA EVENT response + * use to indicate a SATA Event (64 bytes) + */ +struct sata_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + u32 reserved; + __le32 event_param0; + __le32 event_param1; + __le32 sata_addr_h32; + __le32 sata_addr_l32; + __le32 e_udt1_udt0_crc; + __le32 e_udt5_udt4_udt3_udt2; + __le32 a_udt1_udt0_crc; + __le32 a_udt5_udt4_udt3_udt2; + __le32 hwdevid_diferr; + __le32 err_framelen_byteoffset; + __le32 err_dataframe; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP EVENT response + * use to indicate an SSP Event (64 bytes) + */ +struct ssp_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + __le32 ssp_tag; + __le32 event_param0; + __le32 event_param1; + __le32 sas_addr_h32; + __le32 sas_addr_l32; + __le32 e_udt1_udt0_crc; + __le32 e_udt5_udt4_udt3_udt2; + __le32 a_udt1_udt0_crc; + __le32 a_udt5_udt4_udt3_udt2; + __le32 hwdevid_diferr; + __le32 err_framelen_byteoffset; + __le32 err_dataframe; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of General Event Notification Response + * use to describe MPI General Event Notification Response (64 bytes) + */ +struct general_event_resp { + __le32 status; + __le32 inb_IOMB_payload[14]; +} __attribute__((packed, aligned(4))); + +#define GENERAL_EVENT_PAYLOAD 14 +#define OPCODE_BITS 0x00000fff + +/* + * brief the data structure of SMP Request Command + * use to describe MPI SMP REQUEST Command (64 bytes) + */ +struct smp_req { + __le32 tag; + __le32 device_id; + __le32 len_ip_ir; + /* Bits [0] - Indirect response */ + /* Bits [1] - Indirect Payload */ + /* Bits [15:2] - Reserved */ + /* Bits [23:16] - direct payload Len */ + /* Bits [31:24] - Reserved */ + u8 smp_req16[16]; + union { + u8 smp_req[32]; + struct { + __le64 long_req_addr;/* sg dma address, LE */ + __le32 long_req_size;/* LE */ + u32 _r_a; + __le64 long_resp_addr;/* sg dma address, LE */ + __le32 long_resp_size;/* LE */ + u32 _r_b; + } long_smp_req;/* sequencer extension */ + }; + __le32 rsvd[16]; +} __attribute__((packed, aligned(4))); +/* + * brief the data structure of SMP Completion Response + * use to describe MPI SMP Completion Response (64 bytes) + */ +struct smp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u8 _r_a[252]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP SMP SATA Abort Command + * use to describe MPI SSP SMP & SATA Abort Command (64 bytes) + */ +struct task_abort_req { + __le32 tag; + __le32 device_id; + __le32 tag_to_abort; + __le32 abort_all; + u32 reserved[27]; +} __attribute__((packed, aligned(4))); + +/* These flags are used for SSP SMP & SATA Abort */ +#define ABORT_MASK 0x3 +#define ABORT_SINGLE 0x0 +#define ABORT_ALL 0x1 + +/** + * brief the data structure of SSP SATA SMP Abort Response + * use to describe SSP SMP & SATA Abort Response (64 bytes) + */
+struct task_abort_resp { + __le32 tag; + __le32 status; + __le32 scp; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Start/End Command + * use to describe MPI SAS Diagnostic Start/End Command (64 bytes) + */ +struct sas_diag_start_end_req { + __le32 tag; + __le32 operation_phyid; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Execute Command + * use to describe MPI SAS Diagnostic Execute Command (64 bytes) + */ +struct sas_diag_execute_req { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 pat1_pat2; + __le32 threshold; + __le32 codepat_errmsk; + __le32 pmon; + __le32 pERF1CTL; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +#define SAS_DIAG_PARAM_BYTES 24 + +/* + * brief the data structure of Set Device State Command + * use to describe MPI Set Device State Command (64 bytes) + */ +struct set_dev_state_req { + __le32 tag; + __le32 device_id; + __le32 nds; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SATA Start Command + * use to describe MPI SATA IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ + +struct sata_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 ncqtag_atap_dir_m_dad; + struct host_to_dev_fis sata_fis; + u32 reserved1; + u32 reserved2; /* dword 11. rsvd for normal I/O. */ + /* EPLE Descl for enc I/O */ + u32 addr_low; /* dword 12. rsvd for enc I/O */ + u32 addr_high; /* dword 13. reserved for enc I/O */ + __le32 len; /* dword 14: length for normal I/O. */ + /* EPLE Desch for enc I/O */ + __le32 esgl; /* dword 15. rsvd for enc I/O */ + __le32 atapi_scsi_cdb[4]; /* dword 16-19. rsvd for enc I/O */ + /* The below fields are reserved for normal I/O */ + __le32 key_index_mode; /* dword 20 */ + __le32 sector_cnt_enss;/* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28. Encryption SGL address high */ + __le32 enc_addr_high; /* dword 29. Encryption SGL address low */ + __le32 enc_len; /* dword 30. Encryption length */ + __le32 enc_esgl; /* dword 31. Encryption esgl bit */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI TM Start Command + * use to describe MPI SSP INI TM Start Command (64 bytes) + */ +struct ssp_ini_tm_start_req { + __le32 tag; + __le32 device_id; + __le32 relate_tag; + __le32 tmf; + u8 lun[8]; + __le32 ds_ads_m; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +struct ssp_info_unit { + u8 lun[8];/* SCSI Logical Unit Number */ + u8 reserved1;/* reserved */ + u8 efb_prio_attr; + /* B7 : enabledFirstBurst */ + /* B6-3 : taskPriority */ + /* B2-0 : taskAttribute */ + u8 reserved2; /* reserved */ + u8 additional_cdb_len; + /* B7-2 : additional_cdb_len */ + /* B1-0 : reserved */ + u8 cdb[16];/* The SCSI CDB up to 16 bytes length */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI IO Start Command + * use to describe MPI SSP INI IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ +struct ssp_ini_io_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dad_dir_m_tlr; + struct ssp_info_unit ssp_iu; + __le32 addr_low; /* dword 12: sgl low for normal I/O. 
*/ + /* epl_descl for encryption I/O */ + __le32 addr_high; /* dword 13: sgl hi for normal I/O */ + /* dpl_descl for encryption I/O */ + __le32 len; /* dword 14: len for normal I/O. */ + /* edpl_desch for encryption I/O */ + __le32 esgl; /* dword 15: ESGL bit for normal I/O. */ + /* user defined tag mask for enc I/O */ + /* The below fields are reserved for normal I/O */ + u8 udt[12]; /* dword 16-18 */ + __le32 sectcnt_ios; /* dword 19 */ + __le32 key_cmode; /* dword 20 */ + __le32 ks_enss; /* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28: Encryption sgl addr low */ + __le32 enc_addr_high; /* dword 29: Encryption sgl addr hi */ + __le32 enc_len; /* dword 30: Encryption length */ + __le32 enc_esgl; /* dword 31: ESGL bit for encryption */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND + * use to initiate SSP I/O operation with optional DIF/ENC + */ +struct ssp_dif_enc_io_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dirMTlr; + __le32 sspiu0; + __le32 sspiu1; + __le32 sspiu2; + __le32 sspiu3; + __le32 sspiu4; + __le32 sspiu5; + __le32 sspiu6; + __le32 epl_des; + __le32 dpl_desl_ndplr; + __le32 dpl_desh; + __le32 uum_uuv_bss_difbits; + u8 udt[12]; + __le32 sectcnt_ios; + __le32 key_cmode; + __le32 ks_enss; + __le32 keytagl; + __le32 keytagh; + __le32 twk_val0; + __le32 twk_val1; + __le32 twk_val2; + __le32 twk_val3; + __le32 addr_low; + __le32 addr_high; + __le32 len; + __le32 esgl; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Firmware download + * use to describe MPI FW DOWNLOAD Command (64 bytes) + */ +struct fw_flash_Update_req { + __le32 tag; + __le32 cur_image_offset; + __le32 cur_image_len; + __le32 total_image_len; + u32 reserved0[7]; + __le32 sgl_addr_lo; + __le32 sgl_addr_hi; + __le32 len; + __le32 ext_reserved; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +#define FWFLASH_IOMB_RESERVED_LEN 0x07 +/** + * brief the data structure of FW_FLASH_UPDATE Response + * use to describe MPI FW_FLASH_UPDATE Response (64 bytes) + * + */ + struct fw_flash_Update_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Get NVM Data Command + * use to get data from NVM in HBA(64 bytes) + */ +struct get_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +struct set_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SET CONTROLLER CONFIG COMMAND + * use to modify controller configuration + */ +struct set_ctrl_cfg_req { + __le32 tag; + __le32 cfg_pg[14]; + u32 reserved[16]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for GET CONTROLLER CONFIG COMMAND + * use to get controller configuration page + */ +struct get_ctrl_cfg_req { + __le32 tag; + __le32 pgcd; + __le32 int_vec; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for KEK_MANAGEMENT COMMAND + * 
use for KEK management + */ +struct kek_mgmt_req { + __le32 tag; + __le32 new_curidx_ksop; + u32 reserved; + __le32 kblob[12]; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for DEK_MANAGEMENT COMMAND + * use for DEK management + */ +struct dek_mgmt_req { + __le32 tag; + __le32 kidx_dsop; + __le32 dekidx; + __le32 addr_l; + __le32 addr_h; + __le32 nent; + __le32 dbf_tblsize; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SET PHY PROFILE COMMAND + * use to set phy specific information + */ +struct set_phy_profile_req { + __le32 tag; + __le32 ppc_phyid; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for GET PHY PROFILE COMMAND + * use to retrieve phy specific information + */ +struct get_phy_profile_req { + __le32 tag; + __le32 ppc_phyid; + __le32 profile[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for EXT FLASH PARTITION + * use to manage ext flash partition + */ +struct ext_flash_partition_req { + __le32 tag; + __le32 cmd; + __le32 offset; + __le32 len; + u32 reserved[7]; + __le32 addr_low; + __le32 addr_high; + __le32 len1; + __le32 ext; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +#define TWI_DEVICE 0x0 +#define C_SEEPROM 0x1 +#define VPD_FLASH 0x4 +#define AAP1_RDUMP 0x5 +#define IOP_RDUMP 0x6 +#define EXPAN_ROM 0x7 + +#define IPMode 0x80000000 +#define NVMD_TYPE 0x0000000F +#define NVMD_STAT 0x0000FFFF +#define NVMD_LEN 0xFF000000 +/** + * brief the data structure of Get NVMD Data Response + * use to describe MPI Get NVMD Data Response (64 bytes) + */ +struct get_nvm_data_resp { + __le32 tag; + __le32 ir_tda_bn_dps_das_nvm; + __le32 dlen_status; + __le32 nvm_data[12]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Start/End Response + * use to describe MPI SAS Diagnostic Start/End Response (64 bytes) + * + */ +struct sas_diag_start_end_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Execute Response + * use to describe MPI SAS Diagnostic Execute Response (64 bytes) + * + */ +struct sas_diag_execute_resp { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 Status; + __le32 ReportData; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Set Device State Response + * use to describe MPI Set Device State Response (64 bytes) + * + */ +struct set_dev_state_resp { + __le32 tag; + __le32 status; + __le32 device_id; + __le32 pds_nds; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/* new outbound structure for spcv - begins */ +/** + * brief the data structure for SET CONTROLLER CONFIG COMMAND + * use to modify controller configuration + */ +struct set_ctrl_cfg_resp { + __le32 tag; + __le32 status; + __le32 err_qlfr_pgcd; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +struct get_ctrl_cfg_resp { + __le32 tag; + __le32 status; + __le32 err_qlfr; + __le32 confg_page[12]; +} __attribute__((packed, aligned(4))); + +struct kek_mgmt_resp { + __le32 tag; + __le32 status; + __le32 kidx_new_curr_ksop; + __le32 err_qlfr; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +struct dek_mgmt_resp { + __le32 tag; + __le32 status; + __le32 kekidx_tbls_dsop; + __le32 dekidx; + __le32 err_qlfr; + u32 reserved[10]; +} __attribute__((packed, aligned(4))); + +struct
get_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct flash_op_ext_resp { + __le32 tag; + __le32 cmd; + __le32 status; + __le32 epart_size; + __le32 epart_sect_size; + u32 reserved[10]; +} __attribute__((packed, aligned(4))); + +struct set_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct ssp_coalesced_comp_resp { + __le32 coal_cnt; + __le32 tag0; + __le32 ssp_tag0; + __le32 tag1; + __le32 ssp_tag1; + __le32 add_tag_ssp_tag[10]; +} __attribute__((packed, aligned(4))); + +/* new outbound structure for spcv - ends */ + +/* brief data structure for SAS protocol timer configuration page. + * + */ +struct SASProtocolTimerConfig { + __le32 pageCode; /* 0 */ + __le32 MST_MSI; /* 1 */ + __le32 STP_SSP_MCT_TMO; /* 2 */ + __le32 STP_FRM_TMO; /* 3 */ + __le32 STP_IDLE_TMO; /* 4 */ + __le32 OPNRJT_RTRY_INTVL; /* 5 */ + __le32 Data_Cmd_OPNRJT_RTRY_TMO; /* 6 */ + __le32 Data_Cmd_OPNRJT_RTRY_THR; /* 7 */ + __le32 MAX_AIP; /* 8 */ +} __attribute__((packed, aligned(4))); + +typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t; + +#define NDS_BITS 0x0F +#define PDS_BITS 0xF0 + +/* + * HW Events type + */ + +#define HW_EVENT_RESET_START 0x01 +#define HW_EVENT_CHIP_RESET_COMPLETE 0x02 +#define HW_EVENT_PHY_STOP_STATUS 0x03 +#define HW_EVENT_SAS_PHY_UP 0x04 +#define HW_EVENT_SATA_PHY_UP 0x05 +#define HW_EVENT_SATA_SPINUP_HOLD 0x06 +#define HW_EVENT_PHY_DOWN 0x07 +#define HW_EVENT_PORT_INVALID 0x08 +#define HW_EVENT_BROADCAST_CHANGE 0x09 +#define HW_EVENT_PHY_ERROR 0x0A +#define HW_EVENT_BROADCAST_SES 0x0B +#define HW_EVENT_INBOUND_CRC_ERROR 0x0C +#define HW_EVENT_HARD_RESET_RECEIVED 0x0D +#define HW_EVENT_MALFUNCTION 0x0E +#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F +#define HW_EVENT_BROADCAST_EXP 0x10 +#define HW_EVENT_PHY_START_STATUS 0x11 +#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12 +#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13 +#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14 +#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15 +#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16 +#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17 +#define HW_EVENT_PORT_RECOVER 0x18 +#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19 +#define HW_EVENT_PORT_RESET_COMPLETE 0x20 +#define EVENT_BROADCAST_ASYNCH_EVENT 0x21 + +/* port state */ +#define PORT_NOT_ESTABLISHED 0x00 +#define PORT_VALID 0x01 +#define PORT_LOSTCOMM 0x02 +#define PORT_IN_RESET 0x04 +#define PORT_3RD_PARTY_RESET 0x07 +#define PORT_INVALID 0x08 + +/* + * SSP/SMP/SATA IO Completion Status values + */ + +#define IO_SUCCESS 0x00 +#define IO_ABORTED 0x01 +#define IO_OVERFLOW 0x02 +#define IO_UNDERFLOW 0x03 +#define IO_FAILED 0x04 +#define IO_ABORT_RESET 0x05 +#define IO_NOT_VALID 0x06 +#define IO_NO_DEVICE 0x07 +#define IO_ILLEGAL_PARAMETER 0x08 +#define IO_LINK_FAILURE 0x09 +#define IO_PROG_ERROR 0x0A + +#define IO_EDC_IN_ERROR 0x0B +#define IO_EDC_OUT_ERROR 0x0C +#define IO_ERROR_HW_TIMEOUT 0x0D +#define IO_XFER_ERROR_BREAK 0x0E +#define IO_XFER_ERROR_PHY_NOT_READY 0x0F +#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10 +#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11 +#define IO_OPEN_CNX_ERROR_BREAK 0x12 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13 +#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14 +#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15 +#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16 +#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17 +/* This 
error code 0x18 is not used on SPCv */ +#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18 +#define IO_XFER_ERROR_NAK_RECEIVED 0x19 +#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A +#define IO_XFER_ERROR_PEER_ABORTED 0x1B +#define IO_XFER_ERROR_RX_FRAME 0x1C +#define IO_XFER_ERROR_DMA 0x1D +#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E +#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F +#define IO_XFER_ERROR_SATA 0x20 + +/* This error code 0x22 is not used on SPCv */ +#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22 +#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21 +#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23 +#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24 +/* This error code 0x25 is not used on SPCv */ +#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25 +#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26 +#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27 +#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28 +#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30 + +/* The following error codes 0x31 and 0x32 are not used (obsolete) */ +#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31 +#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32 + +#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34 +#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35 +#define IO_XFER_CMD_FRAME_ISSUED 0x36 +#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37 +#define IO_PORT_IN_RESET 0x38 +#define IO_DS_NON_OPERATIONAL 0x39 +#define IO_DS_IN_RECOVERY 0x3A +#define IO_TM_TAG_NOT_FOUND 0x3B +#define IO_XFER_PIO_SETUP_ERROR 0x3C +#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D +#define IO_DS_IN_ERROR 0x3E +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F +#define IO_ABORT_IN_PROGRESS 0x40 +#define IO_ABORT_DELAYED 0x41 +#define IO_INVALID_LENGTH 0x42 + +/********** additional response event values *****************/ + +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT 0x43 +#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED 0x44 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO 0x45 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST 0x46 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE 0x47 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED 0x48 +#define IO_DS_INVALID 0x49 +/* WARNING: the values are not contiguous from here */ +#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR 0x52 +#define IO_XFER_DMA_ACTIVATE_TIMEOUT 0x53 +#define IO_XFER_ERROR_INTERNAL_CRC_ERROR 0x54 +#define MPI_IO_RQE_BUSY_FULL 0x55 +#define IO_XFER_ERR_EOB_DATA_OVERRUN 0x56 +#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME 0x57 +#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED 0x58 + +#define MPI_ERR_IO_RESOURCE_UNAVAILABLE 0x1004 +#define MPI_ERR_ATAPI_DEVICE_BUSY 0x1024 + +#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS 0x2040 +/* + * An encryption IO request failed due to DEK Key Tag mismatch. + * The key tag supplied in the encryption IOMB does not match with + * the Key Tag in the referenced DEK Entry. + */ +#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH 0x2041 +#define IO_XFR_ERROR_CIPHER_MODE_INVALID 0x2042 +/* + * An encryption I/O request failed because the initial value (IV) + * in the unwrapped DEK blob didn't match the IV used to unwrap it. + */ +#define IO_XFR_ERROR_DEK_IV_MISMATCH 0x2043 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK. */ +#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR 0x2044 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK.
*/ +#define IO_XFR_ERROR_INTERNAL_RAM 0x2045 +/* + * An encryption I/O request failed + * because the DEK index specified in the I/O was outside the bounds of + * the total number of entries in the host DEK table. + */ +#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS 0x2046 + +/* define DIF IO response error status code */ +#define IO_XFR_ERROR_DIF_MISMATCH 0x3000 +#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH 0x3001 +#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH 0x3002 +#define IO_XFR_ERROR_DIF_CRC_MISMATCH 0x3003 + +/* define operator management response status and error qualifier code */ +#define OPR_MGMT_OP_NOT_SUPPORTED 0x2060 +#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL 0x2061 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND 0x2062 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH 0x2063 +#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED 0x2064 +#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL 0x2022 +#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE 0x2023 +/***************** additional response event values ***************/ + +/* WARNING: This error code must always be the last number. + * If you add an error code, modify this code also. + * It is used as an index. + */ +#define IO_ERROR_UNKNOWN_GENERIC 0x2023 + +/* MSGU CONFIGURATION TABLE */ + +#define SPCv_MSGU_CFG_TABLE_UPDATE 0x001 +#define SPCv_MSGU_CFG_TABLE_RESET 0x002 +#define SPCv_MSGU_CFG_TABLE_FREEZE 0x004 +#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x008 +#define MSGU_IBDB_SET 0x00 +#define MSGU_HOST_INT_STATUS 0x08 +#define MSGU_HOST_INT_MASK 0x0C +#define MSGU_IOPIB_INT_STATUS 0x18 +#define MSGU_IOPIB_INT_MASK 0x1C +#define MSGU_IBDB_CLEAR 0x20 + +#define MSGU_MSGU_CONTROL 0x24 +#define MSGU_ODR 0x20 +#define MSGU_ODCR 0x28 + +#define MSGU_ODMR 0x30 +#define MSGU_ODMR_U 0x34 +#define MSGU_ODMR_CLR 0x38 +#define MSGU_ODMR_CLR_U 0x3C +#define MSGU_OD_RSVD 0x40 + +#define MSGU_SCRATCH_PAD_0 0x44 +#define MSGU_SCRATCH_PAD_1 0x48 +#define MSGU_SCRATCH_PAD_2 0x4C +#define MSGU_SCRATCH_PAD_3 0x50 +#define MSGU_HOST_SCRATCH_PAD_0 0x54 +#define MSGU_HOST_SCRATCH_PAD_1 0x58 +#define MSGU_HOST_SCRATCH_PAD_2 0x5C +#define MSGU_HOST_SCRATCH_PAD_3 0x60 +#define MSGU_HOST_SCRATCH_PAD_4 0x64 +#define MSGU_HOST_SCRATCH_PAD_5 0x68 +#define MSGU_HOST_SCRATCH_PAD_6 0x6C +#define MSGU_HOST_SCRATCH_PAD_7 0x70 + +/* bit definition for ODMR register */ +#define ODMR_MASK_ALL 0xFFFFFFFF /* mask all + interrupt vector */ +#define ODMR_CLEAR_ALL 0 /* clear all + interrupt vector */ +/* bit definition for ODCR register */ +#define ODCR_CLEAR_ALL 0xFFFFFFFF /* mask all + interrupt vector */ +/* MSIX Interrupts */ +#define MSIX_TABLE_OFFSET 0x2000 +#define MSIX_TABLE_ELEMENT_SIZE 0x10 +#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC +#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + \ + MSIX_INTERRUPT_CONTROL_OFFSET) +#define MSIX_INTERRUPT_DISABLE 0x1 +#define MSIX_INTERRUPT_ENABLE 0x0 + +/* state definition for Scratch Pad1 register */ +#define SCRATCH_PAD_RAAE_READY 0x3 +#define SCRATCH_PAD_ILA_READY 0xC +#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0 +#define SCRATCH_PAD_IOP0_READY 0xC00 +#define SCRATCH_PAD_IOP1_READY 0x3000 + +/* boot loader state */ +#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */ +#define SCRATCH_PAD1_BOOTSTATE_SUCESS 0x0 /* Load successful */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM 0x10 /* HDA SEEPROM */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP 0x20 /* HDA BootStrap Pins */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET 0x30 /* HDA Soft Reset */ +#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR 0x40 /* HDA critical error */ +#define
SCRATCH_PAD1_BOOTSTATE_R1 0x50 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_R2 0x60 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_FATAL 0x70 /* Fatal Error */ + +/* state definition for Scratch Pad2 register */ +#define SCRATCH_PAD2_POR 0x00 /* power on state */ +#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */ +#define SCRATCH_PAD2_ERR 0x02 /* error state */ +#define SCRATCH_PAD2_RDY 0x03 /* ready state */ +#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW rdy for soft reset flag */ +#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */ +#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2 + Mask, bit1-0 State */ +#define SCRATCH_PAD2_RESERVED 0x000003FC /* Scratch Pad2 + Reserved bits 2 to 9 */ + +#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */ +#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */ + +/* main configuration offset - byte offset */ +#define MAIN_SIGNATURE_OFFSET 0x00 /* DWORD 0x00 */ +#define MAIN_INTERFACE_REVISION 0x04 /* DWORD 0x01 */ +#define MAIN_FW_REVISION 0x08 /* DWORD 0x02 */ +#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */ +#define MAIN_MAX_SGL_OFFSET 0x10 /* DWORD 0x04 */ +#define MAIN_CNTRL_CAP_OFFSET 0x14 /* DWORD 0x05 */ +#define MAIN_GST_OFFSET 0x18 /* DWORD 0x06 */ +#define MAIN_IBQ_OFFSET 0x1C /* DWORD 0x07 */ +#define MAIN_OBQ_OFFSET 0x20 /* DWORD 0x08 */ +#define MAIN_IQNPPD_HPPD_OFFSET 0x24 /* DWORD 0x09 */ + +/* 0x28 - 0x4C - RSVD */ +#define MAIN_EVENT_CRC_CHECK 0x48 /* DWORD 0x12 */ +#define MAIN_EVENT_LOG_ADDR_HI 0x50 /* DWORD 0x14 */ +#define MAIN_EVENT_LOG_ADDR_LO 0x54 /* DWORD 0x15 */ +#define MAIN_EVENT_LOG_BUFF_SIZE 0x58 /* DWORD 0x16 */ +#define MAIN_EVENT_LOG_OPTION 0x5C /* DWORD 0x17 */ +#define MAIN_PCS_EVENT_LOG_ADDR_HI 0x60 /* DWORD 0x18 */ +#define MAIN_PCS_EVENT_LOG_ADDR_LO 0x64 /* DWORD 0x19 */ +#define MAIN_PCS_EVENT_LOG_BUFF_SIZE 0x68 /* DWORD 0x1A */ +#define MAIN_PCS_EVENT_LOG_OPTION 0x6C /* DWORD 0x1B */ +#define MAIN_FATAL_ERROR_INTERRUPT 0x70 /* DWORD 0x1C */ +#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */ +#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */ +#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */ +#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */ +#define MAIN_GPIO_LED_FLAGS_OFFSET 0x84 /* DWORD 0x21 */ +#define MAIN_ANALOG_SETUP_OFFSET 0x88 /* DWORD 0x22 */ + +#define MAIN_INT_VECTOR_TABLE_OFFSET 0x8C /* DWORD 0x23 */ +#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */ +#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */ +#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */ + +/* General Status Table offset - byte offset */ +#define GST_GSTLEN_MPIS_OFFSET 0x00 +#define GST_IQ_FREEZE_STATE0_OFFSET 0x04 +#define GST_IQ_FREEZE_STATE1_OFFSET 0x08 +#define GST_MSGUTCNT_OFFSET 0x0C +#define GST_IOPTCNT_OFFSET 0x10 +/* 0x14 - 0x34 - RSVD */ +#define GST_GPIO_INPUT_VAL 0x38 +/* 0x3c - 0x40 - RSVD */ +#define GST_RERRINFO_OFFSET0 0x44 +#define GST_RERRINFO_OFFSET1 0x48 +#define GST_RERRINFO_OFFSET2 0x4c +#define GST_RERRINFO_OFFSET3 0x50 +#define GST_RERRINFO_OFFSET4 0x54 +#define GST_RERRINFO_OFFSET5 0x58 +#define GST_RERRINFO_OFFSET6 0x5c +#define GST_RERRINFO_OFFSET7 0x60 + +/* General Status Table - MPI state */ +#define GST_MPI_STATE_UNINIT 0x00 +#define GST_MPI_STATE_INIT 0x01 +#define GST_MPI_STATE_TERMINATION 0x02 +#define GST_MPI_STATE_ERROR 0x03 +#define GST_MPI_STATE_MASK 0x07 + +/* Per SAS PHY Attributes */ + +#define PSPA_PHYSTATE0_OFFSET 0x00 /* Dword V */ +#define PSPA_OB_HW_EVENT_PID0_OFFSET
0x04 /* DWORD V+1 */ +#define PSPA_PHYSTATE1_OFFSET 0x08 /* Dword V+2 */ +#define PSPA_OB_HW_EVENT_PID1_OFFSET 0x0C /* DWORD V+3 */ +#define PSPA_PHYSTATE2_OFFSET 0x10 /* Dword V+4 */ +#define PSPA_OB_HW_EVENT_PID2_OFFSET 0x14 /* DWORD V+5 */ +#define PSPA_PHYSTATE3_OFFSET 0x18 /* Dword V+6 */ +#define PSPA_OB_HW_EVENT_PID3_OFFSET 0x1C /* DWORD V+7 */ +#define PSPA_PHYSTATE4_OFFSET 0x20 /* Dword V+8 */ +#define PSPA_OB_HW_EVENT_PID4_OFFSET 0x24 /* DWORD V+9 */ +#define PSPA_PHYSTATE5_OFFSET 0x28 /* Dword V+10 */ +#define PSPA_OB_HW_EVENT_PID5_OFFSET 0x2C /* DWORD V+11 */ +#define PSPA_PHYSTATE6_OFFSET 0x30 /* Dword V+12 */ +#define PSPA_OB_HW_EVENT_PID6_OFFSET 0x34 /* DWORD V+13 */ +#define PSPA_PHYSTATE7_OFFSET 0x38 /* Dword V+14 */ +#define PSPA_OB_HW_EVENT_PID7_OFFSET 0x3C /* DWORD V+15 */ +#define PSPA_PHYSTATE8_OFFSET 0x40 /* DWORD V+16 */ +#define PSPA_OB_HW_EVENT_PID8_OFFSET 0x44 /* DWORD V+17 */ +#define PSPA_PHYSTATE9_OFFSET 0x48 /* DWORD V+18 */ +#define PSPA_OB_HW_EVENT_PID9_OFFSET 0x4C /* DWORD V+19 */ +#define PSPA_PHYSTATE10_OFFSET 0x50 /* DWORD V+20 */ +#define PSPA_OB_HW_EVENT_PID10_OFFSET 0x54 /* DWORD V+21 */ +#define PSPA_PHYSTATE11_OFFSET 0x58 /* DWORD V+22 */ +#define PSPA_OB_HW_EVENT_PID11_OFFSET 0x5C /* DWORD V+23 */ +#define PSPA_PHYSTATE12_OFFSET 0x60 /* DWORD V+24 */ +#define PSPA_OB_HW_EVENT_PID12_OFFSET 0x64 /* DWORD V+25 */ +#define PSPA_PHYSTATE13_OFFSET 0x68 /* DWORD V+26 */ +#define PSPA_OB_HW_EVENT_PID13_OFFSET 0x6c /* DWORD V+27 */ +#define PSPA_PHYSTATE14_OFFSET 0x70 /* DWORD V+28 */ +#define PSPA_OB_HW_EVENT_PID14_OFFSET 0x74 /* DWORD V+29 */ +#define PSPA_PHYSTATE15_OFFSET 0x78 /* DWORD V+30 */ +#define PSPA_OB_HW_EVENT_PID15_OFFSET 0x7c /* DWORD V+31 */ +/* end PSPA */ + +/* inbound queue configuration offset - byte offset */ +#define IB_PROPERITY_OFFSET 0x00 +#define IB_BASE_ADDR_HI_OFFSET 0x04 +#define IB_BASE_ADDR_LO_OFFSET 0x08 +#define IB_CI_BASE_ADDR_HI_OFFSET 0x0C +#define IB_CI_BASE_ADDR_LO_OFFSET 0x10 +#define IB_PIPCI_BAR 0x14 +#define IB_PIPCI_BAR_OFFSET 0x18 +#define IB_RESERVED_OFFSET 0x1C + +/* outbound queue configuration offset - byte offset */ +#define OB_PROPERITY_OFFSET 0x00 +#define OB_BASE_ADDR_HI_OFFSET 0x04 +#define OB_BASE_ADDR_LO_OFFSET 0x08 +#define OB_PI_BASE_ADDR_HI_OFFSET 0x0C +#define OB_PI_BASE_ADDR_LO_OFFSET 0x10 +#define OB_CIPCI_BAR 0x14 +#define OB_CIPCI_BAR_OFFSET 0x18 +#define OB_INTERRUPT_COALES_OFFSET 0x1C +#define OB_DYNAMIC_COALES_OFFSET 0x20 +#define OB_PROPERTY_INT_ENABLE 0x40000000 + +#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418 +#define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418 +/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */ +#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040 +#define PCIE_EVENT_INTERRUPT 0x003044 +#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048 +#define PCIE_ERROR_INTERRUPT 0x00304C + +/* SPCV soft reset */ +#define SPC_REG_SOFT_RESET 0x00001000 +#define SPCv_NORMAL_RESET_VALUE 0x1 + +#define SPCv_SOFT_RESET_READ_MASK 0xC0 +#define SPCv_SOFT_RESET_NO_RESET 0x0 +#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED 0x40 +#define SPCv_SOFT_RESET_HDA_MODE_OCCURED 0x80 +#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED 0xC0 + +/* signature definition for host scratch pad0 register */ +#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd +/* Signature for Soft Reset */ + +/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */ +#define SPC_REG_RESET 0x000000/* reset register */ + +/* bit definition for SPC_RESET register */ +#define SPC_REG_RESET_OSSP 0x00000001 +#define SPC_REG_RESET_RAAE 0x00000002 +#define 
SPC_REG_RESET_PCS_SPBC 0x00000004 +#define SPC_REG_RESET_PCS_IOP_SS 0x00000008 +#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010 +#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020 +#define SPC_REG_RESET_PCS_LM 0x00000040 +#define SPC_REG_RESET_PCS 0x00000080 +#define SPC_REG_RESET_GSM 0x00000100 +#define SPC_REG_RESET_DDR2 0x00010000 +#define SPC_REG_RESET_BDMA_CORE 0x00020000 +#define SPC_REG_RESET_BDMA_SXCBI 0x00040000 +#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000 +#define SPC_REG_RESET_PCIE_PWR 0x00100000 +#define SPC_REG_RESET_PCIE_SFT 0x00200000 +#define SPC_REG_RESET_PCS_SXCBI 0x00400000 +#define SPC_REG_RESET_LMS_SXCBI 0x00800000 +#define SPC_REG_RESET_PMIC_SXCBI 0x01000000 +#define SPC_REG_RESET_PMIC_CORE 0x02000000 +#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000 +#define SPC_REG_RESET_DEVICE 0x80000000 + +/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */ +#define SPCV_IBW_AXI_TRANSLATION_LOW 0x001010 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +/* Dynamic map through Bar4 - 0x00700000 */ +#define GSM_CONFIG_RESET 0x00000000 +#define RAM_ECC_DB_ERR 0x00000018 +#define GSM_READ_ADDR_PARITY_INDIC 0x00000058 +#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060 +#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068 +#define GSM_READ_ADDR_PARITY_CHECK 0x00000038 +#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040 +#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048 + +#define RB6_ACCESS_REG 0x6A0000 +#define HDAC_EXEC_CMD 0x0002 +#define HDA_C_PA 0xcb +#define HDA_SEQ_ID_BITS 0x00ff0000 +#define HDA_GSM_OFFSET_BITS 0x00FFFFFF +#define HDA_GSM_CMD_OFFSET_BITS 0x42C0 +#define HDA_GSM_RSP_OFFSET_BITS 0x42E0 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +#define SPC_TOP_LEVEL_ADDR_BASE 0x000000 +#define GSM_CONFIG_RESET_VALUE 0x00003b00 +#define GPIO_ADDR_BASE 0x00090000 +#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c + +/* RB6 offset */ +#define SPC_RB6_OFFSET 0x80C0 +/* Magic number of soft reset for RB6 */ +#define RB6_MAGIC_NUMBER_RST 0x1234 + +/* Device Register status */ +#define DEVREG_SUCCESS 0x00 +#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01 +#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02 +#define DEVREG_FAILURE_INVALID_PHY_ID 0x03 +#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04 +#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05 +#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 +#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 + + +#define MEMBASE_II_SHIFT_REGISTER 0x1010 +#endif diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index b46f5e90683..be8ce54f99b 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -1404,11 +1404,22 @@ enum { }; #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1) +static struct genl_multicast_group pmcraid_mcgrps[] = { + { .name = "events", /* not really used - see ID discussion below */ }, +}; + static struct genl_family pmcraid_event_family = { - .id = GENL_ID_GENERATE, + /* + * Due to prior multicast group abuse (the code having assumed that + * the family ID can be used as a multicast group ID) we need to + * statically allocate a family (and thus group) ID. 
+ */ + .id = GENL_ID_PMCRAID, .name = "pmcraid", .version = 1, - .maxattr = PMCRAID_AEN_ATTR_MAX + .maxattr = PMCRAID_AEN_ATTR_MAX, + .mcgrps = pmcraid_mcgrps, + .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps), }; /** @@ -1511,8 +1522,8 @@ static int pmcraid_notify_aen( return result; } - result = - genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC); + result = genlmsg_multicast(&pmcraid_event_family, skb, + 0, 0, GFP_ATOMIC); /* If there are no listeners, genlmsg_multicast may return non-zero * value. @@ -3599,19 +3610,6 @@ static int pmcraid_chr_open(struct inode *inode, struct file *filep) } /** - * pmcraid_release - char node "release" entry point - */ -static int pmcraid_chr_release(struct inode *inode, struct file *filep) -{ - struct pmcraid_instance *pinstance = filep->private_data; - - filep->private_data = NULL; - fasync_helper(-1, filep, 0, &pinstance->aen_queue); - - return 0; -} - -/** * pmcraid_fasync - Async notifier registration from applications * * This function adds the calling process to a driver global queue. When an @@ -4167,7 +4165,6 @@ static long pmcraid_chr_ioctl( static const struct file_operations pmcraid_fops = { .owner = THIS_MODULE, .open = pmcraid_chr_open, - .release = pmcraid_chr_release, .fasync = pmcraid_chr_fasync, .unlocked_ioctl = pmcraid_chr_ioctl, #ifdef CONFIG_COMPAT @@ -4328,6 +4325,7 @@ static struct scsi_host_template pmcraid_host_template = { .this_id = -1, .sg_tablesize = PMCRAID_MAX_IOADLS, .max_sectors = PMCRAID_IOA_MAX_SECTORS, + .no_write_same = 1, .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = pmcraid_host_attrs, @@ -6063,7 +6061,6 @@ out_release_regions: out_disable_device: atomic_dec(&pmcraid_adapter_count); - pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); return -ENODEV; } @@ -6106,7 +6103,7 @@ static int __init pmcraid_init(void) if (IS_ERR(pmcraid_class)) { error = PTR_ERR(pmcraid_class); - pmcraid_err("failed to register with with sysfs, error = %x\n", + pmcraid_err("failed to register with sysfs, error = %x\n", error); goto out_unreg_chrdev; } diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index d164c963936..1db8b26063b 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c @@ -118,8 +118,9 @@ static inline void ppa_pb_release(ppa_struct *dev) * Also gives a method to use a script to obtain optimum timings (TODO) */ -static inline int ppa_proc_write(ppa_struct *dev, char *buffer, int length) +static inline int ppa_write_info(struct Scsi_Host *host, char *buffer, int length) { + ppa_struct *dev = ppa_dev(host); unsigned long x; if ((length > 5) && (strncmp(buffer, "mode=", 5) == 0)) { @@ -137,35 +138,17 @@ static inline int ppa_proc_write(ppa_struct *dev, char *buffer, int length) return -EINVAL; } -static int ppa_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout) +static int ppa_show_info(struct seq_file *m, struct Scsi_Host *host) { - int len = 0; ppa_struct *dev = ppa_dev(host); - if (inout) - return ppa_proc_write(dev, buffer, length); - - len += sprintf(buffer + len, "Version : %s\n", PPA_VERSION); - len += - sprintf(buffer + len, "Parport : %s\n", - dev->dev->port->name); - len += - sprintf(buffer + len, "Mode : %s\n", - PPA_MODE_STRING[dev->mode]); + seq_printf(m, "Version : %s\n", PPA_VERSION); + seq_printf(m, "Parport : %s\n", dev->dev->port->name); + seq_printf(m, "Mode : %s\n", PPA_MODE_STRING[dev->mode]); #if PPA_DEBUG > 0 - len += - sprintf(buffer + len, "recon_tmo : %lu\n", dev->recon_tmo); + seq_printf(m, 
"recon_tmo : %lu\n", dev->recon_tmo); #endif - - /* Request for beyond end of buffer */ - if (offset > length) - return 0; - - *start = buffer + offset; - len -= offset; - if (len > length) - len = length; - return len; + return 0; } static int device_check(ppa_struct *dev); @@ -981,7 +964,8 @@ static int ppa_adjust_queue(struct scsi_device *device) static struct scsi_host_template ppa_template = { .module = THIS_MODULE, .proc_name = "ppa", - .proc_info = ppa_proc_info, + .show_info = ppa_show_info, + .write_info = ppa_write_info, .name = "Iomega VPI0 (ppa) interface", .queuecommand = ppa_queuecommand, .eh_abort_handler = ppa_abort, diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 5a522c5bbd4..158020522df 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -379,14 +379,7 @@ #define DEBUG_PRINT_NVRAM 0 #define DEBUG_QLA1280 0 -/* - * The SGI VISWS is broken and doesn't support MMIO ;-( - */ -#ifdef CONFIG_X86_VISWS -#define MEMORY_MAPPED_IO 0 -#else #define MEMORY_MAPPED_IO 1 -#endif #include "qla1280.h" @@ -2502,7 +2495,7 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) /* Issue set host interrupt command. */ /* set up a timer just in case we're really jammed */ - init_timer(&timer); + init_timer_on_stack(&timer); timer.expires = jiffies + 20*HZ; timer.data = (unsigned long)ha; timer.function = qla1280_mailbox_timeout; diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 317a7fdc3b8..23d607218ae 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -24,7 +24,9 @@ config SCSI_QLA_FC Firmware images can be retrieved from: - ftp://ftp.qlogic.com/outgoing/linux/firmware/ + http://ldriver.qlogic.com/firmware/ + + They are also included in the linux-firmware tree as well. config TCM_QLA2XXX tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile index dce7d788cdc..44def6bb4bb 100644 --- a/drivers/scsi/qla2xxx/Makefile +++ b/drivers/scsi/qla2xxx/Makefile @@ -1,6 +1,6 @@ qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ - qla_nx.o qla_target.o + qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 1d82eef4e1e..16fe5196e6d 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -29,7 +29,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj, if (!(ha->fw_dump_reading || ha->mctp_dump_reading)) return 0; - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { if (off < ha->md_template_size) { rval = memory_read_from_buffer(buf, count, &off, ha->md_tmplt_hdr, ha->md_template_size); @@ -71,7 +71,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x705d, "Firmware dump cleared on (%ld).\n", vha->host_no); - if (IS_QLA82XX(vha->hw)) { + if (IS_P3P_TYPE(ha)) { qla82xx_md_free(vha); qla82xx_md_prep(vha); } @@ -95,11 +95,15 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); + } else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla8044_idc_unlock(ha); } else qla2x00_system_error(vha); break; case 4: - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { if (ha->md_tmplt_hdr) ql_dbg(ql_dbg_user, vha, 0x705b, "MiniDump supported with this firmware.\n"); @@ -109,7 +113,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, } break; case 5: - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case 6: @@ -143,6 +147,92 @@ static struct bin_attribute sysfs_fw_dump_attr = { }; static ssize_t +qla2x00_sysfs_read_fw_dump_template(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + + if (!ha->fw_dump_template || !ha->fw_dump_template_len) + return 0; + + ql_dbg(ql_dbg_user, vha, 0x70e2, + "chunk <- off=%llx count=%zx\n", off, count); + return memory_read_from_buffer(buf, count, &off, + ha->fw_dump_template, ha->fw_dump_template_len); +} + +static ssize_t +qla2x00_sysfs_write_fw_dump_template(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + uint32_t size; + + if (off == 0) { + if (ha->fw_dump) + vfree(ha->fw_dump); + if (ha->fw_dump_template) + vfree(ha->fw_dump_template); + + ha->fw_dump = NULL; + ha->fw_dump_len = 0; + ha->fw_dump_template = NULL; + ha->fw_dump_template_len = 0; + + size = qla27xx_fwdt_template_size(buf); + ql_dbg(ql_dbg_user, vha, 0x70d1, + "-> allocating fwdt (%x bytes)...\n", size); + ha->fw_dump_template = vmalloc(size); + if (!ha->fw_dump_template) { + ql_log(ql_log_warn, vha, 0x70d2, + "Failed allocate fwdt (%x bytes).\n", size); + return -ENOMEM; + } + ha->fw_dump_template_len = size; + } + + if (off + count > ha->fw_dump_template_len) { + count = ha->fw_dump_template_len - off; + ql_dbg(ql_dbg_user, vha, 0x70d3, + "chunk -> truncating to %zx bytes.\n", count); + } + + ql_dbg(ql_dbg_user, vha, 0x70d4, + "chunk -> off=%llx count=%zx\n", off, count); + memcpy(ha->fw_dump_template + off, buf, count); + + if (off + count == ha->fw_dump_template_len) { + size = qla27xx_fwdt_calculate_dump_size(vha); + ql_dbg(ql_dbg_user, vha, 0x70d5, + "-> allocating fwdump (%x bytes)...\n", size); + ha->fw_dump = vmalloc(size); + if (!ha->fw_dump) { + ql_log(ql_log_warn, vha, 0x70d6, + "Failed allocate fwdump (%x bytes).\n", size); + return -ENOMEM; + } + ha->fw_dump_len = size; + } + + return count; +} +static struct 
bin_attribute sysfs_fw_dump_template_attr = { + .attr = { + .name = "fw_dump_template", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 0, + .read = qla2x00_sysfs_read_fw_dump_template, + .write = qla2x00_sysfs_write_fw_dump_template, +}; + +static ssize_t qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) @@ -237,12 +327,17 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj, struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; + ssize_t rval = 0; if (ha->optrom_state != QLA_SREADING) return 0; - return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, - ha->optrom_region_size); + mutex_lock(&ha->optrom_mutex); + rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, + ha->optrom_region_size); + mutex_unlock(&ha->optrom_mutex); + + return rval; } static ssize_t @@ -261,7 +356,9 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj, if (off + count > ha->optrom_region_size) count = ha->optrom_region_size - off; + mutex_lock(&ha->optrom_mutex); memcpy(&ha->optrom_buffer[off], buf, count); + mutex_unlock(&ha->optrom_mutex); return count; } @@ -284,10 +381,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; - uint32_t start = 0; uint32_t size = ha->optrom_size; int val, valid; + ssize_t rval = count; if (off) return -EINVAL; @@ -300,12 +397,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, if (start > ha->optrom_size) return -EINVAL; + mutex_lock(&ha->optrom_mutex); switch (val) { case 0: if (ha->optrom_state != QLA_SREADING && - ha->optrom_state != QLA_SWRITING) - return -EINVAL; - + ha->optrom_state != QLA_SWRITING) { + rval = -EINVAL; + goto out; + } ha->optrom_state = QLA_SWAITING; ql_dbg(ql_dbg_user, vha, 0x7061, @@ -316,8 +415,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, ha->optrom_buffer = NULL; break; case 1: - if (ha->optrom_state != QLA_SWAITING) - return -EINVAL; + if (ha->optrom_state != QLA_SWAITING) { + rval = -EINVAL; + goto out; + } ha->optrom_region_start = start; ha->optrom_region_size = start + size > ha->optrom_size ? 
@@ -331,13 +432,15 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, "(%x).\n", ha->optrom_region_size); ha->optrom_state = QLA_SWAITING; - return -ENOMEM; + rval = -ENOMEM; + goto out; } if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7063, "HBA not online, failing NVRAM update.\n"); - return -EAGAIN; + rval = -EAGAIN; + goto out; } ql_dbg(ql_dbg_user, vha, 0x7064, @@ -349,8 +452,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, ha->optrom_region_start, ha->optrom_region_size); break; case 2: - if (ha->optrom_state != QLA_SWAITING) - return -EINVAL; + if (ha->optrom_state != QLA_SWAITING) { + rval = -EINVAL; + goto out; + } /* * We need to be more restrictive on which FLASH regions are @@ -384,7 +489,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, if (!valid) { ql_log(ql_log_warn, vha, 0x7065, "Invalid start region 0x%x/0x%x.\n", start, size); - return -EINVAL; + rval = -EINVAL; + goto out; } ha->optrom_region_start = start; @@ -399,7 +505,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, "(%x)\n", ha->optrom_region_size); ha->optrom_state = QLA_SWAITING; - return -ENOMEM; + rval = -ENOMEM; + goto out; } ql_dbg(ql_dbg_user, vha, 0x7067, @@ -409,13 +516,16 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, memset(ha->optrom_buffer, 0, ha->optrom_region_size); break; case 3: - if (ha->optrom_state != QLA_SWRITING) - return -EINVAL; + if (ha->optrom_state != QLA_SWRITING) { + rval = -EINVAL; + goto out; + } if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7068, "HBA not online, failing flash update.\n"); - return -EAGAIN; + rval = -EAGAIN; + goto out; } ql_dbg(ql_dbg_user, vha, 0x7069, @@ -426,9 +536,12 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, ha->optrom_region_start, ha->optrom_region_size); break; default: - return -EINVAL; + rval = -EINVAL; } - return count; + +out: + mutex_unlock(&ha->optrom_mutex); + return rval; } static struct bin_attribute sysfs_optrom_ctl_attr = { @@ -551,7 +664,7 @@ do_read: } rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data, - addr, offset, SFP_BLOCK_SIZE, 0); + addr, offset, SFP_BLOCK_SIZE, BIT_1); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x706d, "Unable to read SFP data (%x/%x/%x).\n", rval, @@ -586,7 +699,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); int type; uint32_t idc_control; - + uint8_t *tmp_data = NULL; if (off != 0) return -EINVAL; @@ -597,14 +710,23 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, "Issuing ISP reset.\n"); scsi_block_requests(vha->host); - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); if (IS_QLA82XX(ha)) { ha->flags.isp82xx_no_md_cap = 1; qla82xx_idc_lock(ha); qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); + } else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + idc_control = qla8044_rd_reg(ha, + QLA8044_IDC_DRV_CTRL); + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, + (idc_control | GRACEFUL_RESET_BIT1)); + qla82xx_set_reset_owner(vha); + qla8044_idc_unlock(ha); + } else { + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); } - qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); break; @@ -640,7 +762,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, break; } case 0x2025e: - if (!IS_QLA82XX(ha) || vha 
!= base_vha) { + if (!IS_P3P_TYPE(ha) || vha != base_vha) { ql_log(ql_log_info, vha, 0x7071, "FCoE ctx reset no supported.\n"); return -EPERM; @@ -674,7 +796,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, __qla83xx_set_idc_control(vha, idc_control); qla83xx_idc_unlock(vha, 0); break; - + case 0x20261: + ql_dbg(ql_dbg_user, vha, 0x70e0, + "Updating cache versions without reset "); + + tmp_data = vmalloc(256); + if (!tmp_data) { + ql_log(ql_log_warn, vha, 0x70e1, + "Unable to allocate memory for VPD information update.\n"); + return -ENOMEM; + } + ha->isp_ops->get_flash_version(vha, tmp_data); + vfree(tmp_data); + break; } return count; } @@ -797,6 +931,7 @@ static struct sysfs_entry { int is4GBp_only; } bin_file_entries[] = { { "fw_dump", &sysfs_fw_dump_attr, }, + { "fw_dump_template", &sysfs_fw_dump_template_attr, 0x27 }, { "nvram", &sysfs_nvram_attr, }, { "optrom", &sysfs_optrom_attr, }, { "optrom_ctl", &sysfs_optrom_ctl_attr, }, @@ -822,6 +957,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) continue; if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) continue; + if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw)) + continue; ret = sysfs_create_bin_file(&host->shost_gendev.kobj, iter->attr); @@ -837,7 +974,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) } void -qla2x00_free_sysfs_attr(scsi_qla_host_t *vha) +qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon) { struct Scsi_Host *host = vha->host; struct sysfs_entry *iter; @@ -855,7 +992,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha) iter->attr); } - if (ha->beacon_blink_led == 1) + if (stop_beacon && ha->beacon_blink_led == 1) ha->isp_ops->beacon_off(vha); } @@ -865,7 +1002,7 @@ static ssize_t qla2x00_drvr_version_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str); + return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str); } static ssize_t @@ -876,7 +1013,7 @@ qla2x00_fw_version_show(struct device *dev, struct qla_hw_data *ha = vha->hw; char fw_str[128]; - return snprintf(buf, PAGE_SIZE, "%s\n", + return scnprintf(buf, PAGE_SIZE, "%s\n", ha->isp_ops->fw_version_str(vha, fw_str)); } @@ -888,13 +1025,16 @@ qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr, struct qla_hw_data *ha = vha->hw; uint32_t sn; - if (IS_FWI2_CAPABLE(ha)) { - qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE); - return snprintf(buf, PAGE_SIZE, "%s\n", buf); + if (IS_QLAFX00(vha->hw)) { + return scnprintf(buf, PAGE_SIZE, "%s\n", + vha->hw->mr.serial_num); + } else if (IS_FWI2_CAPABLE(ha)) { + qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1); + return strlen(strcat(buf, "\n")); } sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; - return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000, + return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000, sn % 100000); } @@ -903,7 +1043,7 @@ qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device); + return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device); } static ssize_t @@ -912,7 +1052,12 @@ qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr, { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", + + if (IS_QLAFX00(vha->hw)) + return scnprintf(buf, 
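The bin_file_entries table above gates optional sysfs binary attributes on small per-entry selector values (3 for CNA-capable parts, and now 0x27 for the ISP27xx fw_dump_template). A hedged sketch of that registration loop; the entry struct and predicate are illustrative, not the driver's exact types:

#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/printk.h>

struct demo_entry {
	char *name;
	struct bin_attribute *attr;
	int selector;		/* 0 = always, 0x27 = ISP27xx only, ... */
};

static void demo_alloc_attrs(struct kobject *kobj, struct demo_entry *table,
			     bool is_27xx)
{
	struct demo_entry *iter;
	int ret;

	for (iter = table; iter->name; iter++) {
		if (iter->selector == 0x27 && !is_27xx)
			continue;	/* attribute not applicable, skip it */
		ret = sysfs_create_bin_file(kobj, iter->attr);
		if (ret)
			pr_warn("unable to create sysfs %s attribute (%d)\n",
				iter->name, ret);
	}
}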
PAGE_SIZE, "%s\n", + vha->hw->mr.hw_version); + + return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", ha->product_id[0], ha->product_id[1], ha->product_id[2], ha->product_id[3]); } @@ -922,7 +1067,8 @@ qla2x00_model_name_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number); + + return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number); } static ssize_t @@ -930,7 +1076,7 @@ qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - return snprintf(buf, PAGE_SIZE, "%s\n", + return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc ? vha->hw->model_desc : ""); } @@ -941,7 +1087,7 @@ qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); char pci_info[30]; - return snprintf(buf, PAGE_SIZE, "%s\n", + return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->isp_ops->pci_info_str(vha, pci_info)); } @@ -956,29 +1102,29 @@ qla2x00_link_state_show(struct device *dev, struct device_attribute *attr, if (atomic_read(&vha->loop_state) == LOOP_DOWN || atomic_read(&vha->loop_state) == LOOP_DEAD || vha->device_flags & DFLG_NO_CABLE) - len = snprintf(buf, PAGE_SIZE, "Link Down\n"); + len = scnprintf(buf, PAGE_SIZE, "Link Down\n"); else if (atomic_read(&vha->loop_state) != LOOP_READY || qla2x00_reset_active(vha)) - len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n"); + len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n"); else { - len = snprintf(buf, PAGE_SIZE, "Link Up - "); + len = scnprintf(buf, PAGE_SIZE, "Link Up - "); switch (ha->current_topology) { case ISP_CFG_NL: - len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n"); break; case ISP_CFG_FL: - len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n"); break; case ISP_CFG_N: - len += snprintf(buf + len, PAGE_SIZE-len, + len += scnprintf(buf + len, PAGE_SIZE-len, "N_Port to N_Port\n"); break; case ISP_CFG_F: - len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n"); break; default: - len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n"); break; } } @@ -994,10 +1140,10 @@ qla2x00_zio_show(struct device *dev, struct device_attribute *attr, switch (vha->hw->zio_mode) { case QLA_ZIO_MODE_6: - len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n"); break; case QLA_ZIO_DISABLED: - len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); break; } return len; @@ -1037,7 +1183,7 @@ qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr, { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100); + return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100); } static ssize_t @@ -1067,9 +1213,9 @@ qla2x00_beacon_show(struct device *dev, struct device_attribute *attr, int len = 0; if (vha->hw->beacon_blink_led) - len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); + len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); else - len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); + len += scnprintf(buf + len, 
PAGE_SIZE-len, "Disabled\n"); return len; } @@ -1111,7 +1257,7 @@ qla2x00_optrom_bios_version_show(struct device *dev, { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1], + return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1], ha->bios_revision[0]); } @@ -1121,7 +1267,7 @@ qla2x00_optrom_efi_version_show(struct device *dev, { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1], + return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1], ha->efi_revision[0]); } @@ -1131,7 +1277,7 @@ qla2x00_optrom_fcode_version_show(struct device *dev, { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1], + return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1], ha->fcode_revision[0]); } @@ -1141,7 +1287,7 @@ qla2x00_optrom_fw_version_show(struct device *dev, { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n", + return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], ha->fw_revision[3]); } @@ -1153,10 +1299,10 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) - return snprintf(buf, PAGE_SIZE, "\n"); + if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); - return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", + return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", ha->gold_fw_version[0], ha->gold_fw_version[1], ha->gold_fw_version[2], ha->gold_fw_version[3]); } @@ -1166,7 +1312,7 @@ qla2x00_total_isp_aborts_show(struct device *dev, struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - return snprintf(buf, PAGE_SIZE, "%d\n", + return scnprintf(buf, PAGE_SIZE, "%d\n", vha->qla_stats.total_isp_aborts); } @@ -1180,16 +1326,16 @@ qla24xx_84xx_fw_version_show(struct device *dev, struct qla_hw_data *ha = vha->hw; if (!IS_QLA84XX(ha)) - return snprintf(buf, PAGE_SIZE, "\n"); + return scnprintf(buf, PAGE_SIZE, "\n"); if (ha->cs84xx->op_fw_version == 0) rval = qla84xx_verify_chip(vha, status); if ((rval == QLA_SUCCESS) && (status[0] == 0)) - return snprintf(buf, PAGE_SIZE, "%u\n", + return scnprintf(buf, PAGE_SIZE, "%u\n", (uint32_t)ha->cs84xx->op_fw_version); - return snprintf(buf, PAGE_SIZE, "\n"); + return scnprintf(buf, PAGE_SIZE, "\n"); } static ssize_t @@ -1199,10 +1345,10 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) - return snprintf(buf, PAGE_SIZE, "\n"); + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); - return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", + return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2], ha->mpi_capabilities); } @@ -1215,9 +1361,9 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr, struct qla_hw_data *ha = vha->hw; if (!IS_QLA81XX(ha) && 
!IS_QLA8031(ha)) - return snprintf(buf, PAGE_SIZE, "\n"); + return scnprintf(buf, PAGE_SIZE, "\n"); - return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", + return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]); } @@ -1228,7 +1374,7 @@ qla2x00_flash_block_size_show(struct device *dev, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); + return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); } static ssize_t @@ -1238,9 +1384,9 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); if (!IS_CNA_CAPABLE(vha->hw)) - return snprintf(buf, PAGE_SIZE, "\n"); + return scnprintf(buf, PAGE_SIZE, "\n"); - return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); + return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); } static ssize_t @@ -1250,12 +1396,9 @@ qla2x00_vn_port_mac_address_show(struct device *dev, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); if (!IS_CNA_CAPABLE(vha->hw)) - return snprintf(buf, PAGE_SIZE, "\n"); + return scnprintf(buf, PAGE_SIZE, "\n"); - return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n", - vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4], - vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2], - vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]); + return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac); } static ssize_t @@ -1264,7 +1407,7 @@ qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr, { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap); + return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap); } static ssize_t @@ -1274,12 +1417,6 @@ qla2x00_thermal_temp_show(struct device *dev, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); uint16_t temp = 0; - if (!vha->hw->thermal_support) { - ql_log(ql_log_warn, vha, 0x70db, - "Thermal not supported by this card.\n"); - goto done; - } - if (qla2x00_reset_active(vha)) { ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n"); goto done; @@ -1291,10 +1428,10 @@ qla2x00_thermal_temp_show(struct device *dev, } if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS) - return snprintf(buf, PAGE_SIZE, "%d\n", temp); + return scnprintf(buf, PAGE_SIZE, "%d\n", temp); done: - return snprintf(buf, PAGE_SIZE, "\n"); + return scnprintf(buf, PAGE_SIZE, "\n"); } static ssize_t @@ -1304,6 +1441,12 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int rval = QLA_FUNCTION_FAILED; uint16_t state[5]; + uint32_t pstate; + + if (IS_QLAFX00(vha->hw)) { + pstate = qlafx00_fw_state_show(dev, attr, buf); + return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate); + } if (qla2x00_reset_active(vha)) ql_log(ql_log_warn, vha, 0x707c, @@ -1313,7 +1456,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, if (rval != QLA_SUCCESS) memset(state, -1, sizeof(state)); - return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0], + return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0], state[1], state[2], state[3], state[4]); } @@ -1324,9 +1467,9 @@ qla2x00_diag_requests_show(struct device *dev, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); if (!IS_BIDI_CAPABLE(vha->hw)) - return snprintf(buf, PAGE_SIZE, "\n"); + return scnprintf(buf, 
PAGE_SIZE, "\n"); - return snprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count); + return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count); } static ssize_t @@ -1336,9 +1479,9 @@ qla2x00_diag_megabytes_show(struct device *dev, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); if (!IS_BIDI_CAPABLE(vha->hw)) - return snprintf(buf, PAGE_SIZE, "\n"); + return scnprintf(buf, PAGE_SIZE, "\n"); - return snprintf(buf, PAGE_SIZE, "%llu\n", + return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.transfer_bytes >> 20); } @@ -1352,12 +1495,43 @@ qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr, if (!ha->fw_dumped) size = 0; - else if (IS_QLA82XX(ha)) + else if (IS_P3P_TYPE(ha)) size = ha->md_template_size + ha->md_dump_size; else size = ha->fw_dump_len; - return snprintf(buf, PAGE_SIZE, "%d\n", size); + return scnprintf(buf, PAGE_SIZE, "%d\n", size); +} + +static ssize_t +qla2x00_allow_cna_fw_dump_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + if (!IS_P3P_TYPE(vha->hw)) + return scnprintf(buf, PAGE_SIZE, "\n"); + else + return scnprintf(buf, PAGE_SIZE, "%s\n", + vha->hw->allow_cna_fw_dump ? "true" : "false"); +} + +static ssize_t +qla2x00_allow_cna_fw_dump_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int val = 0; + + if (!IS_P3P_TYPE(vha->hw)) + return -EINVAL; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + vha->hw->allow_cna_fw_dump = val != 0; + + return strlen(buf); } static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); @@ -1401,6 +1575,9 @@ static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL); static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL); static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL); static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL); +static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR, + qla2x00_allow_cna_fw_dump_show, + qla2x00_allow_cna_fw_dump_store); struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_driver_version, @@ -1433,6 +1610,7 @@ struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_diag_requests, &dev_attr_diag_megabytes, &dev_attr_fw_dump_size, + &dev_attr_allow_cna_fw_dump, NULL, }; @@ -1454,6 +1632,11 @@ qla2x00_get_host_speed(struct Scsi_Host *shost) (shost_priv(shost)))->hw; u32 speed = FC_PORTSPEED_UNKNOWN; + if (IS_QLAFX00(ha)) { + qlafx00_get_host_speed(shost); + return; + } + switch (ha->link_data_rate) { case PORT_SPEED_1GB: speed = FC_PORTSPEED_1GBIT; @@ -1473,6 +1656,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost) case PORT_SPEED_16GB: speed = FC_PORTSPEED_16GBIT; break; + case PORT_SPEED_32GB: + speed = FC_PORTSPEED_32GBIT; + break; } fc_host_speed(shost) = speed; } @@ -1637,6 +1823,9 @@ qla2x00_issue_lip(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); + if (IS_QLAFX00(vha->hw)) + return 0; + qla2x00_loop_reset(vha); return 0; } @@ -1655,12 +1844,18 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) pfc_host_stat = &vha->fc_host_stat; memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); + if (IS_QLAFX00(vha->hw)) + goto done; + if (test_bit(UNLOADING, &vha->dpc_flags)) goto done; if (unlikely(pci_channel_offline(ha->pdev))) goto done; + if (qla2x00_reset_active(vha)) + goto done; + stats = 
dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); if (stats == NULL) { ql_log(ql_log_warn, vha, 0x707d, @@ -1673,7 +1868,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) if (IS_FWI2_CAPABLE(ha)) { rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma); } else if (atomic_read(&base_vha->loop_state) == LOOP_READY && - !qla2x00_reset_active(vha) && !ha->dpc_active) { + !ha->dpc_active) { /* Must be in a 'READY' state for statistics retrieval. */ rval = qla2x00_get_link_status(base_vha, base_vha->loop_id, stats, stats_dma); @@ -1692,11 +1887,21 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) pfc_host_stat->lip_count = stats->lip_cnt; pfc_host_stat->tx_frames = stats->tx_frames; pfc_host_stat->rx_frames = stats->rx_frames; - pfc_host_stat->dumped_frames = stats->dumped_frames; + pfc_host_stat->dumped_frames = stats->discarded_frames; pfc_host_stat->nos_count = stats->nos_rcvd; + pfc_host_stat->error_frames = + stats->dropped_frames + stats->discarded_frames; + pfc_host_stat->rx_words = vha->qla_stats.input_bytes; + pfc_host_stat->tx_words = vha->qla_stats.output_bytes; } + pfc_host_stat->fcp_control_requests = vha->qla_stats.control_requests; + pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests; + pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests; pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20; pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20; + pfc_host_stat->seconds_since_last_reset = + get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset; + do_div(pfc_host_stat->seconds_since_last_reset, HZ); done_free: dma_pool_free(ha->s_dma_pool, stats, stats_dma); @@ -1705,6 +1910,16 @@ done: } static void +qla2x00_reset_host_stats(struct Scsi_Host *shost) +{ + scsi_qla_host_t *vha = shost_priv(shost); + + memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); + + vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); +} + +static void qla2x00_get_host_symbolic_name(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); @@ -1925,6 +2140,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) vha->flags.delete_progress = 1; + qlt_remove_target(ha, vha); + fc_remove_host(vha->host); scsi_remove_host(vha->host); @@ -1938,11 +2155,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) "Timer for the VP[%d] has stopped\n", vha->vp_idx); } - /* No pending activities shall be there on the vha now */ - if (ql2xextended_error_logging & ql_dbg_user) - msleep(random32()%10); /* Just to see if something falls on - * the net we have placed below */ - BUG_ON(atomic_read(&vha->vref_count)); qla2x00_free_fcports(vha); @@ -2015,6 +2227,7 @@ struct fc_function_template qla2xxx_transport_functions = { .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, .terminate_rport_io = qla2x00_terminate_rport_io, .get_fc_host_stats = qla2x00_get_fc_host_stats, + .reset_fc_host_stats = qla2x00_reset_host_stats, .vport_create = qla24xx_vport_create, .vport_disable = qla24xx_vport_disable, @@ -2061,6 +2274,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = { .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk, .terminate_rport_io = qla2x00_terminate_rport_io, .get_fc_host_stats = qla2x00_get_fc_host_stats, + .reset_fc_host_stats = qla2x00_reset_host_stats, + .bsg_request = qla24xx_bsg_request, .bsg_timeout = qla24xx_bsg_timeout, }; @@ -2092,6 +2307,12 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha) FC_PORTSPEED_1GBIT; else if (IS_QLA23XX(ha)) speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; + else if 
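The new seconds_since_last_reset computation above subtracts a stored 64-bit jiffies stamp from get_jiffies_64() and divides by HZ with do_div() rather than "/", because a plain 64-bit division would pull in libgcc helpers on 32-bit architectures. do_div() divides its first argument in place and returns the remainder. A compact sketch:

#include <linux/jiffies.h>
#include <linux/types.h>
#include <asm/div64.h>

static u64 demo_seconds_since(u64 jiffies_at_last_reset)
{
	u64 n = get_jiffies_64() - jiffies_at_last_reset;

	do_div(n, HZ);		/* n now holds whole elapsed seconds */
	return n;
}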
(IS_QLAFX00(ha)) + speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | + FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; + else if (IS_QLA27XX(ha)) + speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT | + FC_PORTSPEED_8GBIT; else speed = FC_PORTSPEED_1GBIT; fc_host_supported_speeds(vha->host) = speed; diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index ad54099cb80..524f9eb7fcd 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -30,14 +30,31 @@ qla2x00_bsg_sp_free(void *data, void *ptr) struct scsi_qla_host *vha = sp->fcport->vha; struct fc_bsg_job *bsg_job = sp->u.bsg_job; struct qla_hw_data *ha = vha->hw; + struct qla_mt_iocb_rqst_fx00 *piocb_rqst; - dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, - bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + if (sp->type == SRB_FXIOCB_BCMD) { + piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) + &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; - dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, - bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) + dma_unmap_sg(&ha->pdev->dev, + bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + + if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) + dma_unmap_sg(&ha->pdev->dev, + bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + } else { + dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + + dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + } if (sp->type == SRB_CT_CMD || + sp->type == SRB_FXIOCB_BCMD || sp->type == SRB_ELS_CMD_HST) kfree(sp->fcport); qla2x00_rel_sp(vha, sp); @@ -108,7 +125,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) uint32_t len; uint32_t oper; - if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) { + if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) { ret = -EINVAL; goto exit_fcp_prio_cfg; } @@ -252,6 +269,12 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) type = "FC_BSG_HST_ELS_NOLOGIN"; } + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n"); + rval = -EIO; + goto done; + } + /* pass through is supported only for ISP 4Gb or higher */ if (!IS_FWI2_CAPABLE(ha)) { ql_dbg(ql_dbg_user, vha, 0x7001, @@ -309,12 +332,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job) NPH_FABRIC_CONTROLLER : NPH_F_PORT; } - if (!vha->flags.online) { - ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n"); - rval = -EIO; - goto done; - } - req_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); @@ -382,7 +399,7 @@ done_unmap_sg: goto done_free_fcport; done_free_fcport: - if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN) + if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) kfree(fcport); done: return rval; @@ -542,7 +559,7 @@ qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, uint16_t new_config[4]; struct qla_hw_data *ha = vha->hw; - if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) goto done_reset_internal; memset(new_config, 0 , sizeof(new_config)); @@ -610,9 +627,10 
@@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, { int ret = 0; int rval = 0; + unsigned long rem_tmo = 0, current_tmo = 0; struct qla_hw_data *ha = vha->hw; - if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) + if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) goto done_set_internal; if (mode == INTERNAL_LOOPBACK) @@ -635,8 +653,19 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, } /* Wait for DCBX complete event */ - if (!wait_for_completion_timeout(&ha->dcbx_comp, - (DCBX_COMP_TIMEOUT * HZ))) { + current_tmo = DCBX_COMP_TIMEOUT * HZ; + while (1) { + rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp, + current_tmo); + if (!ha->idc_extend_tmo || rem_tmo) { + ha->idc_extend_tmo = 0; + break; + } + current_tmo = ha->idc_extend_tmo * HZ; + ha->idc_extend_tmo = 0; + } + + if (!rem_tmo) { ql_dbg(ql_dbg_user, vha, 0x7022, "DCBX completion not received.\n"); ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0); @@ -661,6 +690,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, } ha->notify_dcbx_comp = 0; + ha->idc_extend_tmo = 0; done_set_internal: return rval; @@ -751,10 +781,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) elreq.transfer_size = req_data_len; elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; + elreq.iteration_count = + bsg_job->request->rqst_data.h_vendor.vendor_cmd[2]; if (atomic_read(&vha->loop_state) == LOOP_READY && (ha->current_topology == ISP_CFG_F || - ((IS_QLA81XX(ha) || IS_QLA8031(ha)) && + ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) && le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && elreq.options == EXTERNAL_LOOPBACK) { @@ -764,7 +796,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) command_sent = INT_DEF_LB_ECHO_CMD; rval = qla2x00_echo_test(vha, &elreq, response); } else { - if (IS_QLA81XX(ha) || IS_QLA8031(ha)) { + if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) { memset(config, 0, sizeof(config)); memset(new_config, 0, sizeof(new_config)); @@ -787,7 +819,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) "elreq.options=%04x\n", elreq.options); if (elreq.options == EXTERNAL_LOOPBACK) - if (IS_QLA8031(ha)) + if (IS_QLA8031(ha) || IS_QLA8044(ha)) rval = qla81xx_set_loopback_mode(vha, config, new_config, elreq.options); else @@ -1065,14 +1097,6 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) return -EINVAL; } - ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request + - sizeof(struct fc_bsg_request)); - if (!ql84_mgmt) { - ql_log(ql_log_warn, vha, 0x703b, - "MGMT header not provided, exiting.\n"); - return -EINVAL; - } - mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); if (!mn) { ql_log(ql_log_warn, vha, 0x703c, @@ -1083,7 +1107,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) memset(mn, 0, sizeof(struct access_chip_84xx)); mn->entry_type = ACCESS_CHIP_IOCB_TYPE; mn->entry_count = 1; - + ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request); switch (ql84_mgmt->mgmt.cmd) { case QLA84_MGMT_READ_MEM: case QLA84_MGMT_GET_INFO: @@ -1255,6 +1279,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) int rval = 0; struct qla_port_param *port_param = NULL; fc_port_t *fcport = NULL; + int found = 0; uint16_t mb[MAILBOX_REGISTER_COUNT]; uint8_t *rsp_ptr = NULL; @@ -1263,14 +1288,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) return -EINVAL; } - port_param = (struct qla_port_param *)((char *)bsg_job->request + - sizeof(struct fc_bsg_request)); - if (!port_param) { - 
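The DCBX wait in qla81xx_set_loopback_mode() above changes from a single wait_for_completion_timeout() into a loop: if the timer expires but the firmware has posted an extension request (idc_extend_tmo), the wait is re-armed with the extended budget. A reduced sketch of that re-armable wait; the context struct is a stand-in for qla_hw_data:

#include <linux/completion.h>
#include <linux/jiffies.h>

struct demo_ctx {
	struct completion comp;
	unsigned long idc_extend_tmo;	/* seconds, set from an async event */
};

/* Returns true only if the completion actually fired. */
static bool demo_wait_dcbx(struct demo_ctx *ctx, unsigned long tmo_secs)
{
	unsigned long rem = 0, cur = tmo_secs * HZ;

	while (1) {
		rem = wait_for_completion_timeout(&ctx->comp, cur);
		if (!ctx->idc_extend_tmo || rem)
			break;			/* done, or hard timeout */
		cur = ctx->idc_extend_tmo * HZ;	/* firmware asked for more */
		ctx->idc_extend_tmo = 0;
	}
	ctx->idc_extend_tmo = 0;
	return rem != 0;
}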
ql_log(ql_log_warn, vha, 0x7047, - "port_param header not provided.\n"); - return -EINVAL; - } - + port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request); if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) { ql_log(ql_log_warn, vha, 0x7048, "Invalid destination type.\n"); @@ -1284,10 +1302,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn, fcport->port_name, sizeof(fcport->port_name))) continue; + + found = 1; break; } - if (!fcport) { + if (!found) { ql_log(ql_log_warn, vha, 0x7049, "Failed to find port.\n"); return -EINVAL; @@ -1314,12 +1334,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) if (rval) { ql_log(ql_log_warn, vha, 0x704c, - "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- " - "%04x %x %04x %04x.\n", fcport->port_name[0], - fcport->port_name[1], fcport->port_name[2], - fcport->port_name[3], fcport->port_name[4], - fcport->port_name[5], fcport->port_name[6], - fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]); + "iIDMA cmd failed for %8phN -- " + "%04x %x %04x %04x.\n", fcport->port_name, + rval, fcport->fp_speed, mb[0], mb[1]); rval = (DID_ERROR << 16); } else { if (!port_param->mode) { @@ -1420,9 +1437,12 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job) if (ha->flags.nic_core_reset_hdlr_active) return -EBUSY; + mutex_lock(&ha->optrom_mutex); rval = qla2x00_optrom_setup(bsg_job, vha, 0); - if (rval) + if (rval) { + mutex_unlock(&ha->optrom_mutex); return rval; + } ha->isp_ops->read_optrom(vha, ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); @@ -1436,6 +1456,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job) vfree(ha->optrom_buffer); ha->optrom_buffer = NULL; ha->optrom_state = QLA_SWAITING; + mutex_unlock(&ha->optrom_mutex); bsg_job->job_done(bsg_job); return rval; } @@ -1448,9 +1469,12 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job) struct qla_hw_data *ha = vha->hw; int rval = 0; + mutex_lock(&ha->optrom_mutex); rval = qla2x00_optrom_setup(bsg_job, vha, 1); - if (rval) + if (rval) { + mutex_unlock(&ha->optrom_mutex); return rval; + } /* Set the isp82xx_no_md_cap not to capture minidump */ ha->flags.isp82xx_no_md_cap = 1; @@ -1466,6 +1490,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job) vfree(ha->optrom_buffer); ha->optrom_buffer = NULL; ha->optrom_state = QLA_SWAITING; + mutex_unlock(&ha->optrom_mutex); bsg_job->job_done(bsg_job); return rval; } @@ -1878,7 +1903,209 @@ done: bsg_job->reply->reply_payload_rcv_len = 0; bsg_job->reply->result = (DID_OK) << 16; bsg_job->job_done(bsg_job); - /* Always retrun success, vendor rsp carries correct status */ + /* Always return success, vendor rsp carries correct status */ + return 0; +} + +static int +qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = (DRIVER_ERROR << 16); + struct qla_mt_iocb_rqst_fx00 *piocb_rqst; + srb_t *sp; + int req_sg_cnt = 0, rsp_sg_cnt = 0; + struct fc_port *fcport; + char *type = "FC_BSG_HST_FX_MGMT"; + + /* Copy the IOCB specific information */ + piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) + &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; + + /* Dump the vendor information */ + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf, + (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00)); + + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x70d0, + "Host is not online.\n"); + rval = -EIO; + goto done; + } + + if (piocb_rqst->flags 
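The iIDMA logging hunk above collapses eight "%02x" conversions into "%8phN", and the earlier MAC hunk uses "%pMR": the kernel's %ph specifier dumps a byte buffer as hex (a leading count such as 8 bounds it, and the N suffix drops separators), while %pMR prints a 6-byte MAC in reversed byte order, matching how FCoE stores the VN-port address. Illustration only:

#include <linux/printk.h>
#include <linux/types.h>

static void demo_print_ids(const u8 wwpn[8], const u8 vn_port_mac[6])
{
	/* prints e.g. "wwpn=21000024ff31e0b2 mac=00:c0:dd:12:34:56" */
	pr_info("wwpn=%8phN mac=%pMR\n", wwpn, vn_port_mac);
}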
& SRB_FXDISC_REQ_DMA_VALID) { + req_sg_cnt = dma_map_sg(&ha->pdev->dev, + bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + if (!req_sg_cnt) { + ql_log(ql_log_warn, vha, 0x70c7, + "dma_map_sg return %d for request\n", req_sg_cnt); + rval = -ENOMEM; + goto done; + } + } + + if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) { + rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, + bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + if (!rsp_sg_cnt) { + ql_log(ql_log_warn, vha, 0x70c8, + "dma_map_sg return %d for reply\n", rsp_sg_cnt); + rval = -ENOMEM; + goto done_unmap_req_sg; + } + } + + ql_dbg(ql_dbg_user, vha, 0x70c9, + "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " + "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, + req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); + + /* Allocate a dummy fcport structure, since functions preparing the + * IOCB and mailbox command retrieves port specific information + * from fcport structure. For Host based ELS commands there will be + * no fcport structure allocated + */ + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (!fcport) { + ql_log(ql_log_warn, vha, 0x70ca, + "Failed to allocate fcport.\n"); + rval = -ENOMEM; + goto done_unmap_rsp_sg; + } + + /* Alloc SRB structure */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + ql_log(ql_log_warn, vha, 0x70cb, + "qla2x00_get_sp failed.\n"); + rval = -ENOMEM; + goto done_free_fcport; + } + + /* Initialize all required fields of fcport */ + fcport->vha = vha; + fcport->loop_id = piocb_rqst->dataword; + + sp->type = SRB_FXIOCB_BCMD; + sp->name = "bsg_fx_mgmt"; + sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt); + sp->u.bsg_job = bsg_job; + sp->free = qla2x00_bsg_sp_free; + sp->done = qla2x00_bsg_job_done; + + ql_dbg(ql_dbg_user, vha, 0x70cc, + "bsg rqst type: %s fx_mgmt_type: %x id=%x\n", + type, piocb_rqst->func_type, fcport->loop_id); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x70cd, + "qla2x00_start_sp failed=%d.\n", rval); + mempool_free(sp, ha->srb_mempool); + rval = -EIO; + goto done_free_fcport; + } + return rval; + +done_free_fcport: + kfree(fcport); + +done_unmap_rsp_sg: + if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) + dma_unmap_sg(&ha->pdev->dev, + bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); +done_unmap_req_sg: + if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) + dma_unmap_sg(&ha->pdev->dev, + bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + +done: + return rval; +} + +static int +qla26xx_serdes_op(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + int rval = 0; + struct qla_serdes_reg sr; + + memset(&sr, 0, sizeof(sr)); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); + + switch (sr.cmd) { + case INT_SC_SERDES_WRITE_REG: + rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val); + bsg_job->reply->reply_payload_rcv_len = 0; + break; + case INT_SC_SERDES_READ_REG: + rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val); + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); + bsg_job->reply->reply_payload_rcv_len = sizeof(sr); + break; + default: + ql_dbg(ql_dbg_user, vha, 0x708c, + "Unknown serdes cmd %x.\n", sr.cmd); + rval = -EINVAL; + break; + } + + 
bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + rval ? EXT_STATUS_MAILBOX : 0; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job->reply->result = DID_OK << 16; + bsg_job->job_done(bsg_job); + return 0; +} + +static int +qla8044_serdes_op(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + int rval = 0; + struct qla_serdes_reg_ex sr; + + memset(&sr, 0, sizeof(sr)); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); + + switch (sr.cmd) { + case INT_SC_SERDES_WRITE_REG: + rval = qla8044_write_serdes_word(vha, sr.addr, sr.val); + bsg_job->reply->reply_payload_rcv_len = 0; + break; + case INT_SC_SERDES_READ_REG: + rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val); + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); + bsg_job->reply->reply_payload_rcv_len = sizeof(sr); + break; + default: + ql_dbg(ql_dbg_user, vha, 0x70cf, + "Unknown serdes cmd %x.\n", sr.cmd); + rval = -EINVAL; + break; + } + + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + rval ? EXT_STATUS_MAILBOX : 0; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job->reply->result = DID_OK << 16; + bsg_job->job_done(bsg_job); return 0; } @@ -1928,6 +2155,15 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) case QL_VND_DIAG_IO_CMD: return qla24xx_process_bidir_cmd(bsg_job); + case QL_VND_FX00_MGMT_CMD: + return qlafx00_mgmt_cmd(bsg_job); + + case QL_VND_SERDES_OP: + return qla26xx_serdes_op(bsg_job); + + case QL_VND_SERDES_OP_EX: + return qla8044_serdes_op(bsg_job); + default: return -ENOSYS; } @@ -2007,8 +2243,10 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) sp = req->outstanding_cmds[cnt]; if (sp) { if (((sp->type == SRB_CT_CMD) || - (sp->type == SRB_ELS_CMD_HST)) + (sp->type == SRB_ELS_CMD_HST) || + (sp->type == SRB_FXIOCB_BCMD)) && (sp->u.bsg_job == bsg_job)) { + req->outstanding_cmds[cnt] = NULL; spin_unlock_irqrestore(&ha->hardware_lock, flags); if (ha->isp_ops->abort_command(sp)) { ql_log(ql_log_warn, vha, 0x7089, @@ -2036,8 +2274,6 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) done: spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (bsg_job->request->msgcode == FC_BSG_HST_CT) - kfree(sp->fcport); - qla2x00_rel_sp(vha, sp); + sp->free(vha, sp); return 0; } diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index e9f6b9bbf29..d38f9efa56f 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
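Both serdes handlers above copy a small fixed-size command struct out of the BSG request scatterlist with sg_copy_to_buffer(), act on it, and push the result back with sg_copy_from_buffer(). A hedged round-trip sketch; demo_reg stands in for struct qla_serdes_reg:

#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_reg {
	u16 cmd;
	u16 addr;
	u16 val;
} __packed;

static void demo_serdes_roundtrip(struct scatterlist *req_sg, int req_cnt,
				  struct scatterlist *rsp_sg, int rsp_cnt)
{
	struct demo_reg sr;

	memset(&sr, 0, sizeof(sr));
	sg_copy_to_buffer(req_sg, req_cnt, &sr, sizeof(sr));	/* sg -> sr */
	sr.val = 0xbeef;		/* pretend the register was read */
	sg_copy_from_buffer(rsp_sg, rsp_cnt, &sr, sizeof(sr));	/* sr -> sg */
}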
*/ @@ -22,6 +22,9 @@ #define QL_VND_DIAG_IO_CMD 0x0A #define QL_VND_WRITE_I2C 0x10 #define QL_VND_READ_I2C 0x11 +#define QL_VND_FX00_MGMT_CMD 0x12 +#define QL_VND_SERDES_OP 0x13 +#define QL_VND_SERDES_OP_EX 0x14 /* BSG Vendor specific subcode returns */ #define EXT_STATUS_OK 0 @@ -211,4 +214,22 @@ struct qla_i2c_access { uint8_t buffer[0x40]; } __packed; +/* 26xx serdes register interface */ + +/* serdes reg commands */ +#define INT_SC_SERDES_READ_REG 1 +#define INT_SC_SERDES_WRITE_REG 2 + +struct qla_serdes_reg { + uint16_t cmd; + uint16_t addr; + uint16_t val; +} __packed; + +struct qla_serdes_reg_ex { + uint16_t cmd; + uint32_t addr; + uint32_t val; +} __packed; + #endif diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 1626de52e32..c72ee97bf3f 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -11,36 +11,67 @@ * ---------------------------------------------------------------------- * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- - * | Module Init and Probe | 0x0126 | 0x4b,0xba,0xfa | - * | Mailbox commands | 0x115b | 0x111a-0x111b | - * | | | 0x112c-0x112e | - * | | | 0x113a | - * | Device Discovery | 0x2087 | 0x2020-0x2022, | + * | Module Init and Probe | 0x017d | 0x004b,0x0141 | + * | | | 0x0144,0x0146 | + * | | | 0x015b-0x0160 | + * | | | 0x016e-0x0170 | + * | Mailbox commands | 0x118d | 0x1018-0x1019 | + * | | | 0x10ca | + * | | | 0x1115-0x1116 | + * | | | 0x111a-0x111b | + * | | | 0x1155-0x1158 | + * | Device Discovery | 0x2095 | 0x2020-0x2022, | + * | | | 0x2011-0x2012, | * | | | 0x2016 | - * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b | + * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b | * | | | 0x3027-0x3028 | - * | | | 0x302d-0x302e | - * | DPC Thread | 0x401d | 0x4002,0x4013 | - * | Async Events | 0x5071 | 0x502b-0x502f | + * | | | 0x303d-0x3041 | + * | | | 0x302d,0x3033 | + * | | | 0x3036,0x3038 | + * | | | 0x303a | + * | DPC Thread | 0x4023 | 0x4002,0x4013 | + * | Async Events | 0x5087 | 0x502b-0x502f | * | | | 0x5047,0x5052 | - * | Timer Routines | 0x6011 | | - * | User Space Interactions | 0x70c4 | 0x7018,0x702e, | - * | | | 0x7020,0x7024, | - * | | | 0x7039,0x7045, | - * | | | 0x7073-0x7075, | - * | | | 0x708c, | - * | | | 0x70a5,0x70a6, | - * | | | 0x70a8,0x70ab, | + * | | | 0x5084,0x5075 | + * | | | 0x503d,0x5044 | + * | | | 0x507b | + * | Timer Routines | 0x6012 | | + * | User Space Interactions | 0x70e2 | 0x7018,0x702e | + * | | | 0x7020,0x7024 | + * | | | 0x7039,0x7045 | + * | | | 0x7073-0x7075 | + * | | | 0x70a5-0x70a6 | + * | | | 0x70a8,0x70ab | * | | | 0x70ad-0x70ae | - * | Task Management | 0x803c | 0x8025-0x8026 | - * | | | 0x800b,0x8039 | + * | | | 0x70d7-0x70db | + * | | | 0x70de-0x70df | + * | Task Management | 0x803d | 0x8000,0x800b | + * | | | 0x8019 | + * | | | 0x8025,0x8026 | + * | | | 0x8031,0x8032 | + * | | | 0x8039,0x803c | * | AER/EEH | 0x9011 | | * | Virtual Port | 0xa007 | | - * | ISP82XX Specific | 0xb086 | 0xb002,0xb024 | + * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 | + * | | | 0xb09e,0xb0ae | + * | | | 0xb0c3,0xb0c6 | + * | | | 0xb0e0-0xb0ef | + * | | | 0xb085,0xb0dc | + * | | | 0xb107,0xb108 | + * | | | 0xb111,0xb11e | + * | | | 0xb12c,0xb12d | + * | | | 0xb13a,0xb142 | + * | | | 
0xb13c-0xb140 | + * | | | 0xb149 | * | MultiQ | 0xc00c | | - * | Misc | 0xd010 | | - * | Target Mode | 0xe070 | | - * | Target Mode Management | 0xf072 | | + * | Misc | 0xd212 | 0xd017-0xd019 | + * | | | 0xd020 | + * | | | 0xd030-0xd0ff | + * | | | 0xd101-0xd1fe | + * | | | 0xd213-0xd2fe | + * | Target Mode | 0xe078 | | + * | Target Mode Management | 0xf072 | 0xf002-0xf003 | + * | | | 0xf046-0xf049 | * | Target Mode Task Management | 0x1000b | | * ---------------------------------------------------------------------- */ @@ -82,7 +113,87 @@ qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr) return ptr + (rsp->length * sizeof(response_t)); } -static int +int +qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, + uint32_t ram_dwords, void **nxt) +{ + int rval; + uint32_t cnt, stat, timer, dwords, idx; + uint16_t mb0, mb1; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + dma_addr_t dump_dma = ha->gid_list_dma; + uint32_t *dump = (uint32_t *)ha->gid_list; + + rval = QLA_SUCCESS; + mb0 = 0; + + WRT_REG_WORD(®->mailbox0, MBC_LOAD_DUMP_MPI_RAM); + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + dwords = qla2x00_gid_list_size(ha) / 4; + for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS; + cnt += dwords, addr += dwords) { + if (cnt + dwords > ram_dwords) + dwords = ram_dwords - cnt; + + WRT_REG_WORD(®->mailbox1, LSW(addr)); + WRT_REG_WORD(®->mailbox8, MSW(addr)); + + WRT_REG_WORD(®->mailbox2, MSW(dump_dma)); + WRT_REG_WORD(®->mailbox3, LSW(dump_dma)); + WRT_REG_WORD(®->mailbox6, MSW(MSD(dump_dma))); + WRT_REG_WORD(®->mailbox7, LSW(MSD(dump_dma))); + + WRT_REG_WORD(®->mailbox4, MSW(dwords)); + WRT_REG_WORD(®->mailbox5, LSW(dwords)); + + WRT_REG_WORD(®->mailbox9, 0); + WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); + + ha->flags.mbox_int = 0; + for (timer = 6000000; timer; timer--) { + /* Check for pending interrupts. */ + stat = RD_REG_DWORD(®->host_status); + if (stat & HSRX_RISC_INT) { + stat &= 0xff; + + if (stat == 0x1 || stat == 0x2 || + stat == 0x10 || stat == 0x11) { + set_bit(MBX_INTERRUPT, + &ha->mbx_cmd_flags); + + mb0 = RD_REG_WORD(®->mailbox0); + mb1 = RD_REG_WORD(®->mailbox1); + + WRT_REG_DWORD(®->hccr, + HCCRX_CLR_RISC_INT); + RD_REG_DWORD(®->hccr); + break; + } + + /* Clear this intr; it wasn't a mailbox intr */ + WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); + RD_REG_DWORD(®->hccr); + } + udelay(5); + } + ha->flags.mbox_int = 1; + + if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { + rval = mb0 & MBS_MASK; + for (idx = 0; idx < dwords; idx++) + ram[cnt + idx] = IS_QLA27XX(ha) ? + le32_to_cpu(dump[idx]) : swab32(dump[idx]); + } else { + rval = QLA_FUNCTION_FAILED; + } + } + + *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL; + return rval; +} + +int qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, uint32_t ram_dwords, void **nxt) { @@ -117,6 +228,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, WRT_REG_WORD(®->mailbox5, LSW(dwords)); WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); + ha->flags.mbox_int = 0; for (timer = 6000000; timer; timer--) { /* Check for pending interrupts. */ stat = RD_REG_DWORD(®->host_status); @@ -142,11 +254,13 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, } udelay(5); } + ha->flags.mbox_int = 1; if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { rval = mb0 & MBS_MASK; for (idx = 0; idx < dwords; idx++) - ram[cnt + idx] = swab32(dump[idx]); + ram[cnt + idx] = IS_QLA27XX(ha) ? 
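qla27xx_dump_mpi_ram() above programs a dump request into the mailbox registers, rings the host-interrupt doorbell, and then busy-polls host_status until the RISC raises a mailbox-class interrupt or the iteration budget runs out. A stripped-down version of that poll; the status bit is illustrative, not the real ISP register map:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/types.h>

#define DEMO_RISC_INT	0x00008000	/* hypothetical "RISC interrupt" bit */

static bool demo_poll_mbx_int(void __iomem *host_status, unsigned long spins)
{
	u32 stat;

	while (spins--) {
		stat = readl(host_status);
		if (stat & DEMO_RISC_INT)
			return true;	/* mailbox completion pending */
		udelay(5);		/* matches the driver's 5 us step */
	}
	return false;			/* budget exhausted, caller fails */
}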
+ le32_to_cpu(dump[idx]) : swab32(dump[idx]); } else { rval = QLA_FUNCTION_FAILED; } @@ -167,9 +281,15 @@ qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram, if (rval != QLA_SUCCESS) return rval; + set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags); + /* External Memory. */ - return qla24xx_dump_ram(ha, 0x100000, *nxt, + rval = qla24xx_dump_ram(ha, 0x100000, *nxt, ha->fw_memory_size - 0x100000 + 1, nxt); + if (rval == QLA_SUCCESS) + set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags); + + return rval; } static uint32_t * @@ -186,34 +306,30 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase, return buf; } -static inline int -qla24xx_pause_risc(struct device_reg_24xx __iomem *reg) +void +qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha) { - int rval = QLA_SUCCESS; - uint32_t cnt; - WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_PAUSE); - for (cnt = 30000; - ((RD_REG_DWORD(®->host_status) & HSRX_RISC_PAUSED) == 0) && - rval == QLA_SUCCESS; cnt--) { - if (cnt) - udelay(100); - else - rval = QLA_FUNCTION_TIMEOUT; - } - return rval; + /* 100 usec delay is sufficient enough for hardware to pause RISC */ + udelay(100); + if (RD_REG_DWORD(®->host_status) & HSRX_RISC_PAUSED) + set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags); } -static int +int qla24xx_soft_reset(struct qla_hw_data *ha) { int rval = QLA_SUCCESS; uint32_t cnt; - uint16_t mb0, wd; + uint16_t wd; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - /* Reset RISC. */ + /* + * Reset RISC. The delay is dependent on system architecture. + * Driver can proceed with the reset sequence after waiting + * for a timeout period. + */ WRT_REG_DWORD(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); for (cnt = 0; cnt < 30000; cnt++) { if ((RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) @@ -221,19 +337,14 @@ qla24xx_soft_reset(struct qla_hw_data *ha) udelay(10); } + if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE)) + set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); WRT_REG_DWORD(®->ctrl_status, CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); udelay(100); - /* Wait for firmware to complete NVRAM accesses. */ - mb0 = (uint32_t) RD_REG_WORD(®->mailbox0); - for (cnt = 10000 ; cnt && mb0; cnt--) { - udelay(5); - mb0 = (uint32_t) RD_REG_WORD(®->mailbox0); - barrier(); - } /* Wait for soft-reset to complete. */ for (cnt = 0; cnt < 30000; cnt++) { @@ -243,16 +354,21 @@ qla24xx_soft_reset(struct qla_hw_data *ha) udelay(10); } + if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) + set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags); + WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET); RD_REG_DWORD(®->hccr); /* PCI Posting. 
*/ - for (cnt = 30000; RD_REG_WORD(®->mailbox0) != 0 && + for (cnt = 10000; RD_REG_WORD(®->mailbox0) != 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) - udelay(100); + udelay(10); else rval = QLA_FUNCTION_TIMEOUT; } + if (rval == QLA_SUCCESS) + set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); return rval; } @@ -401,7 +517,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, void *ring; } aq, *aqp; - if (!ha->tgt.atio_q_length) + if (!ha->tgt.atio_ring) return ptr; num_queues = 1; @@ -513,9 +629,9 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) uint32_t cnt, que_idx; uint8_t que_cnt; struct qla2xxx_mq_chain *mq = ptr; - struct device_reg_25xxmq __iomem *reg; + device_reg_t __iomem *reg; - if (!ha->mqenable || IS_QLA83XX(ha)) + if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) return ptr; mq = ptr; @@ -527,13 +643,16 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) ha->max_req_queues : ha->max_rsp_queues; mq->count = htonl(que_cnt); for (cnt = 0; cnt < que_cnt; cnt++) { - reg = (struct device_reg_25xxmq __iomem *) - (ha->mqiobase + cnt * QLA_QUE_PAGE); + reg = ISP_QUE_REG(ha, cnt); que_idx = cnt * 4; - mq->qregs[que_idx] = htonl(RD_REG_DWORD(®->req_q_in)); - mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(®->req_q_out)); - mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(®->rsp_q_in)); - mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(®->rsp_q_out)); + mq->qregs[que_idx] = + htonl(RD_REG_DWORD(®->isp25mq.req_q_in)); + mq->qregs[que_idx+1] = + htonl(RD_REG_DWORD(®->isp25mq.req_q_out)); + mq->qregs[que_idx+2] = + htonl(RD_REG_DWORD(®->isp25mq.rsp_q_in)); + mq->qregs[que_idx+3] = + htonl(RD_REG_DWORD(®->isp25mq.rsp_q_out)); } return ptr + sizeof(struct qla2xxx_mq_chain); @@ -546,12 +665,13 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xd000, - "Failed to dump firmware (%x).\n", rval); + "Failed to dump firmware (%x), dump status flags (0x%lx).\n", + rval, ha->fw_dump_cap_flags); ha->fw_dumped = 0; } else { ql_log(ql_log_info, vha, 0xd001, - "Firmware dump saved to temp buffer (%ld/%p).\n", - vha->host_no, ha->fw_dump); + "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n", + vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags); ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); } @@ -935,11 +1055,12 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) uint32_t *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return; risc_address = ext_mem_cnt = 0; flags = 0; + ha->fw_dump_cap_flags = 0; if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); @@ -962,10 +1083,11 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) fw->host_status = htonl(RD_REG_DWORD(®->host_status)); - /* Pause RISC. */ - rval = qla24xx_pause_risc(reg); - if (rval != QLA_SUCCESS) - goto qla24xx_fw_dump_failed_0; + /* + * Pause RISC. No need to track timeout, as resetting the chip + * is the right approach incase of pause timeout + */ + qla24xx_pause_risc(reg, ha); /* Host interface registers. 
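The reset and dump paths above now record each completed stage (RISC pause, DMA shutdown, soft reset, post-reset readiness, SRAM and external-memory dumps) as a bit in ha->fw_dump_cap_flags, and qla2xxx_dump_post_process() prints the whole mask, so a failed dump names the stage that stalled. The pattern, with illustrative bit names:

#include <linux/bitops.h>
#include <linux/printk.h>

enum {
	DEMO_RISC_PAUSE_CMPL,
	DEMO_DMA_SHUTDOWN_CMPL,
	DEMO_ISP_RESET_CMPL,
};

static void demo_stage_done(unsigned long *cap_flags, int stage)
{
	set_bit(stage, cap_flags);	/* atomic; callable at any stage */
}

static void demo_dump_failed(unsigned long cap_flags)
{
	pr_warn("Failed to dump firmware, dump status flags (0x%lx).\n",
		cap_flags);		/* unset bits identify the stall */
}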
*/ dmp_reg = ®->flash_addr; @@ -1189,6 +1311,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) risc_address = ext_mem_cnt = 0; flags = 0; + ha->fw_dump_cap_flags = 0; if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1212,10 +1335,11 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) fw->host_status = htonl(RD_REG_DWORD(®->host_status)); - /* Pause RISC. */ - rval = qla24xx_pause_risc(reg); - if (rval != QLA_SUCCESS) - goto qla25xx_fw_dump_failed_0; + /* + * Pause RISC. No need to track timeout, as resetting the chip + * is the right approach incase of pause timeout + */ + qla24xx_pause_risc(reg, ha); /* Host/Risc registers. */ iter_reg = fw->host_risc_reg; @@ -1464,7 +1588,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) nxt = qla2xxx_copy_queues(ha, nxt); - nxt = qla24xx_copy_eft(ha, nxt); + qla24xx_copy_eft(ha, nxt); /* Chain entries -- started with MQ. */ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); @@ -1506,6 +1630,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) risc_address = ext_mem_cnt = 0; flags = 0; + ha->fw_dump_cap_flags = 0; if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1528,10 +1653,11 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) fw->host_status = htonl(RD_REG_DWORD(®->host_status)); - /* Pause RISC. */ - rval = qla24xx_pause_risc(reg); - if (rval != QLA_SUCCESS) - goto qla81xx_fw_dump_failed_0; + /* + * Pause RISC. No need to track timeout, as resetting the chip + * is the right approach incase of pause timeout + */ + qla24xx_pause_risc(reg, ha); /* Host/Risc registers. */ iter_reg = fw->host_risc_reg; @@ -1783,7 +1909,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) nxt = qla2xxx_copy_queues(ha, nxt); - nxt = qla24xx_copy_eft(ha, nxt); + qla24xx_copy_eft(ha, nxt); /* Chain entries -- started with MQ. */ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); @@ -1825,6 +1951,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) risc_address = ext_mem_cnt = 0; flags = 0; + ha->fw_dump_cap_flags = 0; if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1846,10 +1973,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) fw->host_status = htonl(RD_REG_DWORD(®->host_status)); - /* Pause RISC. */ - rval = qla24xx_pause_risc(reg); - if (rval != QLA_SUCCESS) - goto qla83xx_fw_dump_failed_0; + /* + * Pause RISC. No need to track timeout, as resetting the chip + * is the right approach incase of pause timeout + */ + qla24xx_pause_risc(reg, ha); WRT_REG_DWORD(®->iobase_addr, 0x6000); dmp_reg = ®->iobase_window; @@ -2272,9 +2400,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) nxt += sizeof(fw->code_ram); nxt += (ha->fw_memory_size - 0x100000 + 1); goto copy_queue; - } else + } else { + set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); ql_log(ql_log_warn, vha, 0xd010, "bigger hammer success?\n"); + } } rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), @@ -2285,7 +2415,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) copy_queue: nxt = qla2xxx_copy_queues(ha, nxt); - nxt = qla24xx_copy_eft(ha, nxt); + qla24xx_copy_eft(ha, nxt); /* Chain entries -- started with MQ. 
*/ nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); @@ -2524,7 +2654,7 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id) if (!ql_mask_match(level)) return; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) mbx_reg = ®82->mailbox_in[0]; else if (IS_FWI2_CAPABLE(ha)) mbx_reg = ®24->mailbox0; diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 35e20b4f8b6..e1fc4e66966 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -348,3 +348,11 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...); #define ql_dbg_tgt 0x00004000 /* Target mode */ #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ + +extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, + uint32_t, void **); +extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *, + uint32_t, void **); +extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *, + struct qla_hw_data *); +extern int qla24xx_soft_reset(struct qla_hw_data *); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index c6509911772..de5d0ae19d8 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -35,6 +35,7 @@ #include "qla_bsg.h" #include "qla_nx.h" +#include "qla_nx2.h" #define QLA2XXX_DRIVER_NAME "qla2xxx" #define QLA2XXX_APIDEV "ql2xapidev" #define QLA2XXX_MANUFACTURER "QLogic Corporation" @@ -245,7 +246,6 @@ #define MAX_CMDSZ 16 /* SCSI maximum CDB size. */ #include "qla_fw.h" - /* * Timeout timer counts in seconds */ @@ -265,6 +265,7 @@ #define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ #define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ #define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. 
*/ +#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ struct req_que; @@ -284,6 +285,7 @@ struct sd_dif_tuple { struct srb_cmd { struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ uint32_t request_sense_length; + uint32_t fw_sense_length; uint8_t *request_sense_ptr; void *ctx; }; @@ -321,7 +323,39 @@ struct srb_iocb { uint32_t flags; uint32_t lun; uint32_t data; + struct completion comp; + __le16 comp_status; } tmf; + struct { +#define SRB_FXDISC_REQ_DMA_VALID BIT_0 +#define SRB_FXDISC_RESP_DMA_VALID BIT_1 +#define SRB_FXDISC_REQ_DWRD_VALID BIT_2 +#define SRB_FXDISC_RSP_DWRD_VALID BIT_3 +#define FXDISC_TIMEOUT 20 + uint8_t flags; + uint32_t req_len; + uint32_t rsp_len; + void *req_addr; + void *rsp_addr; + dma_addr_t req_dma_handle; + dma_addr_t rsp_dma_handle; + __le32 adapter_id; + __le32 adapter_id_hi; + __le16 req_func_type; + __le32 req_data; + __le32 req_data_extra; + __le32 result; + __le32 seq_number; + __le16 fw_flags; + struct completion fxiocb_comp; + __le32 reserved_0; + uint8_t reserved_1; + } fxiocb; + struct { + uint32_t cmd_hndl; + __le16 comp_status; + struct completion comp; + } abt; } u; struct timer_list timer; @@ -338,6 +372,10 @@ struct srb_iocb { #define SRB_TM_CMD 7 #define SRB_SCSI_CMD 8 #define SRB_BIDI_CMD 9 +#define SRB_FXIOCB_DCMD 10 +#define SRB_FXIOCB_BCMD 11 +#define SRB_ABT_CMD 12 + typedef struct srb { atomic_t ref_count; @@ -368,6 +406,10 @@ typedef struct srb { (sp->u.scmd.request_sense_ptr) #define SET_CMD_SENSE_PTR(sp, ptr) \ (sp->u.scmd.request_sense_ptr = ptr) +#define GET_FW_SENSE_LEN(sp) \ + (sp->u.scmd.fw_sense_length) +#define SET_FW_SENSE_LEN(sp, len) \ + (sp->u.scmd.fw_sense_length = len) struct msg_echo_lb { dma_addr_t send_dma; @@ -376,6 +418,7 @@ struct msg_echo_lb { uint16_t rsp_sg_cnt; uint16_t options; uint32_t transfer_size; + uint32_t iteration_count; }; /* @@ -542,12 +585,76 @@ struct device_reg_25xxmq { uint32_t atio_q_out; }; + +struct device_reg_fx00 { + uint32_t mailbox0; /* 00 */ + uint32_t mailbox1; /* 04 */ + uint32_t mailbox2; /* 08 */ + uint32_t mailbox3; /* 0C */ + uint32_t mailbox4; /* 10 */ + uint32_t mailbox5; /* 14 */ + uint32_t mailbox6; /* 18 */ + uint32_t mailbox7; /* 1C */ + uint32_t mailbox8; /* 20 */ + uint32_t mailbox9; /* 24 */ + uint32_t mailbox10; /* 28 */ + uint32_t mailbox11; + uint32_t mailbox12; + uint32_t mailbox13; + uint32_t mailbox14; + uint32_t mailbox15; + uint32_t mailbox16; + uint32_t mailbox17; + uint32_t mailbox18; + uint32_t mailbox19; + uint32_t mailbox20; + uint32_t mailbox21; + uint32_t mailbox22; + uint32_t mailbox23; + uint32_t mailbox24; + uint32_t mailbox25; + uint32_t mailbox26; + uint32_t mailbox27; + uint32_t mailbox28; + uint32_t mailbox29; + uint32_t mailbox30; + uint32_t mailbox31; + uint32_t aenmailbox0; + uint32_t aenmailbox1; + uint32_t aenmailbox2; + uint32_t aenmailbox3; + uint32_t aenmailbox4; + uint32_t aenmailbox5; + uint32_t aenmailbox6; + uint32_t aenmailbox7; + /* Request Queue. */ + uint32_t req_q_in; /* A0 - Request Queue In-Pointer */ + uint32_t req_q_out; /* A4 - Request Queue Out-Pointer */ + /* Response Queue. 
*/ + uint32_t rsp_q_in; /* A8 - Response Queue In-Pointer */ + uint32_t rsp_q_out; /* AC - Response Queue Out-Pointer */ + /* Init values shadowed on FW Up Event */ + uint32_t initval0; /* B0 */ + uint32_t initval1; /* B4 */ + uint32_t initval2; /* B8 */ + uint32_t initval3; /* BC */ + uint32_t initval4; /* C0 */ + uint32_t initval5; /* C4 */ + uint32_t initval6; /* C8 */ + uint32_t initval7; /* CC */ + uint32_t fwheartbeat; /* D0 */ + uint32_t pseudoaen; /* D4 */ +}; + + + typedef union { struct device_reg_2xxx isp; struct device_reg_24xx isp24; struct device_reg_25xxmq isp25mq; struct device_reg_82xx isp82; -} device_reg_t; + struct device_reg_fx00 ispfx00; +} __iomem device_reg_t; #define ISP_REQ_Q_IN(ha, reg) \ (IS_QLA2100(ha) || IS_QLA2200(ha) ? \ @@ -602,6 +709,20 @@ typedef struct { #define IOCTL_CMD BIT_2 } mbx_cmd_t; +struct mbx_cmd_32 { + uint32_t out_mb; /* outbound from driver */ + uint32_t in_mb; /* Incoming from RISC */ + uint32_t mb[MAILBOX_REGISTER_COUNT]; + long buf_size; + void *bufp; + uint32_t tov; + uint8_t flags; +#define MBX_DMA_IN BIT_0 +#define MBX_DMA_OUT BIT_1 +#define IOCTL_CMD BIT_2 +}; + + #define MBX_TOV_SECONDS 30 /* @@ -677,7 +798,17 @@ typedef struct { #define MBA_BYPASS_NOTIFICATION 0x8043 /* Auto bypass notification. */ #define MBA_DISCARD_RND_FRAME 0x8048 /* discard RND frame due to error. */ #define MBA_REJECTED_FCP_CMD 0x8049 /* rejected FCP_CMD. */ - +#define MBA_FW_NOT_STARTED 0x8050 /* Firmware not started */ +#define MBA_FW_STARTING 0x8051 /* Firmware starting */ +#define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */ +#define MBA_INIT_REQUIRED 0x8061 /* Initialization required */ +#define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */ +#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */ +#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change + Notification */ +#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */ +#define MBA_FW_RESET_FCT 0x8502 /* Firmware reset factory defaults */ +#define MBA_FW_INIT_INPROGRESS 0x8500 /* Firmware boot in progress */ /* 83XX FCoE specific */ #define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */ @@ -731,7 +862,6 @@ typedef struct { */ #define MBC_LOAD_RAM 1 /* Load RAM. */ #define MBC_EXECUTE_FIRMWARE 2 /* Execute firmware. */ -#define MBC_WRITE_RAM_WORD 4 /* Write RAM word. */ #define MBC_READ_RAM_WORD 5 /* Read RAM word. */ #define MBC_MAILBOX_REGISTER_TEST 6 /* Wrap incoming mailboxes */ #define MBC_VERIFY_CHECKSUM 7 /* Verify checksum. */ @@ -798,8 +928,17 @@ typedef struct { #define MBC_LUN_RESET 0x7E /* Send LUN reset */ /* + * all the Mt. Rainier mailbox command codes that clash with FC/FCoE ones + * should be defined with MBC_MR_* + */ +#define MBC_MR_DRV_SHUTDOWN 0x6A + +/* * ISP24xx mailbox commands */ +#define MBC_WRITE_SERDES 0x3 /* Write serdes word. */ +#define MBC_READ_SERDES 0x4 /* Read serdes word. */ +#define MBC_LOAD_DUMP_MPI_RAM 0x5 /* Load/Dump MPI RAM. */ #define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */ #define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */ #define MBC_PORT_PARAMS 0x1A /* Port iDMA Parameters. */ @@ -826,6 +965,13 @@ typedef struct { */ #define MBC_WRITE_MPI_REGISTER 0x01 /* Write MPI Register. 
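The serdes accessors declared later in qla_gbl.h (qla2x00_read_serdes_word() and friends) wrap the new MBC_READ_SERDES/MBC_WRITE_SERDES opcodes in the driver's usual mailbox pattern. A minimal sketch of the read side, assuming the conventional mbx_cmd_t layout; the exact mailbox-register usage here is an assumption, and the shipped qla2x00_read_serdes_word() in qla_mbx.c is authoritative:

int
qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	mcp->mb[0] = MBC_READ_SERDES;	/* opcode */
	mcp->mb[1] = addr;		/* serdes register address */
	mcp->out_mb = MBX_1|MBX_0;	/* mailboxes loaded for the request */
	mcp->in_mb = MBX_1|MBX_0;	/* mailboxes read back on completion */
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);	/* driver-internal issue path */
	if (rval == QLA_SUCCESS)
		*data = mcp->mb[1];	/* serdes word */
	return rval;
}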
*/ +/* + * ISP8044 mailbox commands + */ +#define MBC_SET_GET_ETH_SERDES_REG 0x150 +#define HCS_WRITE_SERDES 0x3 +#define HCS_READ_SERDES 0x4 + /* Firmware return data sizes */ #define FCAL_MAP_SIZE 128 @@ -1076,8 +1222,9 @@ struct link_statistics { uint32_t unused1[0x1a]; uint32_t tx_frames; uint32_t rx_frames; - uint32_t dumped_frames; - uint32_t unused2[2]; + uint32_t discarded_frames; + uint32_t dropped_frames; + uint32_t unused2[1]; uint32_t nos_rcvd; }; @@ -1482,25 +1629,35 @@ typedef struct { #define PO_MODE_DIF_PASS 2 #define PO_MODE_DIF_REPLACE 3 #define PO_MODE_DIF_TCP_CKSUM 6 -#define PO_ENABLE_DIF_BUNDLING BIT_8 #define PO_ENABLE_INCR_GUARD_SEED BIT_3 -#define PO_DISABLE_INCR_REF_TAG BIT_5 #define PO_DISABLE_GUARD_CHECK BIT_4 +#define PO_DISABLE_INCR_REF_TAG BIT_5 +#define PO_DIS_HEADER_MODE BIT_7 +#define PO_ENABLE_DIF_BUNDLING BIT_8 +#define PO_DIS_FRAME_MODE BIT_9 +#define PO_DIS_VALD_APP_ESC BIT_10 /* Dis validation for escape tag/ffffh */ +#define PO_DIS_VALD_APP_REF_ESC BIT_11 + +#define PO_DIS_APP_TAG_REPL BIT_12 /* disable REG Tag replacement */ +#define PO_DIS_REF_TAG_REPL BIT_13 +#define PO_DIS_APP_TAG_VALD BIT_14 /* disable REF Tag validation */ +#define PO_DIS_REF_TAG_VALD BIT_15 + /* * ISP queue - 64-Bit addressing, continuation crc entry structure definition. */ struct crc_context { uint32_t handle; /* System handle. */ - uint32_t ref_tag; - uint16_t app_tag; + __le32 ref_tag; + __le16 app_tag; uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ - uint16_t guard_seed; /* Initial Guard Seed */ - uint16_t prot_opts; /* Requested Data Protection Mode */ - uint16_t blk_size; /* Data size in bytes */ + __le16 guard_seed; /* Initial Guard Seed */ + __le16 prot_opts; /* Requested Data Protection Mode */ + __le16 blk_size; /* Data size in bytes */ uint16_t runt_blk_guard; /* Guard value for runt block (tape * only) */ - uint32_t byte_count; /* Total byte count/ total data + __le32 byte_count; /* Total byte count/ total data * transfer count */ union { struct { @@ -1514,10 +1671,10 @@ struct crc_context { uint32_t reserved_6; } nobundling; struct { - uint32_t dif_byte_count; /* Total DIF byte + __le32 dif_byte_count; /* Total DIF byte * count */ uint16_t reserved_1; - uint16_t dseg_count; /* Data segment count */ + __le16 dseg_count; /* Data segment count */ uint32_t reserved_2; uint32_t data_address[2]; uint32_t data_length; @@ -1608,6 +1765,8 @@ typedef struct { #define CS_PORT_CONFIG_CHG 0x2A /* Port Configuration Changed */ #define CS_PORT_BUSY 0x2B /* Port Busy */ #define CS_COMPLETE_CHKCOND 0x30 /* Error? */ +#define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request + failure */ #define CS_BAD_PAYLOAD 0x80 /* Driver defined */ #define CS_UNKNOWN 0x81 /* Driver defined */ #define CS_RETRY 0x82 /* Driver defined */ @@ -1832,6 +1991,9 @@ typedef struct fc_port { uint16_t loop_id; uint16_t old_loop_id; + uint16_t tgt_id; + uint16_t old_tgt_id; + uint8_t fcp_prio; uint8_t fabric_port_name[WWN_SIZE]; @@ -1849,8 +2011,15 @@ typedef struct fc_port { uint8_t fc4_type; uint8_t scan_state; + + unsigned long last_queue_full; + unsigned long last_ramp_up; + + uint16_t port_id; } fc_port_t; +#include "qla_mr.h" + /* * Fibre channel port/lun states. 
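With crc_context's wire-visible members re-typed as __le16/__le32 above, sparse can now flag a missing byte-swap, and every store into the T10-PI context has to go through the cpu_to_leXX() helpers. A sketch of the expected fill pattern (names follow the qla_iocb.c call sites; lba and blk_size stand in for the real values):

	crc_ctx_pkt->ref_tag    = cpu_to_le32((uint32_t)lba);
	crc_ctx_pkt->app_tag    = cpu_to_le16(0);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	crc_ctx_pkt->prot_opts  = cpu_to_le16(PO_MODE_DIF_PASS);
	crc_ctx_pkt->blk_size   = cpu_to_le16(blk_size);
	crc_ctx_pkt->byte_count = cpu_to_le32(scsi_bufflen(cmd));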
*/ @@ -1999,6 +2168,7 @@ struct ct_fdmi_hba_attributes { #define FDMI_PORT_SPEED_4GB 0x8 #define FDMI_PORT_SPEED_8GB 0x10 #define FDMI_PORT_SPEED_16GB 0x20 +#define FDMI_PORT_SPEED_32GB 0x40 #define FDMI_PORT_SPEED_UNKNOWN 0x8000 struct ct_fdmi_port_attr { @@ -2392,6 +2562,7 @@ struct isp_operations { int (*start_scsi) (srb_t *); int (*abort_isp) (struct scsi_qla_host *); int (*iospace_config)(struct qla_hw_data*); + int (*initialize_adapter)(struct scsi_qla_host *); }; /* MSI-X Support *************************************************************/ @@ -2430,6 +2601,7 @@ enum qla_work_type { QLA_EVT_ASYNC_ADISC, QLA_EVT_ASYNC_ADISC_DONE, QLA_EVT_UEVENT, + QLA_EVT_AENFX, }; @@ -2457,7 +2629,15 @@ struct qla_work_evt { u32 code; #define QLA_UEVENT_CODE_FW_DUMP 0 } uevent; - } u; + struct { + uint32_t evtcode; + uint32_t mbx[8]; + uint32_t count; + } aenfx; + struct { + srb_t *sp; + } iosb; + } u; }; struct qla_chip_state_84xx { @@ -2479,6 +2659,11 @@ struct qla_statistics { uint32_t total_isp_aborts; uint64_t input_bytes; uint64_t output_bytes; + uint64_t input_requests; + uint64_t output_requests; + uint32_t control_requests; + + uint64_t jiffies_at_last_reset; }; struct bidi_statistics { @@ -2492,10 +2677,9 @@ struct bidi_statistics { #define QLA_MQ_SIZE 32 #define QLA_MAX_QUEUES 256 #define ISP_QUE_REG(ha, id) \ - ((ha->mqenable || IS_QLA83XX(ha)) ? \ - ((device_reg_t __iomem *)(ha->mqiobase) +\ - (QLA_QUE_PAGE * id)) :\ - ((device_reg_t __iomem *)(ha->iobase))) + ((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \ + ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\ + ((void __iomem *)ha->iobase)) #define QLA_REQ_QUE_ID(tag) \ ((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0) #define QLA_DEFAULT_QUE_QOS 5 @@ -2511,6 +2695,7 @@ struct rsp_que { uint32_t __iomem *rsp_q_out; uint16_t ring_index; uint16_t out_ptr; + uint16_t *in_ptr; /* queue shadow in index */ uint16_t length; uint16_t options; uint16_t rid; @@ -2521,6 +2706,11 @@ struct rsp_que { struct req_que *req; srb_t *status_srb; /* status continuation entry */ struct work_struct q_work; + + dma_addr_t dma_fx00; + response_t *ring_fx00; + uint16_t length_fx00; + uint8_t rsp_pkt[REQUEST_ENTRY_SIZE]; }; /* Request queue data structure */ @@ -2532,6 +2722,7 @@ struct req_que { uint32_t __iomem *req_q_out; uint16_t ring_index; uint16_t in_ptr; + uint16_t *out_ptr; /* queue shadow out index */ uint16_t cnt; uint16_t length; uint16_t options; @@ -2543,8 +2734,12 @@ struct req_que { srb_t **outstanding_cmds; uint32_t current_outstanding_cmd; uint16_t num_outstanding_cmds; -#define MAX_Q_DEPTH 32 int max_q_depth; + + dma_addr_t dma_fx00; + request_t *ring_fx00; + uint16_t length_fx00; + uint8_t req_pkt[REQUEST_ENTRY_SIZE]; }; /* Place holder for FW buffer parameters */ @@ -2554,6 +2749,13 @@ struct qlfc_fw { uint32_t len; }; +struct scsi_qlt_host { + void *target_lport_ptr; + struct mutex tgt_mutex; + struct mutex tgt_host_action_mutex; + struct qla_tgt *qla_tgt; +}; + struct qlt_hw_data { /* Protected by hw lock */ uint32_t enable_class_2:1; @@ -2569,15 +2771,11 @@ struct qlt_hw_data { uint32_t __iomem *atio_q_in; uint32_t __iomem *atio_q_out; - void *target_lport_ptr; struct qla_tgt_func_tmpl *tgt_ops; - struct qla_tgt *qla_tgt; struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS]; uint16_t current_handle; struct qla_tgt_vp_map *tgt_vp_map; - struct mutex tgt_mutex; - struct mutex tgt_host_action_mutex; int saved_set; uint16_t saved_exchange_count; @@ -2619,7 +2817,6 @@ struct qla_hw_data { uint32_t fac_supported :1; uint32_t 
chip_reset_done :1; - uint32_t port0 :1; uint32_t running_gold_fw :1; uint32_t eeh_busy :1; uint32_t cpu_affinity_enabled :1; @@ -2634,7 +2831,10 @@ struct qla_hw_data { uint32_t isp82xx_no_md_cap:1; uint32_t host_shutting_down:1; uint32_t idc_compl_status:1; - /* 32 bits */ + + uint32_t mr_reset_hdlr_active:1; + uint32_t mr_intr_valid:1; + /* 34 bits */ } flags; /* This spinlock is used to protect "io transactions", you must @@ -2647,13 +2847,27 @@ struct qla_hw_data { spinlock_t hardware_lock ____cacheline_aligned; int bars; int mem_only; - device_reg_t __iomem *iobase; /* Base I/O address */ + device_reg_t *iobase; /* Base I/O address */ resource_size_t pio_address; #define MIN_IOBASE_LEN 0x100 -/* Multi queue data structs */ - device_reg_t __iomem *mqiobase; - device_reg_t __iomem *msixbase; + dma_addr_t bar0_hdl; + + void __iomem *cregbase; + dma_addr_t bar2_hdl; +#define BAR0_LEN_FX00 (1024 * 1024) +#define BAR2_LEN_FX00 (128 * 1024) + + uint32_t rqstq_intr_code; + uint32_t mbx_intr_code; + uint32_t req_que_len; + uint32_t rsp_que_len; + uint32_t req_que_off; + uint32_t rsp_que_off; + + /* Multi queue data structs */ + device_reg_t *mqiobase; + device_reg_t *msixbase; uint16_t msix_count; uint8_t mqenable; struct req_que **req_q_map; @@ -2689,6 +2903,7 @@ struct qla_hw_data { #define PORT_SPEED_4GB 0x03 #define PORT_SPEED_8GB 0x04 #define PORT_SPEED_16GB 0x05 +#define PORT_SPEED_32GB 0x06 #define PORT_SPEED_10GB 0x13 uint16_t link_data_rate; /* F/W operating speed */ @@ -2712,6 +2927,9 @@ struct qla_hw_data { #define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001 #define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031 #define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031 +#define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071 +#define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271 + uint32_t device_type; #define DT_ISP2100 BIT_0 #define DT_ISP2200 BIT_1 @@ -2730,7 +2948,11 @@ struct qla_hw_data { #define DT_ISP8021 BIT_14 #define DT_ISP2031 BIT_15 #define DT_ISP8031 BIT_16 -#define DT_ISP_LAST (DT_ISP8031 << 1) +#define DT_ISPFX00 BIT_17 +#define DT_ISP8044 BIT_18 +#define DT_ISP2071 BIT_19 +#define DT_ISP2271 BIT_20 +#define DT_ISP_LAST (DT_ISP2271 << 1) #define DT_T10_PI BIT_25 #define DT_IIDMA BIT_26 @@ -2756,8 +2978,12 @@ struct qla_hw_data { #define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001) #define IS_QLA81XX(ha) (IS_QLA8001(ha)) #define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021) +#define IS_QLA8044(ha) (DT_MASK(ha) & DT_ISP8044) #define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031) #define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031) +#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00) +#define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071) +#define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271) #define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ IS_QLA6312(ha) || IS_QLA6322(ha)) @@ -2766,18 +2992,22 @@ struct qla_hw_data { #define IS_QLA25XX(ha) (IS_QLA2532(ha)) #define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha)) #define IS_QLA84XX(ha) (IS_QLA8432(ha)) +#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha)) #define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ IS_QLA84XX(ha)) #define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \ - IS_QLA8031(ha)) + IS_QLA8031(ha) || IS_QLA8044(ha)) +#define IS_P3P_TYPE(ha) (IS_QLA82XX(ha) || IS_QLA8044(ha)) #define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ - IS_QLA82XX(ha) || IS_QLA83XX(ha)) + IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ + IS_QLA8044(ha) || IS_QLA27XX(ha)) #define IS_MSIX_NACK_CAPABLE(ha) 
(IS_QLA81XX(ha) || IS_QLA83XX(ha)) -#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ - IS_QLA83XX(ha)) && (ha)->flags.msix_enabled) -#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) -#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) +#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled) +#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ + IS_QLA27XX(ha)) +#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ + IS_QLA27XX(ha)) #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) #define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI) @@ -2787,7 +3017,8 @@ struct qla_hw_data { #define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001) #define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS) #define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED) -#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha)) +#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \ + IS_QLA27XX(ha)) #define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha))) /* Bit 21 of fw_attributes decides the MCTP capabilities */ #define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ @@ -2800,6 +3031,7 @@ struct qla_hw_data { (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) #define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) #define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) +#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) /* HBA serial number */ uint8_t serial0; @@ -2822,6 +3054,7 @@ struct qla_hw_data { uint16_t r_a_tov; int port_down_retry_count; uint8_t mbx_count; + uint8_t aen_mbx_count; uint32_t login_retry_count; /* SNS command interfaces. */ @@ -2869,9 +3102,13 @@ struct qla_hw_data { void *swl; /* These are used by mailbox operations. */ - volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT]; + uint16_t mailbox_out[MAILBOX_REGISTER_COUNT]; + uint32_t mailbox_out32[MAILBOX_REGISTER_COUNT]; + uint32_t aenmb[AEN_MAILBOX_REGISTER_COUNT_FX00]; mbx_cmd_t *mcp; + struct mbx_cmd_32 *mcp32; + unsigned long mbx_cmd_flags; #define MBX_INTERRUPT 1 #define MBX_INTR_WAIT 2 @@ -2907,6 +3144,9 @@ struct qla_hw_data { uint16_t fw_xcb_count; uint16_t fw_iocb_count; + uint32_t fw_shared_ram_start; + uint32_t fw_shared_ram_end; + uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */ uint8_t fw_seriallink_options[4]; uint16_t fw_seriallink_options24[4]; @@ -2915,11 +3155,22 @@ struct qla_hw_data { uint32_t mpi_capabilities; uint8_t phy_version[3]; + /* Firmware dump template */ + void *fw_dump_template; + uint32_t fw_dump_template_len; /* Firmware dump information. */ struct qla2xxx_fw_dump *fw_dump; uint32_t fw_dump_len; int fw_dumped; + unsigned long fw_dump_cap_flags; +#define RISC_PAUSE_CMPL 0 +#define DMA_SHUTDOWN_CMPL 1 +#define ISP_RESET_CMPL 2 +#define RISC_RDY_AFT_RESET 3 +#define RISC_SRAM_DUMP_CMPL 4 +#define RISC_EXT_MEM_DUMP_CMPL 5 int fw_dump_reading; + int prev_minidump_failed; dma_addr_t eft_dma; void *eft; /* Current size of mctp dump is 0x086064 bytes */ @@ -2957,6 +3208,7 @@ struct qla_hw_data { #define QLA_SWRITING 2 uint32_t optrom_region_start; uint32_t optrom_region_size; + struct mutex optrom_mutex; /* PCI expansion ROM image information. 
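The new fw_dump_cap_flags is an unsigned long treated as a bit array: each stage of a 24xx-style firmware dump records its completion, so a truncated dump can report exactly where collection stalled. A plausible usage sketch with the generic bitops; these call sites are illustrative, not lifted from the dump code:

	set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);

	if (qla24xx_soft_reset(ha) == QLA_SUCCESS)
		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);

	if (!test_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags))
		ql_log(ql_log_warn, vha, 0xd000,	/* message id is made up */
		    "RISC not ready after reset; dump may be incomplete.\n");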
*/ #define ROM_CODE_TYPE_BIOS 0 @@ -2976,10 +3228,12 @@ struct qla_hw_data { uint32_t nvram_data_off; uint32_t fdt_wrt_disable; + uint32_t fdt_wrt_enable; uint32_t fdt_erase_cmd; uint32_t fdt_block_size; uint32_t fdt_unprotect_sec_cmd; uint32_t fdt_protect_sec_cmd; + uint32_t fdt_wrt_sts_reg_cmd; uint32_t flt_region_flt; uint32_t flt_region_fdt; @@ -3015,6 +3269,7 @@ struct qla_hw_data { int cur_vport_count; struct qla_chip_state_84xx *cs84xx; + struct qla_statistics qla_stats; struct isp_operations *isp_ops; struct workqueue_struct *wq; struct qlfc_fw fw_buf; @@ -3065,6 +3320,7 @@ struct qla_hw_data { /* QLA83XX IDC specific fields */ uint32_t idc_audit_ts; + uint32_t idc_extend_tmo; /* DPC low-priority workqueue */ struct workqueue_struct *dpc_lp_wq; @@ -3074,17 +3330,12 @@ struct qla_hw_data { struct work_struct nic_core_reset; struct work_struct idc_state_handler; struct work_struct nic_core_unrecoverable; + struct work_struct board_disable; -#define HOST_QUEUE_RAMPDOWN_INTERVAL (60 * HZ) -#define HOST_QUEUE_RAMPUP_INTERVAL (30 * HZ) - unsigned long host_last_rampdown_time; - unsigned long host_last_rampup_time; - int cfg_lun_q_depth; + struct mr_data_fx00 mr; struct qlt_hw_data tgt; - uint16_t thermal_support; -#define THERMAL_SUPPORT_I2C BIT_0 -#define THERMAL_SUPPORT_ISP BIT_1 + int allow_cna_fw_dump; }; /* @@ -3110,6 +3361,8 @@ typedef struct scsi_qla_host { uint32_t process_response_queue :1; uint32_t difdix_supported:1; uint32_t delete_progress:1; + + uint32_t fw_tgt_reported:1; } flags; atomic_t loop_state; @@ -3143,8 +3396,11 @@ typedef struct scsi_qla_host { #define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ #define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ #define SCR_PENDING 21 /* SCR in target mode */ -#define HOST_RAMP_DOWN_QUEUE_DEPTH 22 -#define HOST_RAMP_UP_QUEUE_DEPTH 23 +#define PORT_UPDATE_NEEDED 22 +#define FX00_RESET_RECOVERY 23 +#define FX00_TARGET_SCAN 24 +#define FX00_CRITEMP_RECOVERY 25 +#define FX00_HOST_INFO_RESEND 26 uint32_t device_flags; #define SWITCH_FOUND BIT_0 @@ -3183,7 +3439,7 @@ typedef struct scsi_qla_host { uint16_t fcoe_fcf_idx; uint8_t fcoe_vn_port_mac[6]; - uint32_t vp_abort_cnt; + uint32_t vp_abort_cnt; struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ uint16_t vp_idx; /* vport ID */ @@ -3208,6 +3464,7 @@ typedef struct scsi_qla_host { #define VP_ERR_FAB_LOGOUT 4 #define VP_ERR_ADAP_NORESOURCES 5 struct qla_hw_data *hw; + struct scsi_qlt_host vha_tgt; struct req_que *req; int fw_heartbeat_counter; int seconds_since_last_heartbeat; @@ -3216,6 +3473,7 @@ typedef struct scsi_qla_host { struct bidi_statistics bidi_stats; atomic_t vref_count; + struct qla8044_reset_template reset_tmplt; } scsi_qla_host_t; #define SET_VP_IDX 1 @@ -3235,6 +3493,10 @@ struct qla_tgt_vp_map { test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \ atomic_read(&ha->loop_state) == LOOP_DOWN) +#define STATE_TRANSITION(ha) \ + (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \ + test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) + #define QLA_VHA_MARK_BUSY(__vha, __bail) do { \ atomic_inc(&__vha->vref_count); \ mb(); \ diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 792a29294b6..2ca39b8e716 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -114,7 +114,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; - if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) + if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha)) goto out; if (!ha->fce) goto out; diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 1ac2b0e3a0e..eb8f57249f1 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -371,7 +371,10 @@ struct init_cb_24xx { * BIT 14 = Data Rate bit 1 * BIT 15 = Data Rate bit 2 * BIT 16 = Enable 75 ohm Termination Select - * BIT 17-31 = Reserved + * BIT 17-28 = Reserved + * BIT 29 = Enable response queue 0 in index shadowing + * BIT 30 = Enable request queue 0 out index shadowing + * BIT 31 = Reserved */ uint32_t firmware_options_3; uint16_t qos; @@ -1134,13 +1137,6 @@ struct device_reg_24xx { #define MIN_MULTI_ID_FABRIC 64 /* Must be power-of-2. */ #define MAX_MULTI_ID_FABRIC 256 /* ... */ -#define for_each_mapped_vp_idx(_ha, _idx) \ - for (_idx = find_next_bit((_ha)->vp_idx_map, \ - (_ha)->max_npiv_vports + 1, 1); \ - _idx <= (_ha)->max_npiv_vports; \ - _idx = find_next_bit((_ha)->vp_idx_map, \ - (_ha)->max_npiv_vports + 1, _idx + 1)) \ - struct mid_conf_entry_24xx { uint16_t reserved_1; @@ -1378,6 +1374,10 @@ struct qla_flt_header { #define FLT_REG_NVRAM_0 0x15 #define FLT_REG_VPD_1 0x16 #define FLT_REG_NVRAM_1 0x17 +#define FLT_REG_VPD_2 0xD4 +#define FLT_REG_NVRAM_2 0xD5 +#define FLT_REG_VPD_3 0xD6 +#define FLT_REG_NVRAM_3 0xD7 #define FLT_REG_FDT 0x1a #define FLT_REG_FLT 0x1c #define FLT_REG_HW_EVENT_0 0x1d @@ -1387,6 +1387,8 @@ struct qla_flt_header { #define FLT_REG_GOLD_FW 0x2f #define FLT_REG_FCP_PRIO_0 0x87 #define FLT_REG_FCP_PRIO_1 0x88 +#define FLT_REG_CNA_FW 0x97 +#define FLT_REG_BOOT_CODE_8044 0xA2 #define FLT_REG_FCOE_FW 0xA4 #define FLT_REG_FCOE_NVRAM_0 0xAA #define FLT_REG_FCOE_NVRAM_1 0xAC diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index eb3ca21a7f1..d48dea8fab1 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -86,6 +86,7 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *); extern int qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *); +extern int qla2x00_init_rings(scsi_qla_host_t *); /* * Global Data in qla_os.c source file. 
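Bits 29 and 30 of firmware_options_3 pair with the in_ptr/out_ptr shadow fields added to struct rsp_que and struct req_que earlier in this series: with index shadowing enabled, ISP27xx firmware DMAs its ring indices into host memory, so polling a queue no longer costs an MMIO read. A minimal sketch, with the helper name invented for illustration:

static inline uint16_t qla_rsp_in_index(struct qla_hw_data *ha,
    struct rsp_que *rsp)
{
	if (IS_SHADOW_REG_CAPABLE(ha))
		return *rsp->in_ptr;	/* shadow word, DMA'd by firmware */
	return (uint16_t)RD_REG_DWORD(rsp->rsp_q_in);	/* MMIO fallback */
}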
@@ -97,7 +98,6 @@ extern int qlport_down_retry; extern int ql2xplogiabsentdevice; extern int ql2xloginretrycount; extern int ql2xfdmienable; -extern int ql2xmaxqdepth; extern int ql2xallocfwdump; extern int ql2xextended_error_logging; extern int ql2xiidmaenable; @@ -134,7 +134,6 @@ extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); -extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *); @@ -158,6 +157,10 @@ extern int qla83xx_set_drv_presence(scsi_qla_host_t *vha); extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha); extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha); extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha); +extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); + +extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); +extern void qla2x00_disable_board_on_pci_error(struct work_struct *); /* * Global Functions in qla_mid.c source file. @@ -211,14 +214,19 @@ extern int qla24xx_start_scsi(srb_t *sp); int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, uint16_t, uint16_t, uint8_t); extern int qla2x00_start_sp(srb_t *); -extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t); -extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); extern int qla24xx_dif_start_scsi(srb_t *); extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t); extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); extern int qla2x00_issue_marker(scsi_qla_host_t *, int); +extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, + uint32_t *, uint16_t, struct qla_tgt_cmd *); +extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, + uint32_t *, uint16_t, struct qla_tgt_cmd *); +extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, + uint32_t *, uint16_t, struct qla_tgt_cmd *); + /* * Global Function Prototypes in qla_mbx.c source file. 
@@ -329,6 +337,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, dma_addr_t); extern int qla24xx_abort_command(srb_t *); +extern int qla24xx_async_abort_command(srb_t *); extern int qla24xx_abort_target(struct fc_port *, unsigned int, int); extern int @@ -340,6 +349,16 @@ extern int qla2x00_system_error(scsi_qla_host_t *); extern int +qla2x00_write_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t); +extern int +qla2x00_read_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t *); + +extern int +qla8044_write_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t); +extern int +qla8044_read_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t *); + +extern int qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t); extern int @@ -358,7 +377,10 @@ extern int qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); extern int -qla2x00_set_driver_version(scsi_qla_host_t *, char *); +qla82xx_set_driver_version(scsi_qla_host_t *, char *); + +extern int +qla25xx_set_driver_version(scsi_qla_host_t *, char *); extern int qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, @@ -427,26 +449,33 @@ extern void qla2x00_free_irqs(scsi_qla_host_t *); extern int qla2x00_get_data_rate(scsi_qla_host_t *); extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t); +extern srb_t * +qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *, + void *); +extern void +qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *, + uint32_t); /* * Global Function Prototypes in qla_sup.c source file. */ extern void qla2x00_release_nvram_protection(scsi_qla_host_t *); extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); + uint32_t); extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); + uint32_t); extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); + uint32_t); extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); + uint32_t); extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); + uint32_t); extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); + uint32_t); extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t); +bool qla2x00_check_reg_for_disconnect(scsi_qla_host_t *, uint32_t); extern int qla2x00_beacon_on(struct scsi_qla_host *); extern int qla2x00_beacon_off(struct scsi_qla_host *); @@ -461,21 +490,25 @@ extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t); extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *); extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *); extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t, - uint32_t, uint16_t *); + uint32_t, uint16_t *); extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, - 
uint32_t, uint32_t); + uint32_t, uint32_t); +extern uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *, + uint8_t *, uint32_t, uint32_t); +extern void qla8044_watchdog(struct scsi_qla_host *vha); extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *); extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *); +extern int qla82xx_get_flash_version(scsi_qla_host_t *, void *); extern int qla2xxx_get_flash_info(scsi_qla_host_t *); extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t); @@ -491,12 +524,22 @@ extern void qla2300_fw_dump(scsi_qla_host_t *, int); extern void qla24xx_fw_dump(scsi_qla_host_t *, int); extern void qla25xx_fw_dump(scsi_qla_host_t *, int); extern void qla81xx_fw_dump(scsi_qla_host_t *, int); +extern void qla82xx_fw_dump(scsi_qla_host_t *, int); +extern void qla8044_fw_dump(scsi_qla_host_t *, int); + +extern void qla27xx_fwdump(scsi_qla_host_t *, int); +extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *); +extern int qla27xx_fwdt_template_valid(void *); +extern ulong qla27xx_fwdt_template_size(void *); +extern const void *qla27xx_fwdt_template_default(void); +extern ulong qla27xx_fwdt_template_default_size(void); + extern void qla2x00_dump_regs(scsi_qla_host_t *); extern void qla2x00_dump_buffer(uint8_t *, uint32_t); extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t); extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t, - uint8_t *, uint32_t); + uint8_t *, uint32_t); extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); /* @@ -529,10 +572,9 @@ struct fc_function_template; extern struct fc_function_template qla2xxx_transport_functions; extern struct fc_function_template qla2xxx_transport_vport_functions; extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); -extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *); +extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool); extern void qla2x00_init_host_attr(scsi_qla_host_t *); extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); -extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *); extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *); extern int qla2x00_echo_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *); @@ -564,6 +606,43 @@ extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); +/* qlafx00 related functions */ +extern int qlafx00_pci_config(struct scsi_qla_host *); +extern int qlafx00_initialize_adapter(struct scsi_qla_host *); +extern void qlafx00_soft_reset(scsi_qla_host_t *); +extern int qlafx00_chip_diag(scsi_qla_host_t *); +extern void qlafx00_config_rings(struct scsi_qla_host *); +extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *); +extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *); +extern irqreturn_t qlafx00_intr_handler(int, void *); +extern void qlafx00_enable_intrs(struct qla_hw_data *); +extern void qlafx00_disable_intrs(struct qla_hw_data *); +extern int qlafx00_abort_target(fc_port_t *, unsigned int, int); +extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int); +extern int qlafx00_start_scsi(srb_t *); +extern int qlafx00_abort_isp(scsi_qla_host_t *); +extern int qlafx00_iospace_config(struct qla_hw_data *); +extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t); +extern int 
qlafx00_driver_shutdown(scsi_qla_host_t *, int); +extern int qlafx00_fw_ready(scsi_qla_host_t *); +extern int qlafx00_configure_devices(scsi_qla_host_t *); +extern int qlafx00_reset_initialize(scsi_qla_host_t *); +extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint16_t); +extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *); +extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t, + uint32_t *, int); +extern uint32_t qlafx00_fw_state_show(struct device *, + struct device_attribute *, char *); +extern void qlafx00_get_host_speed(struct Scsi_Host *); +extern void qlafx00_init_response_q_entries(struct rsp_que *); + +extern void qlafx00_tm_iocb(srb_t *, struct tsk_mgmt_entry_fx00 *); +extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *); +extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *); +extern void qlafx00_timer_routine(scsi_qla_host_t *); +extern int qlafx00_rescan_isp(scsi_qla_host_t *); +extern int qlafx00_loop_reset(scsi_qla_host_t *vha); + /* qla82xx related functions */ /* PCI related functions */ @@ -581,9 +660,9 @@ extern int qla82xx_start_firmware(scsi_qla_host_t *); /* Firmware and flash related functions */ extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *); extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, - uint32_t, uint32_t); + uint32_t, uint32_t); /* Mailbox related functions */ extern int qla82xx_abort_isp(scsi_qla_host_t *); @@ -624,7 +703,7 @@ extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *); extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *); extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, - size_t, char *); + size_t, char *); extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *); extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); extern void qla82xx_start_iocbs(scsi_qla_host_t *); @@ -636,6 +715,8 @@ extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *); extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int); extern char *qdev_state(uint32_t); extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *); +extern int qla82xx_read_temperature(scsi_qla_host_t *); +extern int qla8044_read_temperature(scsi_qla_host_t *); /* BSG related functions */ extern int qla24xx_bsg_request(struct fc_bsg_job *); @@ -657,5 +738,31 @@ extern void qla82xx_md_free(scsi_qla_host_t *); extern int qla82xx_md_collect(scsi_qla_host_t *); extern void qla82xx_md_prep(scsi_qla_host_t *); extern void qla82xx_set_reset_owner(scsi_qla_host_t *); +extern int qla82xx_validate_template_chksum(scsi_qla_host_t *vha); + +/* Function declarations for ISP8044 */ +extern int qla8044_idc_lock(struct qla_hw_data *ha); +extern void qla8044_idc_unlock(struct qla_hw_data *ha); +extern uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr); +extern void qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val); +extern void qla8044_read_reset_template(struct scsi_qla_host *ha); +extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha); +extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg); +extern void qla8044_wr_direct(struct scsi_qla_host *vha, + const uint32_t crb_reg, const uint32_t value); +extern inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha); +extern inline void qla8044_need_reset_handler(struct scsi_qla_host *vha); +extern int qla8044_device_state_handler(struct 
scsi_qla_host *vha); +extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha); +extern void qla8044_clear_drv_active(struct qla_hw_data *); +void qla8044_get_minidump(struct scsi_qla_host *vha); +int qla8044_collect_md_data(struct scsi_qla_host *vha); +extern int qla8044_md_get_template(scsi_qla_host_t *); +extern int qla8044_write_optrom_data(struct scsi_qla_host *, uint8_t *, + uint32_t, uint32_t); +extern irqreturn_t qla8044_intr_handler(int, void *); +extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t); +extern int qla8044_abort_isp(scsi_qla_host_t *); +extern int qla8044_check_fw_alive(struct scsi_qla_host *); #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 9b455250c10..a0df3b1b382 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -49,6 +49,8 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount; + vha->qla_stats.control_requests++; + return (ms_pkt); } @@ -87,6 +89,8 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; ct_pkt->vp_index = vha->vp_idx; + vha->qla_stats.control_requests++; + return (ct_pkt); } @@ -99,17 +103,17 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size) * Returns a pointer to the intitialized @ct_req. */ static inline struct ct_sns_req * -qla2x00_prep_ct_req(struct ct_sns_req *ct_req, uint16_t cmd, uint16_t rsp_size) +qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size) { - memset(ct_req, 0, sizeof(struct ct_sns_pkt)); + memset(p, 0, sizeof(struct ct_sns_pkt)); - ct_req->header.revision = 0x01; - ct_req->header.gs_type = 0xFC; - ct_req->header.gs_subtype = 0x02; - ct_req->command = cpu_to_be16(cmd); - ct_req->max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); + p->p.req.header.revision = 0x01; + p->p.req.header.gs_type = 0xFC; + p->p.req.header.gs_subtype = 0x02; + p->p.req.command = cpu_to_be16(cmd); + p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); - return (ct_req); + return &p->p.req; } static int @@ -188,7 +192,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) GA_NXT_RSP_SIZE); /* Prepare CT request */ - ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GA_NXT_CMD, + ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD, GA_NXT_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; @@ -226,17 +230,9 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) fcport->d_id.b.domain = 0xf0; ql_dbg(ql_dbg_disc, vha, 0x2063, - "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x " - "pn %02x%02x%02x%02x%02x%02x%02x%02x " + "GA_NXT entry - nn %8phN pn %8phN " "port_id=%02x%02x%02x.\n", - fcport->node_name[0], fcport->node_name[1], - fcport->node_name[2], fcport->node_name[3], - fcport->node_name[4], fcport->node_name[5], - fcport->node_name[6], fcport->node_name[7], - fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7], + fcport->node_name, fcport->port_name, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); } @@ -284,8 +280,7 @@ 
qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) gid_pt_rsp_size); /* Prepare CT request */ - ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD, - gid_pt_rsp_size); + ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_type */ @@ -359,7 +354,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) GPN_ID_RSP_SIZE); /* Prepare CT request */ - ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GPN_ID_CMD, + ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; @@ -421,7 +416,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) GNN_ID_RSP_SIZE); /* Prepare CT request */ - ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GNN_ID_CMD, + ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD, GNN_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; @@ -448,17 +443,9 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); ql_dbg(ql_dbg_disc, vha, 0x2058, - "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x " - "pn %02x%02x%02x%02x%02x%02x%02X%02x " + "GID_PT entry - nn %8phN pn %8phN " "portid=%02x%02x%02x.\n", - list[i].node_name[0], list[i].node_name[1], - list[i].node_name[2], list[i].node_name[3], - list[i].node_name[4], list[i].node_name[5], - list[i].node_name[6], list[i].node_name[7], - list[i].port_name[0], list[i].port_name[1], - list[i].port_name[2], list[i].port_name[3], - list[i].port_name[4], list[i].port_name[5], - list[i].port_name[6], list[i].port_name[7], + list[i].node_name, list[i].port_name, list[i].d_id.b.domain, list[i].d_id.b.area, list[i].d_id.b.al_pa); } @@ -495,7 +482,7 @@ qla2x00_rft_id(scsi_qla_host_t *vha) RFT_ID_RSP_SIZE); /* Prepare CT request */ - ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFT_ID_CMD, + ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; @@ -551,7 +538,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha) RFF_ID_RSP_SIZE); /* Prepare CT request */ - ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFF_ID_CMD, + ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; @@ -606,8 +593,7 @@ qla2x00_rnn_id(scsi_qla_host_t *vha) RNN_ID_RSP_SIZE); /* Prepare CT request */ - ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RNN_ID_CMD, - RNN_ID_RSP_SIZE); + ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id, node_name */ @@ -639,9 +625,14 @@ void qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn) { struct qla_hw_data *ha = vha->hw; - sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number, - ha->fw_major_version, ha->fw_minor_version, - ha->fw_subminor_version, qla2x00_version_str); + + if (IS_QLAFX00(ha)) + sprintf(snn, "%s FW:v%s DVR:v%s", ha->model_number, + ha->mr.fw_version, qla2x00_version_str); + else + sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number, + ha->fw_major_version, ha->fw_minor_version, + ha->fw_subminor_version, qla2x00_version_str); } /** @@ -671,7 +662,7 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha) ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE); /* Prepare CT request */ - ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD, + ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; @@ -736,6 +727,8 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len, wc = (data_size - 16) / 4; /* 
Size in 32bit words. */ sns_cmd->p.cmd.size = cpu_to_le16(wc); + vha->qla_stats.control_requests++; + return (sns_cmd); } @@ -793,17 +786,9 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) fcport->d_id.b.domain = 0xf0; ql_dbg(ql_dbg_disc, vha, 0x2061, - "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x " - "pn %02x%02x%02x%02x%02x%02x%02x%02x " + "GA_NXT entry - nn %8phN pn %8phN " "port_id=%02x%02x%02x.\n", - fcport->node_name[0], fcport->node_name[1], - fcport->node_name[2], fcport->node_name[3], - fcport->node_name[4], fcport->node_name[5], - fcport->node_name[6], fcport->node_name[7], - fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7], + fcport->node_name, fcport->port_name, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); } @@ -923,7 +908,7 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) sns_cmd->p.gpn_data[9] != 0x02) { ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e, "GPN_ID failed, rejected request, gpn_rsp:\n"); - ql_dump_buffer(ql_dbg_disc, vha, 0x207f, + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f, sns_cmd->p.gpn_data, 16); rval = QLA_FUNCTION_FAILED; } else { @@ -988,17 +973,9 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) WWN_SIZE); ql_dbg(ql_dbg_disc, vha, 0x206e, - "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x " - "pn %02x%02x%02x%02x%02x%02x%02x%02x " + "GID_PT entry - nn %8phN pn %8phN " "port_id=%02x%02x%02x.\n", - list[i].node_name[0], list[i].node_name[1], - list[i].node_name[2], list[i].node_name[3], - list[i].node_name[4], list[i].node_name[5], - list[i].node_name[6], list[i].node_name[7], - list[i].port_name[0], list[i].port_name[1], - list[i].port_name[2], list[i].port_name[3], - list[i].port_name[4], list[i].port_name[5], - list[i].port_name[6], list[i].port_name[7], + list[i].node_name, list[i].port_name, list[i].d_id.b.domain, list[i].d_id.b.area, list[i].d_id.b.al_pa); } @@ -1257,18 +1234,18 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size) * Returns a pointer to the intitialized @ct_req. */ static inline struct ct_sns_req * -qla2x00_prep_ct_fdmi_req(struct ct_sns_req *ct_req, uint16_t cmd, +qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size) { - memset(ct_req, 0, sizeof(struct ct_sns_pkt)); + memset(p, 0, sizeof(struct ct_sns_pkt)); - ct_req->header.revision = 0x01; - ct_req->header.gs_type = 0xFA; - ct_req->header.gs_subtype = 0x10; - ct_req->command = cpu_to_be16(cmd); - ct_req->max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); + p->p.req.header.revision = 0x01; + p->p.req.header.gs_type = 0xFA; + p->p.req.header.gs_subtype = 0x10; + p->p.req.command = cpu_to_be16(cmd); + p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); - return ct_req; + return &p->p.req; } /** @@ -1296,8 +1273,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE); /* Prepare CT request */ - ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD, - RHBA_RSP_SIZE); + ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare FDMI command arguments -- attribute block, attributes. 
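The qla2x00_prep_ct_req()/qla2x00_prep_ct_fdmi_req() signature change running through this file is a type-safety fix for the memset(): the old code cleared sizeof(struct ct_sns_pkt) bytes through a struct ct_sns_req pointer. Because the request is the first member of the packet union, the write stayed inside the allocation, but the size/type mismatch reads as an overflow to static checkers. Passing the enclosing packet makes size and type agree:

	/* before: request-typed pointer, packet-sized clear */
	memset(ct_req, 0, sizeof(struct ct_sns_pkt));

	/* after: clear the whole packet, hand the request back */
	memset(p, 0, sizeof(struct ct_sns_pkt));
	return &p->p.req;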
*/ @@ -1319,11 +1295,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) size += 4 + WWN_SIZE; ql_dbg(ql_dbg_disc, vha, 0x2025, - "NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n", - eiter->a.node_name[0], eiter->a.node_name[1], - eiter->a.node_name[2], eiter->a.node_name[3], - eiter->a.node_name[4], eiter->a.node_name[5], - eiter->a.node_name[6], eiter->a.node_name[7]); + "NodeName = %8phN.\n", eiter->a.node_name); /* Manufacturer. */ eiter = (struct ct_fdmi_hba_attr *) (entries + size); @@ -1365,8 +1337,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) /* Model description. */ eiter = (struct ct_fdmi_hba_attr *) (entries + size); eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION); - if (ha->model_desc) - strncpy(eiter->a.model_desc, ha->model_desc, 80); + strncpy(eiter->a.model_desc, ha->model_desc, 80); alen = strlen(eiter->a.model_desc); alen += (alen & 3) ? (4 - (alen & 3)) : 4; eiter->len = cpu_to_be16(4 + alen); @@ -1427,16 +1398,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) qla2x00_update_ms_fdmi_iocb(vha, size + 16); ql_dbg(ql_dbg_disc, vha, 0x202e, - "RHBA identifier = " - "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", - ct_req->req.rhba.hba_identifier[0], - ct_req->req.rhba.hba_identifier[1], - ct_req->req.rhba.hba_identifier[2], - ct_req->req.rhba.hba_identifier[3], - ct_req->req.rhba.hba_identifier[4], - ct_req->req.rhba.hba_identifier[5], - ct_req->req.rhba.hba_identifier[6], - ct_req->req.rhba.hba_identifier[7], size); + "RHBA identifier = %8phN size=%d.\n", + ct_req->req.rhba.hba_identifier, size); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076, entries, size); @@ -1486,19 +1449,14 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha) DHBA_RSP_SIZE); /* Prepare CT request */ - ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, DHBA_CMD, - DHBA_RSP_SIZE); + ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare FDMI command arguments -- portname. */ memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE); ql_dbg(ql_dbg_disc, vha, 0x2036, - "DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n", - ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1], - ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3], - ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5], - ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]); + "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, @@ -1543,8 +1501,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE); /* Prepare CT request */ - ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD, - RPA_RSP_SIZE); + ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare FDMI command arguments -- attribute block, attributes. 
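The ql_dbg() conversions above lean on the kernel's %*ph printf extension: %8phN prints an 8-byte array as 16 contiguous hex digits with no separators, so one argument replaces eight %02x pairs per WWN. For example:

	ql_dbg(ql_dbg_disc, vha, 0x2025,
	    "NodeName = %8phN.\n", eiter->a.node_name);
	/* emits e.g. "NodeName = 200000e08b123456." */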
*/ @@ -1575,6 +1532,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) if (IS_CNA_CAPABLE(ha)) eiter->a.sup_speed = __constant_cpu_to_be32( FDMI_PORT_SPEED_10GB); + else if (IS_QLA27XX(ha)) + eiter->a.sup_speed = __constant_cpu_to_be32( + FDMI_PORT_SPEED_32GB|FDMI_PORT_SPEED_16GB| + FDMI_PORT_SPEED_8GB); else if (IS_QLA25XX(ha)) eiter->a.sup_speed = __constant_cpu_to_be32( FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB| @@ -1623,6 +1584,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) eiter->a.cur_speed = __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB); break; + case PORT_SPEED_32GB: + eiter->a.cur_speed = + __constant_cpu_to_be32(FDMI_PORT_SPEED_32GB); + break; default: eiter->a.cur_speed = __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN); @@ -1679,12 +1644,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) qla2x00_update_ms_fdmi_iocb(vha, size + 16); ql_dbg(ql_dbg_disc, vha, 0x203e, - "RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n", - ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1], - ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3], - ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5], - ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7], - size); + "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079, entries, size); @@ -1718,7 +1678,8 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha) int rval; struct qla_hw_data *ha = vha->hw; - if (IS_QLA2100(ha) || IS_QLA2200(ha)) + if (IS_QLA2100(ha) || IS_QLA2200(ha) || + IS_QLAFX00(ha)) return QLA_FUNCTION_FAILED; rval = qla2x00_mgmt_svr_login(vha); @@ -1770,7 +1731,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) GFPN_ID_RSP_SIZE); /* Prepare CT request */ - ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GFPN_ID_CMD, + ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD, GFPN_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; @@ -1837,18 +1798,18 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size, static inline struct ct_sns_req * -qla24xx_prep_ct_fm_req(struct ct_sns_req *ct_req, uint16_t cmd, +qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size) { - memset(ct_req, 0, sizeof(struct ct_sns_pkt)); + memset(p, 0, sizeof(struct ct_sns_pkt)); - ct_req->header.revision = 0x01; - ct_req->header.gs_type = 0xFA; - ct_req->header.gs_subtype = 0x01; - ct_req->command = cpu_to_be16(cmd); - ct_req->max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); + p->p.req.header.revision = 0x01; + p->p.req.header.gs_type = 0xFA; + p->p.req.header.gs_subtype = 0x01; + p->p.req.command = cpu_to_be16(cmd); + p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4); - return ct_req; + return &p->p.req; } /** @@ -1884,8 +1845,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) GPSC_RSP_SIZE); /* Prepare CT request */ - ct_req = qla24xx_prep_ct_fm_req(&ha->ct_sns->p.req, - GPSC_CMD, GPSC_RSP_SIZE); + ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD, + GPSC_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_name */ @@ -1936,20 +1897,15 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) case BIT_10: list[i].fp_speed = PORT_SPEED_16GB; break; + case BIT_8: + list[i].fp_speed = PORT_SPEED_32GB; + break; } ql_dbg(ql_dbg_disc, vha, 0x205b, "GPSC ext entry - fpn " - "%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x " - "speed=%04x.\n", - list[i].fabric_port_name[0], - list[i].fabric_port_name[1], - list[i].fabric_port_name[2], - list[i].fabric_port_name[3], - list[i].fabric_port_name[4], - list[i].fabric_port_name[5], - 
list[i].fabric_port_name[6], - list[i].fabric_port_name[7], + "%8phN speeds=%04x speed=%04x.\n", + list[i].fabric_port_name, be16_to_cpu(ct_rsp->rsp.gpsc.speeds), be16_to_cpu(ct_rsp->rsp.gpsc.speed)); } @@ -1995,7 +1951,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) GFF_ID_RSP_SIZE); /* Prepare CT request */ - ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GFF_ID_CMD, + ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD, GFF_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index edf4d14a133..e2184412617 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -25,7 +25,6 @@ */ static int qla2x00_isp_firmware(scsi_qla_host_t *); static int qla2x00_setup_chip(scsi_qla_host_t *); -static int qla2x00_init_rings(scsi_qla_host_t *); static int qla2x00_fw_ready(scsi_qla_host_t *); static int qla2x00_configure_hba(scsi_qla_host_t *); static int qla2x00_configure_loop(scsi_qla_host_t *); @@ -83,7 +82,9 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha) /* Firmware should use switch negotiated r_a_tov for timeout. */ tmo = ha->r_a_tov / 10 * 2; - if (!IS_FWI2_CAPABLE(ha)) { + if (IS_QLAFX00(ha)) { + tmo = FX00_DEF_RATOV * 2; + } else if (!IS_FWI2_CAPABLE(ha)) { /* * Except for earlier ISPs where the timeout is seeded from the * initialization control block. @@ -270,56 +271,46 @@ done: } static void -qla2x00_async_tm_cmd_done(void *data, void *ptr, int res) +qla2x00_tmf_iocb_timeout(void *data) { - srb_t *sp = (srb_t *)ptr; - struct srb_iocb *iocb = &sp->u.iocb_cmd; - struct scsi_qla_host *vha = (scsi_qla_host_t *)data; - uint32_t flags; - uint16_t lun; - int rval; - - if (!test_bit(UNLOADING, &vha->dpc_flags)) { - flags = iocb->u.tmf.flags; - lun = (uint16_t)iocb->u.tmf.lun; + srb_t *sp = (srb_t *)data; + struct srb_iocb *tmf = &sp->u.iocb_cmd; - /* Issue Marker IOCB */ - rval = qla2x00_marker(vha, vha->hw->req_q_map[0], - vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, - flags == TCF_LUN_RESET ? 
MK_SYNC_ID_LUN : MK_SYNC_ID); + tmf->u.tmf.comp_status = CS_TIMEOUT; + complete(&tmf->u.tmf.comp); +} - if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) { - ql_dbg(ql_dbg_taskm, vha, 0x8030, - "TM IOCB failed (%x).\n", rval); - } - } - sp->free(sp->fcport->vha, sp); +static void +qla2x00_tmf_sp_done(void *data, void *ptr, int res) +{ + srb_t *sp = (srb_t *)ptr; + struct srb_iocb *tmf = &sp->u.iocb_cmd; + complete(&tmf->u.tmf.comp); } int -qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun, +qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, uint32_t tag) { struct scsi_qla_host *vha = fcport->vha; + struct srb_iocb *tm_iocb; srb_t *sp; - struct srb_iocb *tcf; - int rval; + int rval = QLA_FUNCTION_FAILED; - rval = QLA_FUNCTION_FAILED; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; + tm_iocb = &sp->u.iocb_cmd; sp->type = SRB_TM_CMD; sp->name = "tmf"; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - - tcf = &sp->u.iocb_cmd; - tcf->u.tmf.flags = tm_flags; - tcf->u.tmf.lun = lun; - tcf->u.tmf.data = tag; - tcf->timeout = qla2x00_async_iocb_timeout; - sp->done = qla2x00_async_tm_cmd_done; + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)); + tm_iocb->u.tmf.flags = flags; + tm_iocb->u.tmf.lun = lun; + tm_iocb->u.tmf.data = tag; + sp->done = qla2x00_tmf_sp_done; + tm_iocb->timeout = qla2x00_tmf_iocb_timeout; + init_completion(&tm_iocb->u.tmf.comp); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) @@ -329,14 +320,121 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun, "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); + + wait_for_completion(&tm_iocb->u.tmf.comp); + + rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ? + QLA_SUCCESS : QLA_FUNCTION_FAILED; + + if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) { + ql_dbg(ql_dbg_taskm, vha, 0x8030, + "TM IOCB failed (%x).\n", rval); + } + + if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) { + flags = tm_iocb->u.tmf.flags; + lun = (uint16_t)tm_iocb->u.tmf.lun; + + /* Issue Marker IOCB */ + qla2x00_marker(vha, vha->hw->req_q_map[0], + vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, + flags == TCF_LUN_RESET ? 
MK_SYNC_ID_LUN : MK_SYNC_ID); + } + +done_free_sp: + sp->free(vha, sp); +done: return rval; +} + +static void +qla24xx_abort_iocb_timeout(void *data) +{ + srb_t *sp = (srb_t *)data; + struct srb_iocb *abt = &sp->u.iocb_cmd; + + abt->u.abt.comp_status = CS_TIMEOUT; + complete(&abt->u.abt.comp); +} + +static void +qla24xx_abort_sp_done(void *data, void *ptr, int res) +{ + srb_t *sp = (srb_t *)ptr; + struct srb_iocb *abt = &sp->u.iocb_cmd; + + complete(&abt->u.abt.comp); +} + +static int +qla24xx_async_abort_cmd(srb_t *cmd_sp) +{ + scsi_qla_host_t *vha = cmd_sp->fcport->vha; + fc_port_t *fcport = cmd_sp->fcport; + struct srb_iocb *abt_iocb; + srb_t *sp; + int rval = QLA_FUNCTION_FAILED; + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + abt_iocb = &sp->u.iocb_cmd; + sp->type = SRB_ABT_CMD; + sp->name = "abort"; + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)); + abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; + sp->done = qla24xx_abort_sp_done; + abt_iocb->timeout = qla24xx_abort_iocb_timeout; + init_completion(&abt_iocb->u.abt.comp); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + + ql_dbg(ql_dbg_async, vha, 0x507c, + "Abort command issued - hdl=%x, target_id=%x\n", + cmd_sp->handle, fcport->tgt_id); + + wait_for_completion(&abt_iocb->u.abt.comp); + + rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? + QLA_SUCCESS : QLA_FUNCTION_FAILED; done_free_sp: - sp->free(fcport->vha, sp); + sp->free(vha, sp); done: return rval; } +int +qla24xx_async_abort_command(srb_t *sp) +{ + unsigned long flags = 0; + + uint32_t handle; + fc_port_t *fcport = sp->fcport; + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = vha->req; + + spin_lock_irqsave(&ha->hardware_lock, flags); + for (handle = 1; handle < req->num_outstanding_cmds; handle++) { + if (req->outstanding_cmds[handle] == sp) + break; + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (handle == req->num_outstanding_cmds) { + /* Command not found. */ + return QLA_FUNCTION_FAILED; + } + if (sp->type == SRB_FXIOCB_DCMD) + return qlafx00_fx_disc(vha, &vha->hw->mr.fcport, + FXDISC_ABORT_IOCTL); + + return qla24xx_async_abort_cmd(sp); +} + void qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) @@ -523,7 +621,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) vha->flags.reset_active = 0; ha->flags.pci_channel_io_perm_failure = 0; ha->flags.eeh_busy = 0; - ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP; + vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); atomic_set(&vha->loop_state, LOOP_DOWN); vha->device_flags = DFLG_NO_CABLE; @@ -551,7 +649,18 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) if (rval) { ql_log(ql_log_fatal, vha, 0x004f, "Unable to validate FLASH data.\n"); - return (rval); + return rval; + } + + if (IS_QLA8044(ha)) { + qla8044_read_reset_template(vha); + + /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0. + * If DONTRESET_BIT0 is set, drivers should not set dev_state + * to NEED_RESET. But if NEED_RESET is set, drivers should + * honor the reset. */ + if (ql2xdontresethba == 1) + qla8044_set_idc_dontreset(vha); } ha->isp_ops->get_flash_version(vha, req->ring); @@ -563,12 +672,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) if (ha->flags.disable_serdes) { /* Mask HBA via NVRAM settings?
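
Both qla2x00_async_tm_cmd() and qla24xx_async_abort_cmd() above now share one shape: arm a timeout handler that stamps CS_TIMEOUT, issue the SRB, then block on a struct completion that either the done callback or the timeout callback signals. A minimal sketch of that wait pattern, detached from the driver's srb_t plumbing (all demo_* names are hypothetical):

#include <linux/completion.h>
#include <linux/errno.h>

struct demo_iocb {
	struct completion comp;
	int comp_status;	/* 0 on success, analogous to CS_COMPLETE */
};

/* would be wired up as the normal response-path callback */
static void demo_done(struct demo_iocb *io)
{
	io->comp_status = 0;
	complete(&io->comp);
}

/* would be wired up as the SRB timer callback */
static void demo_timeout(struct demo_iocb *io)
{
	io->comp_status = -ETIMEDOUT;
	complete(&io->comp);
}

static int demo_issue_and_wait(struct demo_iocb *io)
{
	init_completion(&io->comp);
	/* ... hand the IOCB to the hardware here ... */
	wait_for_completion(&io->comp);
	return io->comp_status ? -EIO : 0;
}

The completion makes the call synchronous for the error-handler caller while the response queue is still processed asynchronously.
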
*/ ql_log(ql_log_info, vha, 0x0077, - "Masking HBA WWPN " - "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n", - vha->port_name[0], vha->port_name[1], - vha->port_name[2], vha->port_name[3], - vha->port_name[4], vha->port_name[5], - vha->port_name[6], vha->port_name[7]); + "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name); return QLA_FUNCTION_FAILED; } @@ -619,7 +723,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) qla24xx_read_fcp_prio_cfg(vha); - qla2x00_set_driver_version(vha, QLA2XXX_VERSION); + if (IS_P3P_TYPE(ha)) + qla82xx_set_driver_version(vha, QLA2XXX_VERSION); + else + qla25xx_set_driver_version(vha, QLA2XXX_VERSION); return (rval); } @@ -1333,7 +1440,7 @@ qla24xx_chip_diag(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return QLA_SUCCESS; ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; @@ -1369,7 +1476,13 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) } ha->fw_dumped = 0; - fixed_size = mem_size = eft_size = fce_size = mq_size = 0; + ha->fw_dump_cap_flags = 0; + dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; + req_q_size = rsp_q_size = 0; + + if (IS_QLA27XX(ha)) + goto try_fce; + if (IS_QLA2100(ha) || IS_QLA2200(ha)) { fixed_size = sizeof(struct qla2100_fw_dump); } else if (IS_QLA23XX(ha)) { @@ -1385,6 +1498,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem); else fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem); + mem_size = (ha->fw_memory_size - 0x100000 + 1) * sizeof(uint32_t); if (ha->mqenable) { @@ -1399,12 +1513,19 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mq_size += ha->max_rsp_queues * (rsp->length * sizeof(response_t)); } - if (ha->tgt.atio_q_length) + if (ha->tgt.atio_ring) mq_size += ha->tgt.atio_q_length * sizeof(request_t); /* Allocate memory for Fibre Channel Event Buffer. */ - if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) + if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha)) goto try_eft; +try_fce: + if (ha->fce) + dma_free_coherent(&ha->pdev->dev, + FCE_SIZE, ha->fce, ha->fce_dma); + + /* Allocate memory for Fibre Channel Event Buffer. */ tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, GFP_KERNEL); if (!tc) { @@ -1432,7 +1553,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) ha->flags.fce_enabled = 1; ha->fce_dma = tc_dma; ha->fce = tc; + try_eft: + if (ha->eft) + dma_free_coherent(&ha->pdev->dev, + EFT_SIZE, ha->eft, ha->eft_dma); + /* Allocate memory for Extended Trace Buffer. 
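
This hunk and several others in the series collapse eight hand-unrolled "%02x" conversions into a single "%8phN". That is the printk hex-dump extension from lib/vsprintf.c: the "%*ph" family prints a small byte buffer as hex, and the N suffix drops separators, which matches the traditional WWPN spelling. A short illustration (helper name and buffer are made up):

#include <linux/printk.h>
#include <linux/types.h>

static void demo_log_wwpn(const u8 *port_name)
{
	/* emits e.g. "WWPN 21000024ff454734" -- 8 bytes, no separators */
	pr_info("WWPN %8phN\n", port_name);
}
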
*/ tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, GFP_KERNEL); @@ -1459,15 +1585,28 @@ try_eft: ha->eft_dma = tc_dma; ha->eft = tc; } + cont_alloc: + if (IS_QLA27XX(ha)) { + if (!ha->fw_dump_template) { + ql_log(ql_log_warn, vha, 0x00ba, + "Failed missing fwdump template\n"); + return; + } + dump_size = qla27xx_fwdt_calculate_dump_size(vha); + ql_dbg(ql_dbg_init, vha, 0x00fa, + "-> allocating fwdump (%x bytes)...\n", dump_size); + goto allocate; + } + req_q_size = req->length * sizeof(request_t); rsp_q_size = rsp->length * sizeof(response_t); - dump_size = offsetof(struct qla2xxx_fw_dump, isp); dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size; ha->chain_offset = dump_size; dump_size += mq_size + fce_size; +allocate: ha->fw_dump = vmalloc(dump_size); if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0x00c4, @@ -1489,10 +1628,13 @@ cont_alloc: } return; } + ha->fw_dump_len = dump_size; ql_dbg(ql_dbg_init, vha, 0x00c5, "Allocated (%d KB) for firmware dump.\n", dump_size / 1024); - ha->fw_dump_len = dump_size; + if (IS_QLA27XX(ha)) + return; + ha->fw_dump->signature[0] = 'Q'; ha->fw_dump->signature[1] = 'L'; ha->fw_dump->signature[2] = 'G'; @@ -1616,7 +1758,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) unsigned long flags; uint16_t fw_major_version; - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { rval = ha->isp_ops->load_risc(vha, &srisc_address); if (rval == QLA_SUCCESS) { qla2x00_stop_firmware(vha); @@ -1652,7 +1794,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) if (rval == QLA_SUCCESS) { enable_82xx_npiv: fw_major_version = ha->fw_major_version; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) qla82xx_check_md_needed(vha); else rval = qla2x00_get_fw_version(vha); @@ -1682,8 +1824,10 @@ enable_82xx_npiv: goto failed; if (!fw_major_version && ql2xallocfwdump - && !IS_QLA82XX(ha)) + && !(IS_P3P_TYPE(ha))) qla2x00_alloc_fw_dump(vha); + } else { + goto failed; } } else { ql_log(ql_log_fatal, vha, 0x00cd, @@ -1706,9 +1850,6 @@ enable_82xx_npiv: spin_unlock_irqrestore(&ha->hardware_lock, flags); } - if (IS_QLA83XX(ha)) - goto skip_fac_check; - if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { uint32_t size; @@ -1721,8 +1862,8 @@ enable_82xx_npiv: "Unsupported FAC firmware (%d.%02d.%02d).\n", ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version); -skip_fac_check: - if (IS_QLA83XX(ha)) { + + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { ha->flags.fac_supported = 0; rval = QLA_SUCCESS; } @@ -1850,7 +1991,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) int rval; struct qla_hw_data *ha = vha->hw; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return; /* Update Serial Link options. */ @@ -1921,7 +2062,11 @@ qla24xx_config_rings(struct scsi_qla_host *vha) icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma)); icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); - if (ha->mqenable || IS_QLA83XX(ha)) { + if (IS_SHADOW_REG_CAPABLE(ha)) + icb->firmware_options_2 |= + __constant_cpu_to_le32(BIT_30|BIT_29); + + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); icb->rid = __constant_cpu_to_le16(rid); if (ha->flags.msix_enabled) { @@ -1979,7 +2124,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha) * * Returns 0 on success. 
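
The new try_fce/try_eft entry points free any FCE/EFT buffer left over from a previous pass before allocating a fresh one, so qla2x00_alloc_fw_dump() can be re-entered (as the ISP27xx path does) without leaking DMA-coherent memory. The free-before-realloc idiom in isolation, with placeholder parameters standing in for ha->fce/ha->fce_dma:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *demo_realloc_coherent(struct device *dev, void *old,
				   dma_addr_t *dma, size_t size)
{
	if (old)
		dma_free_coherent(dev, size, old, *dma);
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}
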
*/ -static int +int qla2x00_init_rings(scsi_qla_host_t *vha) { int rval; @@ -1998,6 +2143,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha) req = ha->req_q_map[que]; if (!req) continue; + req->out_ptr = (void *)(req->ring + req->length); + *req->out_ptr = 0; for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) req->outstanding_cmds[cnt] = NULL; @@ -2013,8 +2160,13 @@ qla2x00_init_rings(scsi_qla_host_t *vha) rsp = ha->rsp_q_map[que]; if (!rsp) continue; + rsp->in_ptr = (void *)(rsp->ring + rsp->length); + *rsp->in_ptr = 0; /* Initialize response queue entries */ - qla2x00_init_response_q_entries(rsp); + if (IS_QLAFX00(ha)) + qlafx00_init_response_q_entries(rsp); + else + qla2x00_init_response_q_entries(rsp); } ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; @@ -2026,11 +2178,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha) spin_unlock_irqrestore(&ha->hardware_lock, flags); + ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n"); + + if (IS_QLAFX00(ha)) { + rval = qlafx00_init_firmware(vha, ha->init_cb_size); + goto next_check; + } + /* Update any ISP specific firmware options before initialization. */ ha->isp_ops->update_fw_options(vha); - ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n"); - if (ha->flags.npiv_supported) { if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha)) ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; @@ -2044,6 +2201,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) } rval = qla2x00_init_firmware(vha, ha->init_cb_size); +next_check: if (rval) { ql_log(ql_log_fatal, vha, 0x00d2, "Init Firmware **** FAILED ****.\n"); @@ -2071,6 +2229,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) uint16_t state[5]; struct qla_hw_data *ha = vha->hw; + if (IS_QLAFX00(vha->hw)) + return qlafx00_fw_ready(vha); + rval = QLA_SUCCESS; /* 20 seconds for loop down. 
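
qla2x00_init_rings() now also seeds the shadow index dwords: when firmware_options_2 advertises shadow-register support (the BIT_30|BIT_29 set in qla24xx_config_rings() above), the ISP DMA-writes its ring indices into host memory just past each ring, and the driver reads *req->out_ptr instead of performing an MMIO register read. A sketch of that layout with deliberately simplified types (demo_* names hypothetical):

#include <linux/types.h>

struct demo_ring {
	u32 *ring;	/* 'length' entries of IOCB space, then one shadow dword */
	u32 length;
	u32 *out_ptr;	/* consumer index, DMA-updated by the firmware */
};

static void demo_init_shadow(struct demo_ring *r)
{
	r->out_ptr = r->ring + r->length;	/* first dword past the ring */
	*r->out_ptr = 0;
}

static u32 demo_read_out_index(struct demo_ring *r)
{
	return *r->out_ptr;	/* plain memory read, no MMIO round trip */
}

Avoiding the register read matters on the I/O fast path, where a posted-read across PCIe can stall for microseconds.
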
*/ @@ -2298,14 +2459,6 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) "Topology - %s, Host Loop address 0x%x.\n", connect_type, vha->loop_id); - if (rval) { - ql_log(ql_log_warn, vha, 0x2011, - "%s FAILED\n", __func__); - } else { - ql_dbg(ql_dbg_disc, vha, 0x2012, - "%s success\n", __func__); - } - return(rval); } @@ -3058,22 +3211,13 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) mb); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2004, - "Unable to adjust iIDMA " - "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x " - "%04x.\n", fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7], rval, - fcport->fp_speed, mb[0], mb[1]); + "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n", + fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]); } else { ql_dbg(ql_dbg_disc, vha, 0x2005, - "iIDMA adjusted to %s GB/s " - "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", + "iIDMA adjusted to %s GB/s on %8phN.\n", qla2x00_get_link_speed_str(ha, fcport->fp_speed), - fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7]); + fcport->port_name); } } @@ -3136,6 +3280,12 @@ void qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { fcport->vha = vha; + + if (IS_QLAFX00(vha->hw)) { + qla2x00_set_fcport_state(fcport, FCS_ONLINE); + qla2x00_reg_remote_port(vha, fcport); + return; + } fcport->login_retry = 0; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); @@ -3265,7 +3415,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); - fcport->loop_id = FC_NO_LOOP_ID; + qla2x00_clear_loop_id(fcport); } } } @@ -3896,15 +4046,24 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) /* Wait at most MAX_TARGET RSCNs for a stable link. */ wait_time = 256; do { - /* Issue a marker after FW becomes ready. */ - qla2x00_marker(vha, req, rsp, 0, 0, - MK_SYNC_ALL); - vha->marker_needed = 0; + if (!IS_QLAFX00(vha->hw)) { + /* + * Issue a marker after FW becomes + * ready. + */ + qla2x00_marker(vha, req, rsp, 0, 0, + MK_SYNC_ALL); + vha->marker_needed = 0; + } /* Remap devices on Loop. 
*/ clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - qla2x00_configure_loop(vha); + if (IS_QLAFX00(vha->hw)) + qlafx00_configure_devices(vha); + else + qla2x00_configure_loop(vha); + wait_time--; } while (!atomic_read(&vha->loop_down_timer) && !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) @@ -3970,9 +4129,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha) if (fcport->drport && atomic_read(&fcport->state) != FCS_UNCONFIGURED) { spin_unlock_irqrestore(&ha->vport_slock, flags); - qla2x00_rport_del(fcport); - spin_lock_irqsave(&ha->vport_slock, flags); } } @@ -3991,10 +4148,18 @@ qla83xx_reset_ownership(scsi_qla_host_t *vha) uint32_t class_type_mask = 0x3; uint16_t fcoe_other_function = 0xffff, i; - qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); - - qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1); - qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2); + if (IS_QLA8044(ha)) { + drv_presence = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + dev_part_info1 = qla8044_rd_direct(vha, + QLA8044_CRB_DEV_PART_INFO_INDEX); + dev_part_info2 = qla8044_rd_direct(vha, + QLA8044_CRB_DEV_PART_INFO2); + } else { + qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence); + qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1); + qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2); + } for (i = 0; i < 8; i++) { class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask); if ((class_type == QLA83XX_CLASS_TYPE_FCOE) && @@ -4331,7 +4496,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) /* For ISP82XX, driver waits for completion of the commands. * online flag should be set. */ - if (!IS_QLA82XX(ha)) + if (!(IS_P3P_TYPE(ha))) vha->flags.online = 0; ha->flags.chip_reset_done = 0; clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); @@ -4344,7 +4509,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) * Driver waits for the completion of the commands. * the interrupts need to be enabled. */ - if (!IS_QLA82XX(ha)) + if (!(IS_P3P_TYPE(ha))) ha->isp_ops->reset_chip(vha); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); @@ -4387,7 +4552,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) if (!ha->flags.eeh_busy) { /* Make sure for ISP 82XX IO DMA is complete */ - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { qla82xx_chip_reset_cleanup(vha); ql_log(ql_log_info, vha, 0x00b4, "Done chip reset cleanup.\n"); @@ -4571,7 +4736,6 @@ static int qla2x00_restart_isp(scsi_qla_host_t *vha) { int status = 0; - uint32_t wait_time; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; @@ -4588,14 +4752,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) if (!status && !(status = qla2x00_init_rings(vha))) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); ha->flags.chip_reset_done = 1; + /* Initialize the queues in use */ qla25xx_init_queues(ha); status = qla2x00_fw_ready(vha); if (!status) { - ql_dbg(ql_dbg_taskm, vha, 0x8031, - "Start configure loop status = %d.\n", status); - /* Issue a marker after FW becomes ready. */ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); @@ -4610,24 +4772,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) qlt_24xx_process_atio_queue(vha); spin_unlock_irqrestore(&ha->hardware_lock, flags); - /* Wait at most MAX_TARGET RSCNs for a stable link. 
*/ - wait_time = 256; - do { - clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - qla2x00_configure_loop(vha); - wait_time--; - } while (!atomic_read(&vha->loop_down_timer) && - !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) - && wait_time && (test_bit(LOOP_RESYNC_NEEDED, - &vha->dpc_flags))); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } /* if no cable then assume it's good */ if ((vha->device_flags & DFLG_NO_CABLE)) status = 0; - - ql_dbg(ql_dbg_taskm, vha, 0x8032, - "Configure loop done, status = 0x%x.\n", status); } return (status); } @@ -4707,7 +4857,7 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return; vha->flags.online = 0; @@ -4764,17 +4914,16 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) nv = ha->nvram; /* Determine NVRAM starting address. */ - if (ha->flags.port0) { + if (ha->port_no == 0) { ha->nvram_base = FA_NVRAM_FUNC0_ADDR; ha->vpd_base = FA_NVRAM_VPD0_ADDR; } else { ha->nvram_base = FA_NVRAM_FUNC1_ADDR; ha->vpd_base = FA_NVRAM_VPD1_ADDR; } + ha->nvram_size = sizeof(struct nvram_24xx); ha->vpd_size = FA_NVRAM_VPD_SIZE; - if (IS_QLA82XX(ha)) - ha->vpd_size = FA_VPD_SIZE_82XX; /* Get VPD data into cache */ ha->vpd = ha->nvram + VPD_OFFSET; @@ -4816,7 +4965,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) nv->exchange_count = __constant_cpu_to_le16(0); nv->hard_address = __constant_cpu_to_le16(124); nv->port_name[0] = 0x21; - nv->port_name[1] = 0x00 + ha->port_no; + nv->port_name[1] = 0x00 + ha->port_no + 1; nv->port_name[2] = 0x00; nv->port_name[3] = 0xe0; nv->port_name[4] = 0x8b; @@ -5091,6 +5240,99 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, segments--; } + if (!IS_QLA27XX(ha)) + return rval; + + if (ha->fw_dump_template) + vfree(ha->fw_dump_template); + ha->fw_dump_template = NULL; + ha->fw_dump_template_len = 0; + + ql_dbg(ql_dbg_init, vha, 0x0161, + "Loading fwdump template from %x\n", faddr); + qla24xx_read_flash_data(vha, dcode, faddr, 7); + risc_size = be32_to_cpu(dcode[2]); + ql_dbg(ql_dbg_init, vha, 0x0162, + "-> array size %x dwords\n", risc_size); + if (risc_size == 0 || risc_size == ~0) + goto default_template; + + dlen = (risc_size - 8) * sizeof(*dcode); + ql_dbg(ql_dbg_init, vha, 0x0163, + "-> template allocating %x bytes...\n", dlen); + ha->fw_dump_template = vmalloc(dlen); + if (!ha->fw_dump_template) { + ql_log(ql_log_warn, vha, 0x0164, + "Failed fwdump template allocate %x bytes.\n", risc_size); + goto default_template; + } + + faddr += 7; + risc_size -= 8; + dcode = ha->fw_dump_template; + qla24xx_read_flash_data(vha, dcode, faddr, risc_size); + for (i = 0; i < risc_size; i++) + dcode[i] = le32_to_cpu(dcode[i]); + + if (!qla27xx_fwdt_template_valid(dcode)) { + ql_log(ql_log_warn, vha, 0x0165, + "Failed fwdump template validate\n"); + goto default_template; + } + + dlen = qla27xx_fwdt_template_size(dcode); + ql_dbg(ql_dbg_init, vha, 0x0166, + "-> template size %x bytes\n", dlen); + if (dlen > risc_size * sizeof(*dcode)) { + ql_log(ql_log_warn, vha, 0x0167, + "Failed fwdump template exceeds array by %x bytes\n", + (uint32_t)(dlen - risc_size * sizeof(*dcode))); + goto default_template; + } + ha->fw_dump_template_len = dlen; + return rval; + +default_template: + ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n"); + if (ha->fw_dump_template) + vfree(ha->fw_dump_template); + ha->fw_dump_template = NULL; + ha->fw_dump_template_len = 0; + + dlen = 
qla27xx_fwdt_template_default_size(); + ql_dbg(ql_dbg_init, vha, 0x0169, + "-> template allocating %x bytes...\n", dlen); + ha->fw_dump_template = vmalloc(dlen); + if (!ha->fw_dump_template) { + ql_log(ql_log_warn, vha, 0x016a, + "Failed fwdump template allocate %x bytes.\n", risc_size); + goto failed_template; + } + + dcode = ha->fw_dump_template; + risc_size = dlen / sizeof(*dcode); + memcpy(dcode, qla27xx_fwdt_template_default(), dlen); + for (i = 0; i < risc_size; i++) + dcode[i] = be32_to_cpu(dcode[i]); + + if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) { + ql_log(ql_log_warn, vha, 0x016b, + "Failed fwdump template validate\n"); + goto failed_template; + } + + dlen = qla27xx_fwdt_template_size(ha->fw_dump_template); + ql_dbg(ql_dbg_init, vha, 0x016c, + "-> template size %x bytes\n", dlen); + ha->fw_dump_template_len = dlen; + return rval; + +failed_template: + ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n"); + if (ha->fw_dump_template) + vfree(ha->fw_dump_template); + ha->fw_dump_template = NULL; + ha->fw_dump_template_len = 0; return rval; } @@ -5205,7 +5447,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) uint32_t risc_size; uint32_t i; struct fw_blob *blob; - uint32_t *fwcode, fwclen; + const uint32_t *fwcode; + uint32_t fwclen; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; @@ -5237,7 +5480,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) ql_log(ql_log_fatal, vha, 0x0093, "Unable to verify integrity of firmware image (%Zd).\n", blob->fw->size); - goto fail_fw_integrity; + return QLA_FUNCTION_FAILED; } for (i = 0; i < 4; i++) dcode[i] = be32_to_cpu(fwcode[i + 4]); @@ -5251,7 +5494,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) ql_log(ql_log_fatal, vha, 0x0095, "Firmware data: %08x %08x %08x %08x.\n", dcode[0], dcode[1], dcode[2], dcode[3]); - goto fail_fw_integrity; + return QLA_FUNCTION_FAILED; } while (segments && rval == QLA_SUCCESS) { @@ -5265,8 +5508,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) ql_log(ql_log_fatal, vha, 0x0096, "Unable to verify integrity of firmware image " "(%Zd).\n", blob->fw->size); - - goto fail_fw_integrity; + return QLA_FUNCTION_FAILED; } fragment = 0; @@ -5300,10 +5542,100 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) /* Next segment. 
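
The template loaders above follow one recipe: read a small header, take the array size in dwords from its third word, allocate with vmalloc(), byte-swap the body into host order, then validate and size-check it before use, falling back to the built-in default template on any failure. A compressed sketch of that recipe; the header layout and the big-endian conversion here are assumptions lifted from the hunks, not a documented format:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

static u32 *demo_load_template(const __be32 *image, u32 avail_dwords)
{
	u32 *tmpl;
	u32 i, dwords = be32_to_cpu(image[2]);	/* array size in dwords */

	if (dwords <= 8 || dwords == ~0U || dwords > avail_dwords)
		return NULL;		/* caller falls back to the default */

	tmpl = vmalloc((dwords - 8) * sizeof(*tmpl));
	if (!tmpl)
		return NULL;

	for (i = 0; i < dwords - 8; i++)	/* swap body into host order */
		tmpl[i] = be32_to_cpu(image[7 + i]);
	return tmpl;		/* still needs structural validation */
}
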
*/ segments--; } + + if (!IS_QLA27XX(ha)) + return rval; + + if (ha->fw_dump_template) + vfree(ha->fw_dump_template); + ha->fw_dump_template = NULL; + ha->fw_dump_template_len = 0; + + ql_dbg(ql_dbg_init, vha, 0x171, + "Loading fwdump template from %x\n", + (uint32_t)((void *)fwcode - (void *)blob->fw->data)); + risc_size = be32_to_cpu(fwcode[2]); + ql_dbg(ql_dbg_init, vha, 0x172, + "-> array size %x dwords\n", risc_size); + if (risc_size == 0 || risc_size == ~0) + goto default_template; + + dlen = (risc_size - 8) * sizeof(*fwcode); + ql_dbg(ql_dbg_init, vha, 0x0173, + "-> template allocating %x bytes...\n", dlen); + ha->fw_dump_template = vmalloc(dlen); + if (!ha->fw_dump_template) { + ql_log(ql_log_warn, vha, 0x0174, + "Failed fwdump template allocate %x bytes.\n", risc_size); + goto default_template; + } + + fwcode += 7; + risc_size -= 8; + dcode = ha->fw_dump_template; + for (i = 0; i < risc_size; i++) + dcode[i] = le32_to_cpu(fwcode[i]); + + if (!qla27xx_fwdt_template_valid(dcode)) { + ql_log(ql_log_warn, vha, 0x0175, + "Failed fwdump template validate\n"); + goto default_template; + } + + dlen = qla27xx_fwdt_template_size(dcode); + ql_dbg(ql_dbg_init, vha, 0x0176, + "-> template size %x bytes\n", dlen); + if (dlen > risc_size * sizeof(*fwcode)) { + ql_log(ql_log_warn, vha, 0x0177, + "Failed fwdump template exceeds array by %x bytes\n", + (uint32_t)(dlen - risc_size * sizeof(*fwcode))); + goto default_template; + } + ha->fw_dump_template_len = dlen; return rval; -fail_fw_integrity: - return QLA_FUNCTION_FAILED; +default_template: + ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n"); + if (ha->fw_dump_template) + vfree(ha->fw_dump_template); + ha->fw_dump_template = NULL; + ha->fw_dump_template_len = 0; + + dlen = qla27xx_fwdt_template_default_size(); + ql_dbg(ql_dbg_init, vha, 0x0179, + "-> template allocating %x bytes...\n", dlen); + ha->fw_dump_template = vmalloc(dlen); + if (!ha->fw_dump_template) { + ql_log(ql_log_warn, vha, 0x017a, + "Failed fwdump template allocate %x bytes.\n", risc_size); + goto failed_template; + } + + dcode = ha->fw_dump_template; + risc_size = dlen / sizeof(*fwcode); + fwcode = qla27xx_fwdt_template_default(); + for (i = 0; i < risc_size; i++) + dcode[i] = be32_to_cpu(fwcode[i]); + + if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) { + ql_log(ql_log_warn, vha, 0x017b, + "Failed fwdump template validate\n"); + goto failed_template; + } + + dlen = qla27xx_fwdt_template_size(ha->fw_dump_template); + ql_dbg(ql_dbg_init, vha, 0x017c, + "-> template size %x bytes\n", dlen); + ha->fw_dump_template_len = dlen; + return rval; + +failed_template: + ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n"); + if (ha->fw_dump_template) + vfree(ha->fw_dump_template); + ha->fw_dump_template = NULL; + ha->fw_dump_template_len = 0; + return rval; } int @@ -5536,6 +5868,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) /* Determine NVRAM starting address. 
*/ ha->nvram_size = sizeof(struct nvram_81xx); ha->vpd_size = FA_NVRAM_VPD_SIZE; + if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) + ha->vpd_size = FA_VPD_SIZE_82XX; /* Get VPD data into cache */ ha->vpd = ha->nvram + VPD_OFFSET; @@ -5577,7 +5911,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); nv->exchange_count = __constant_cpu_to_le16(0); nv->port_name[0] = 0x21; - nv->port_name[1] = 0x00 + ha->port_no; + nv->port_name[1] = 0x00 + ha->port_no + 1; nv->port_name[2] = 0x00; nv->port_name[3] = 0xe0; nv->port_name[4] = 0x8b; @@ -5611,7 +5945,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) nv->enode_mac[2] = 0xDD; nv->enode_mac[3] = 0x04; nv->enode_mac[4] = 0x05; - nv->enode_mac[5] = 0x06 + ha->port_no; + nv->enode_mac[5] = 0x06 + ha->port_no + 1; rval = 1; } @@ -5649,7 +5983,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) icb->enode_mac[2] = 0xDD; icb->enode_mac[3] = 0x04; icb->enode_mac[4] = 0x05; - icb->enode_mac[5] = 0x06 + ha->port_no; + icb->enode_mac[5] = 0x06 + ha->port_no + 1; } /* Use extended-initialization control block. */ @@ -5718,7 +6052,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) /* Link Down Timeout = 0: * - * When Port Down timer expires we will start returning + * When Port Down timer expires we will start returning * I/O's to OS with "DID_NO_CONNECT". * * Link Down Timeout != 0: @@ -5752,7 +6086,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) ha->login_retry_count = ql2xloginretrycount; /* if not running MSI-X we need handshaking on interrupts */ - if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha)) + if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha))) icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22); /* Enable ZIO. */ @@ -5790,7 +6124,6 @@ int qla82xx_restart_isp(scsi_qla_host_t *vha) { int status, rval; - uint32_t wait_time; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; @@ -5804,31 +6137,15 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) status = qla2x00_fw_ready(vha); if (!status) { - ql_log(ql_log_info, vha, 0x803c, - "Start configure loop, status =%d.\n", status); - /* Issue a marker after FW becomes ready. */ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); - vha->flags.online = 1; - /* Wait at most MAX_TARGET RSCNs for a stable link. 
*/ - wait_time = 256; - do { - clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - qla2x00_configure_loop(vha); - wait_time--; - } while (!atomic_read(&vha->loop_down_timer) && - !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) && - wait_time && - (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } /* if no cable then assume it's good */ if ((vha->device_flags & DFLG_NO_CABLE)) status = 0; - - ql_log(ql_log_info, vha, 0x8000, - "Configure loop done, status = 0x%x.\n", status); } if (!status) { @@ -5842,8 +6159,6 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) vha->marker_needed = 1; } - vha->flags.online = 1; - ha->isp_ops->enable_intrs(ha); ha->isp_abort_cnt = 0; @@ -6045,7 +6360,7 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) if (priority < 0) return QLA_FUNCTION_FAILED; - if (IS_QLA82XX(vha->hw)) { + if (IS_P3P_TYPE(vha->hw)) { fcport->fcp_prio = priority & 0xf; return QLA_SUCCESS; } diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 68e2c4afc13..b3b1d6fc2d6 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -1,10 +1,33 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ +#include "qla_target.h" +/** + * qla24xx_calc_iocbs() - Determine number of Command Type 3 and + * Continuation Type 1 IOCBs to allocate. + * + * @dsds: number of data segment descriptors needed + * + * Returns the number of IOCB entries needed to store @dsds. + */ +static inline uint16_t +qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds) +{ + uint16_t iocbs; + + iocbs = 1; + if (dsds > 1) { + iocbs += (dsds - 1) / 5; + if ((dsds - 1) % 5) + iocbs++; + } + return iocbs; +} + /* * qla2x00_debounce_register * Debounce register.
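
The arithmetic in qla24xx_calc_iocbs() reflects the ring geometry: the command IOCB itself carries one data-segment descriptor and each Continuation Type 1 IOCB carries five more, so the total is 1 + ceil((dsds - 1) / 5). The same computation using the kernel's rounding helper, with a few worked values (assuming that geometry):

#include <linux/kernel.h>
#include <linux/types.h>

static inline u16 demo_calc_iocbs(u16 dsds)
{
	/* 1 DSD in the command IOCB, 5 per continuation IOCB */
	return dsds > 1 ? 1 + DIV_ROUND_UP(dsds - 1, 5) : 1;
}

/* demo_calc_iocbs(1) == 1, (2) == 2, (6) == 2, (7) == 3, (11) == 3 */
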
@@ -37,7 +60,7 @@ qla2x00_poll(struct rsp_que *rsp) unsigned long flags; struct qla_hw_data *ha = rsp->hw; local_irq_save(flags); - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) qla82xx_poll(0, rsp); else ha->isp_ops->intr_handler(0, rsp); @@ -58,6 +81,17 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize) } static inline void +host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize) +{ + uint32_t *isrc = (uint32_t *) src; + __le32 *odest = (__le32 *) dst; + uint32_t iter = bsize >> 2; + + for (; iter ; iter--) + *odest++ = cpu_to_le32(*isrc++); +} + +static inline void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha) { int i; @@ -95,12 +129,20 @@ qla2x00_clear_loop_id(fc_port_t *fcport) { } static inline void -qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp) +qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp, + struct qla_tgt_cmd *tc) { struct dsd_dma *dsd_ptr, *tdsd_ptr; struct crc_context *ctx; - ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); + if (sp) + ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); + else if (tc) + ctx = (struct crc_context *)tc->ctx; + else { + BUG(); + return; + } /* clean up allocated prev pool */ list_for_each_entry_safe(dsd_ptr, tdsd_ptr, @@ -213,29 +255,27 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo) sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout; add_timer(&sp->u.iocb_cmd.timer); sp->free = qla2x00_sp_free; + if ((IS_QLAFX00(sp->fcport->vha->hw)) && + (sp->type == SRB_FXIOCB_DCMD)) + init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); } static inline int qla2x00_gid_list_size(struct qla_hw_data *ha) { - return sizeof(struct gid_list_info) * ha->max_fibre_devices; + if (IS_QLAFX00(ha)) + return sizeof(uint32_t) * 32; + else + return sizeof(struct gid_list_info) * ha->max_fibre_devices; } static inline void -qla2x00_do_host_ramp_up(scsi_qla_host_t *vha) +qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status) { - if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth) - return; - - /* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */ - if (time_before(jiffies, (vha->hw->host_last_rampdown_time + - HOST_QUEUE_RAMPDOWN_INTERVAL))) - return; - - /* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */ - if (time_before(jiffies, (vha->hw->host_last_rampup_time + - HOST_QUEUE_RAMPUP_INTERVAL))) - return; - - set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags); + if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && + (status & MBX_INTERRUPT) && ha->flags.mbox_int) { + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + complete(&ha->mbx_intr_comp); + } } diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index d2630317cce..76093152959 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
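
host_to_adap(), added above, is a fixed-width endianness shim: it copies a buffer toward the adapter one dword at a time, converting each to little-endian, so the code stays correct on big-endian hosts where a plain memcpy() would not be. A standalone equivalent:

#include <linux/types.h>
#include <asm/byteorder.h>

static void demo_host_to_adap(const u8 *src, u8 *dst, u32 bsize)
{
	const u32 *isrc = (const u32 *)src;
	__le32 *odest = (__le32 *)dst;
	u32 iter = bsize >> 2;		/* whole dwords only */

	while (iter--)
		*odest++ = cpu_to_le32(*isrc++);
}
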
*/ @@ -32,9 +32,11 @@ qla2x00_get_cmd_direction(srb_t *sp) if (cmd->sc_data_direction == DMA_TO_DEVICE) { cflags = CF_WRITE; vha->qla_stats.output_bytes += scsi_bufflen(cmd); + vha->qla_stats.output_requests++; } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cflags = CF_READ; vha->qla_stats.input_bytes += scsi_bufflen(cmd); + vha->qla_stats.input_requests++; } return (cflags); } @@ -135,7 +137,8 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req) cont_pkt = (cont_a64_entry_t *)req->ring_ptr; /* Load packet defaults. */ - *((uint32_t *)(&cont_pkt->entry_type)) = + *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ? + __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) : __constant_cpu_to_le32(CONTINUE_A64_TYPE); return (cont_pkt); @@ -418,6 +421,8 @@ qla2x00_start_scsi(srb_t *sp) __constant_cpu_to_le16(CF_SIMPLE_TAG); break; } + } else { + cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG); } /* Load SCSI command packet. */ @@ -471,7 +476,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) struct qla_hw_data *ha = vha->hw; device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { qla82xx_start_iocbs(vha); } else { /* Adjust ring index. */ @@ -483,9 +488,13 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) req->ring_ptr++; /* Set chip new ring index. */ - if (ha->mqenable || IS_QLA83XX(ha)) { + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { WRT_REG_DWORD(req->req_q_in, req->ring_index); RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); + } else if (IS_QLAFX00(ha)) { + WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index); + RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in); + QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); } else if (IS_FWI2_CAPABLE(ha)) { WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index); RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in); @@ -514,11 +523,11 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, uint16_t lun, uint8_t type) { mrk_entry_t *mrk; - struct mrk_entry_24xx *mrk24; + struct mrk_entry_24xx *mrk24 = NULL; + struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - mrk24 = NULL; req = ha->req_q_map[0]; mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL); if (mrk == NULL) { @@ -589,28 +598,6 @@ int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked) return QLA_SUCCESS; } -/** - * qla24xx_calc_iocbs() - Determine number of Command Type 3 and - * Continuation Type 1 IOCBs to allocate. - * - * @dsds: number of data segment decriptors needed - * - * Returns the number of IOCB entries needed to store @dsds.
- */ -inline uint16_t -qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds) -{ - uint16_t iocbs; - - iocbs = 1; - if (dsds > 1) { - iocbs += (dsds - 1) / 5; - if ((dsds - 1) % 5) - iocbs++; - } - return iocbs; -} - static inline int qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, uint16_t tot_dsds) @@ -648,10 +635,12 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, cmd_pkt->control_flags = __constant_cpu_to_le16(CF_WRITE_DATA); vha->qla_stats.output_bytes += scsi_bufflen(cmd); + vha->qla_stats.output_requests++; } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cmd_pkt->control_flags = __constant_cpu_to_le16(CF_READ_DATA); vha->qla_stats.input_bytes += scsi_bufflen(cmd); + vha->qla_stats.input_requests++; } cur_seg = scsi_sglist(cmd); @@ -764,10 +753,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, cmd_pkt->task_mgmt_flags = __constant_cpu_to_le16(TMF_WRITE_DATA); vha->qla_stats.output_bytes += scsi_bufflen(cmd); + vha->qla_stats.output_requests++; } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { cmd_pkt->task_mgmt_flags = __constant_cpu_to_le16(TMF_READ_DATA); vha->qla_stats.input_bytes += scsi_bufflen(cmd); + vha->qla_stats.input_requests++; } /* One DSD is available in the Command Type 3 IOCB */ @@ -945,9 +936,9 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, return 1; } -static int +int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, - uint32_t *dsd, uint16_t tot_dsds) + uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) { void *next_dsd; uint8_t avail_dsds = 0; @@ -957,21 +948,35 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, uint32_t *cur_dsd = dsd; uint16_t used_dsds = tot_dsds; - uint32_t prot_int; + uint32_t prot_int; /* protection interval */ uint32_t partial; struct qla2_sgx sgx; dma_addr_t sle_dma; uint32_t sle_dma_len, tot_prot_dma_len = 0; - struct scsi_cmnd *cmd = GET_CMD_SP(sp); - - prot_int = cmd->device->sector_size; + struct scsi_cmnd *cmd; + struct scsi_qla_host *vha; memset(&sgx, 0, sizeof(struct qla2_sgx)); - sgx.tot_bytes = scsi_bufflen(cmd); - sgx.cur_sg = scsi_sglist(cmd); - sgx.sp = sp; - - sg_prot = scsi_prot_sglist(cmd); + if (sp) { + vha = sp->fcport->vha; + cmd = GET_CMD_SP(sp); + prot_int = cmd->device->sector_size; + + sgx.tot_bytes = scsi_bufflen(cmd); + sgx.cur_sg = scsi_sglist(cmd); + sgx.sp = sp; + + sg_prot = scsi_prot_sglist(cmd); + } else if (tc) { + vha = tc->vha; + prot_int = tc->blk_sz; + sgx.tot_bytes = tc->bufflen; + sgx.cur_sg = tc->sg; + sg_prot = tc->prot_sg; + } else { + BUG(); + return 1; + } while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { @@ -1004,10 +1009,18 @@ alloc_and_fill: return 1; } - list_add_tail(&dsd_ptr->list, - &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); + if (sp) { + list_add_tail(&dsd_ptr->list, + &((struct crc_context *) + sp->u.scmd.ctx)->dsd_list); + + sp->flags |= SRB_CRC_CTX_DSD_VALID; + } else { + list_add_tail(&dsd_ptr->list, + &(tc->ctx->dsd_list)); + tc->ctx_dsd_alloced = 1; + } - sp->flags |= SRB_CRC_CTX_DSD_VALID; /* add new list to cmd iocb or last list */ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); @@ -1042,21 +1055,35 @@ alloc_and_fill: return 0; } -static int +int qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, - uint16_t tot_dsds) + uint16_t tot_dsds, struct qla_tgt_cmd *tc) { void *next_dsd; uint8_t avail_dsds = 0; uint32_t dsd_list_len; struct dsd_dma *dsd_ptr; - struct scatterlist 
*sg; + struct scatterlist *sg, *sgl; uint32_t *cur_dsd = dsd; int i; uint16_t used_dsds = tot_dsds; - struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct scsi_cmnd *cmd; + struct scsi_qla_host *vha; + + if (sp) { + cmd = GET_CMD_SP(sp); + sgl = scsi_sglist(cmd); + vha = sp->fcport->vha; + } else if (tc) { + sgl = tc->sg; + vha = tc->vha; + } else { + BUG(); + return 1; + } - scsi_for_each_sg(cmd, sg, tot_dsds, i) { + + for_each_sg(sgl, sg, tot_dsds, i) { dma_addr_t sle_dma; /* Allocate additional continuation packets? */ @@ -1085,10 +1112,17 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, return 1; } - list_add_tail(&dsd_ptr->list, - &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); + if (sp) { + list_add_tail(&dsd_ptr->list, + &((struct crc_context *) + sp->u.scmd.ctx)->dsd_list); - sp->flags |= SRB_CRC_CTX_DSD_VALID; + sp->flags |= SRB_CRC_CTX_DSD_VALID; + } else { + list_add_tail(&dsd_ptr->list, + &(tc->ctx->dsd_list)); + tc->ctx_dsd_alloced = 1; + } /* add new list to cmd iocb or last list */ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); @@ -1111,23 +1145,37 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, return 0; } -static int +int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, - uint32_t *dsd, - uint16_t tot_dsds) + uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) { void *next_dsd; uint8_t avail_dsds = 0; uint32_t dsd_list_len; struct dsd_dma *dsd_ptr; - struct scatterlist *sg; + struct scatterlist *sg, *sgl; int i; struct scsi_cmnd *cmd; uint32_t *cur_dsd = dsd; - uint16_t used_dsds = tot_dsds; + uint16_t used_dsds = tot_dsds; + struct scsi_qla_host *vha; + + if (sp) { + cmd = GET_CMD_SP(sp); + sgl = scsi_prot_sglist(cmd); + vha = sp->fcport->vha; + } else if (tc) { + vha = tc->vha; + sgl = tc->prot_sg; + } else { + BUG(); + return 1; + } - cmd = GET_CMD_SP(sp); - scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) { + ql_dbg(ql_dbg_tgt, vha, 0xe021, + "%s: enter\n", __func__); + + for_each_sg(sgl, sg, tot_dsds, i) { dma_addr_t sle_dma; /* Allocate additional continuation packets? 
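
These DSD walkers drop their hard dependency on struct scsi_cmnd: scsi_for_each_sg() is simply for_each_sg() applied to scsi_sglist(cmd), so iterating the raw scatterlist lets the initiator path (srb_t) and the target path (struct qla_tgt_cmd) share one routine. Minimal for_each_sg() usage over an already-mapped list (helper name hypothetical):

#include <linux/scatterlist.h>

static u64 demo_total_mapped_len(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	u64 total = 0;
	int i;

	for_each_sg(sgl, sg, nents, i)
		total += sg_dma_len(sg);	/* valid after dma_map_sg() */
	return total;
}
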
*/ @@ -1156,10 +1204,17 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, return 1; } - list_add_tail(&dsd_ptr->list, - &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); + if (sp) { + list_add_tail(&dsd_ptr->list, + &((struct crc_context *) + sp->u.scmd.ctx)->dsd_list); - sp->flags |= SRB_CRC_CTX_DSD_VALID; + sp->flags |= SRB_CRC_CTX_DSD_VALID; + } else { + list_add_tail(&dsd_ptr->list, + &(tc->ctx->dsd_list)); + tc->ctx_dsd_alloced = 1; + } /* add new list to cmd iocb or last list */ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); @@ -1197,7 +1252,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, uint32_t *cur_dsd, *fcp_dl; scsi_qla_host_t *vha; struct scsi_cmnd *cmd; - struct scatterlist *cur_seg; int sgc; uint32_t total_bytes = 0; uint32_t data_bytes; @@ -1316,11 +1370,11 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, fcp_cmnd->task_attribute = TSK_ORDERED; break; default: - fcp_cmnd->task_attribute = 0; + fcp_cmnd->task_attribute = TSK_SIMPLE; break; } } else { - fcp_cmnd->task_attribute = 0; + fcp_cmnd->task_attribute = TSK_SIMPLE; } cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ @@ -1396,20 +1450,19 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, if (!bundling && tot_prot_dsds) { if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, - cur_dsd, tot_dsds)) + cur_dsd, tot_dsds, NULL)) goto crc_queuing_error; } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, - (tot_dsds - tot_prot_dsds))) + (tot_dsds - tot_prot_dsds), NULL)) goto crc_queuing_error; if (bundling && tot_prot_dsds) { /* Walks dif segments */ - cur_seg = scsi_prot_sglist(cmd); cmd_pkt->control_flags |= __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, - tot_prot_dsds)) + tot_prot_dsds, NULL)) goto crc_queuing_error; } return QLA_SUCCESS; @@ -1489,8 +1542,8 @@ qla24xx_start_scsi(srb_t *sp) tot_dsds = nseg; req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); if (req->cnt < (req_cnt + 2)) { - cnt = RD_REG_DWORD_RELAXED(req->req_q_out); - + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : + RD_REG_DWORD_RELAXED(req->req_q_out); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -1535,7 +1588,12 @@ qla24xx_start_scsi(srb_t *sp) case ORDERED_QUEUE_TAG: cmd_pkt->task = TSK_ORDERED; break; + default: + cmd_pkt->task = TSK_SIMPLE; + break; } + } else { + cmd_pkt->task = TSK_SIMPLE; } /* Load SCSI command packet. */ @@ -1583,7 +1641,6 @@ queuing_error: return QLA_FUNCTION_FAILED; } - /** * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP * @sp: command to send to the ISP @@ -1704,8 +1761,8 @@ qla24xx_dif_start_scsi(srb_t *sp) tot_prot_dsds = nseg; tot_dsds += nseg; if (req->cnt < (req_cnt + 2)) { - cnt = RD_REG_DWORD_RELAXED(req->req_q_out); - + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : + RD_REG_DWORD_RELAXED(req->req_q_out); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -1821,7 +1878,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp) /* Check for room in outstanding command list. 
*/ handle = req->current_outstanding_cmd; - for (index = 1; req->num_outstanding_cmds; index++) { + for (index = 1; index < req->num_outstanding_cmds; index++) { handle++; if (handle == req->num_outstanding_cmds) handle = 1; @@ -1846,12 +1903,14 @@ skip_cmd_array: /* Check for room on request queue. */ if (req->cnt < req_cnt) { - if (ha->mqenable || IS_QLA83XX(ha)) + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out); - else if (IS_QLA82XX(ha)) + else if (IS_P3P_TYPE(ha)) cnt = RD_REG_DWORD(&reg->isp82.req_q_out); else if (IS_FWI2_CAPABLE(ha)) cnt = RD_REG_DWORD(&reg->isp24.req_q_out); + else if (IS_QLAFX00(ha)) + cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out); else cnt = qla2x00_debounce_register( ISP_REQ_Q_OUT(ha, &reg->isp)); @@ -1869,8 +1928,13 @@ skip_cmd_array: req->cnt -= req_cnt; pkt = req->ring_ptr; memset(pkt, 0, REQUEST_ENTRY_SIZE); - pkt->entry_count = req_cnt; - pkt->handle = handle; + if (IS_QLAFX00(ha)) { + WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt); + WRT_REG_WORD((void __iomem *)&pkt->handle, handle); + } else { + pkt->entry_count = req_cnt; + pkt->handle = handle; + } queuing_error: return pkt; @@ -2053,6 +2117,8 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) (bsg_job->reply_payload.sg_list))); els_iocb->rx_len = cpu_to_le32(sg_dma_len (bsg_job->reply_payload.sg_list)); + + sp->fcport->vha->qla_stats.control_requests++; } static void @@ -2130,6 +2196,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) avail_dsds--; } ct_iocb->entry_count = entry_count; + + sp->fcport->vha->qla_stats.control_requests++; } static void @@ -2581,6 +2649,29 @@ queuing_error: return QLA_FUNCTION_FAILED; } +void +qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) +{ + struct srb_iocb *aio = &sp->u.iocb_cmd; + scsi_qla_host_t *vha = sp->fcport->vha; + struct req_que *req = vha->req; + + memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); + abt_iocb->entry_type = ABORT_IOCB_TYPE; + abt_iocb->entry_count = 1; + abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); + abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); + abt_iocb->handle_to_abort = + cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl)); + abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; + abt_iocb->port_id[1] = sp->fcport->d_id.b.area; + abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; + abt_iocb->vp_index = vha->vp_idx; + abt_iocb->req_que_no = cpu_to_le16(req->id); + /* Send the command to the firmware */ + wmb(); +} + int qla2x00_start_sp(srb_t *sp) { @@ -2625,7 +2716,18 @@ qla2x00_start_sp(srb_t *sp) qla2x00_adisc_iocb(sp, pkt); break; case SRB_TM_CMD: - qla24xx_tm_iocb(sp, pkt); + IS_QLAFX00(ha) ? + qlafx00_tm_iocb(sp, pkt) : + qla24xx_tm_iocb(sp, pkt); + break; + case SRB_FXIOCB_DCMD: + case SRB_FXIOCB_BCMD: + qlafx00_fxdisc_iocb(sp, pkt); + break; + case SRB_ABT_CMD: + IS_QLAFX00(ha) ? + qlafx00_abort_iocb(sp, pkt) : + qla24xx_abort_iocb(sp, pkt); break; default: break; @@ -2673,6 +2775,9 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, vha->bidi_stats.transfer_bytes += req_data_len; vha->bidi_stats.io_count++; + vha->qla_stats.output_bytes += req_data_len; + vha->qla_stats.output_requests++; + /* Only one dsd is available for bidirectional IOCB, remaining dsds * are bundled in continuation iocb */
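
The queue-full checks in this file now read the consumer index from the shadow dword when the chip supports it, then compute the free space left in the circular request ring; the formula depends on whether the producer index has wrapped past the consumer. The computation in isolation, for a ring of 'length' entries:

#include <linux/types.h>

static u32 demo_ring_space(u32 ring_index, u32 out, u32 length)
{
	if (ring_index < out)
		return out - ring_index;	/* producer has wrapped */
	return length - (ring_index - out);	/* linear distance to wrap */
}
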
@@ -2784,8 +2889,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) /* Check for room on request queue. */ if (req->cnt < req_cnt + 2) { - cnt = RD_REG_DWORD_RELAXED(req->req_q_out); - + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : + RD_REG_DWORD_RELAXED(req->req_q_out); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index e9dbd74c20d..a56825c73c3 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -13,11 +13,7 @@ #include <scsi/scsi_bsg_fc.h> #include <scsi/scsi_eh.h> -#include "qla_target.h" - static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); -static void qla2x00_process_completed_request(struct scsi_qla_host *, - struct req_que *, uint32_t); static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, @@ -60,6 +56,16 @@ qla2100_intr_handler(int irq, void *dev_id) vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { hccr = RD_REG_WORD(&reg->hccr); + /* Check for PCI disconnection */ + if (hccr == 0xffff) { + /* + * Schedule this on the default system workqueue so that + * all the adapter workqueues and the DPC thread can be + * shut down cleanly. + */ + schedule_work(&ha->board_disable); + break; + } if (hccr & HCCR_RISC_PAUSE) { if (pci_channel_offline(ha->pdev)) break; @@ -108,17 +114,28 @@ qla2100_intr_handler(int irq, void *dev_id) RD_REG_WORD(&reg->hccr); } } + qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && - (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } - return (IRQ_HANDLED); } +bool +qla2x00_check_reg_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) +{ + /* Check for PCI disconnection */ + if (reg == 0xffffffff) { + /* + * Schedule this on the default system workqueue so that all the + * adapter workqueues and the DPC thread can be shut down + * cleanly. + */ + schedule_work(&vha->hw->board_disable); + return true; + } else + return false; +} +
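
The hccr == 0xffff test and qla2x00_check_reg_for_disconnect() both lean on a PCIe property: reads from a surprise-removed device complete with all-ones data, so a register value of 0xffffffff is treated as "board gone". Teardown is deferred to the system workqueue because the adapter's own workqueues and DPC thread are among the things being shut down. The check in isolation (demo_* names hypothetical):

#include <linux/types.h>
#include <linux/workqueue.h>

static bool demo_reg_indicates_disconnect(u32 reg, struct work_struct *disable)
{
	if (reg != 0xffffffff)
		return false;
	/* run teardown off the global workqueue so the adapter's own
	 * workqueues and DPC thread can be flushed and stopped cleanly */
	schedule_work(disable);
	return true;
}
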
/** * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. * @irq: @@ -157,11 +174,14 @@ qla2300_intr_handler(int irq, void *dev_id) vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(&reg->u.isp2300.host_status); + if (qla2x00_check_reg_for_disconnect(vha, stat)) + break; if (stat & HSR_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = RD_REG_WORD(&reg->hccr); + if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) ql_log(ql_log_warn, vha, 0x5026, "Parity error -- HCCR=%x, Dumping " @@ -225,14 +245,9 @@ qla2300_intr_handler(int irq, void *dev_id) WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD_RELAXED(&reg->hccr); } + qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && - (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } - return (IRQ_HANDLED); } @@ -283,11 +298,18 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) { "Complete", "Request Notification", "Time Extension" }; int rval; struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; + struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; uint16_t __iomem *wptr; uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; /* Seed data -- mailbox1 -> mailbox7. */ - wptr = (uint16_t __iomem *)&reg24->mailbox1; + if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) + wptr = (uint16_t __iomem *)&reg24->mailbox1; + else if (IS_QLA8044(vha->hw)) + wptr = (uint16_t __iomem *)&reg82->mailbox_out[1]; + else + return; + for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) mb[cnt] = RD_REG_WORD(wptr); @@ -296,40 +318,54 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) "%04x %04x %04x %04x %04x %04x %04x.\n", event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]); - if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) { - vha->hw->flags.idc_compl_status = 1; - if (vha->hw->notify_dcbx_comp) - complete(&vha->hw->dcbx_comp); - } - - /* Acknowledgement needed? [Notify && non-zero timeout]. */ - timeout = (descr >> 8) & 0xf; - if (aen != MBA_IDC_NOTIFY || !timeout) - return; + switch (aen) { + /* Handle IDC Error completion case. */ + case MBA_IDC_COMPLETE: + if (mb[1] >> 15) { + vha->hw->flags.idc_compl_status = 1; + if (vha->hw->notify_dcbx_comp && !vha->vp_idx) + complete(&vha->hw->dcbx_comp); + } + break; - ql_dbg(ql_dbg_async, vha, 0x5022, - "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", - vha->host_no, event[aen & 0xff], timeout); + case MBA_IDC_NOTIFY: + /* Acknowledgement needed? [Notify && non-zero timeout].
*/ + timeout = (descr >> 8) & 0xf; + ql_dbg(ql_dbg_async, vha, 0x5022, + "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", + vha->host_no, event[aen & 0xff], timeout); - rval = qla2x00_post_idc_ack_work(vha, mb); - if (rval != QLA_SUCCESS) - ql_log(ql_log_warn, vha, 0x5023, - "IDC failed to post ACK.\n"); + if (!timeout) + return; + rval = qla2x00_post_idc_ack_work(vha, mb); + if (rval != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0x5023, + "IDC failed to post ACK.\n"); + break; + case MBA_IDC_TIME_EXT: + vha->hw->idc_extend_tmo = descr; + ql_dbg(ql_dbg_async, vha, 0x5087, + "%lu Inter-Driver Communication %s -- " + "Extend timeout by=%d.\n", + vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); + break; + } } #define LS_UNKNOWN 2 const char * qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) { - static const char * const link_speeds[] = { - "1", "2", "?", "4", "8", "16", "10" + static const char *const link_speeds[] = { - "1", "2", "?", "4", "8", "16", "32", "10" }; +#define QLA_LAST_SPEED 7 if (IS_QLA2100(ha) || IS_QLA2200(ha)) return link_speeds[0]; else if (speed == 0x13) - return link_speeds[6]; - else if (speed < 6) + return link_speeds[QLA_LAST_SPEED]; + else if (speed < QLA_LAST_SPEED) return link_speeds[speed]; else return link_speeds[LS_UNKNOWN]; @@ -614,7 +650,7 @@ skip_rio: break; case MBA_SYSTEM_ERR: /* System Error */ - mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ? + mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? RD_REG_WORD(&reg24->mailbox7) : 0; ql_log(ql_log_warn, vha, 0x5003, "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " @@ -631,7 +667,7 @@ skip_rio: vha->device_flags |= DFLG_DEV_FAILED; } else { /* Check to see if MPI timeout occurred */ - if ((mbx & MBX_3) && (ha->flags.port0)) + if ((mbx & MBX_3) && (ha->port_no == 0)) set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
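
The reworked qla2x00_get_link_speed_str() above keeps every speed name in one table and bounds every index: legacy ISPs pin to "1", the firmware's 0x13 sentinel maps to the final "10" (10 GbE) entry via QLA_LAST_SPEED, and anything out of range resolves to the "?" placeholder rather than indexing past the array. The bounded-lookup idiom (helper name hypothetical):

#include <linux/kernel.h>
#include <linux/types.h>

static const char *demo_speed_str(u16 speed)
{
	static const char *const names[] = {
		"1", "2", "?", "4", "8", "16", "32", "10"
	};
	const u16 last = ARRAY_SIZE(names) - 1;

	if (speed == 0x13)		/* firmware sentinel for 10 GbE */
		return names[last];
	return speed < last ? names[speed] : names[2];	/* "?" if unknown */
}
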
@@ -705,7 +741,8 @@ skip_rio: case MBA_LOOP_DOWN: /* Loop Down Event */ mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) ? RD_REG_WORD(&reg24->mailbox4) : 0; - mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx; + mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4]) + : mbx; ql_dbg(ql_dbg_async, vha, 0x500b, "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3], mbx); @@ -754,11 +791,11 @@ skip_rio: if (IS_QLA2100(ha)) break; - if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) { + if (IS_CNA_CAPABLE(ha)) { ql_dbg(ql_dbg_async, vha, 0x500d, "DCBX Completed -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); - if (ha->notify_dcbx_comp) + if (ha->notify_dcbx_comp && !vha->vp_idx) complete(&ha->dcbx_comp); } else @@ -1016,7 +1053,7 @@ skip_rio: mb[1], mb[2], mb[3]); break; case MBA_IDC_NOTIFY: - if (IS_QLA8031(vha->hw)) { + if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { mb[4] = RD_REG_WORD(&reg24->mailbox4); if (((mb[2] & 0x7fff) == MBC_PORT_RESET || (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && @@ -1032,11 +1069,12 @@ skip_rio: } } case MBA_IDC_COMPLETE: - if (ha->notify_lb_portup_comp) + if (ha->notify_lb_portup_comp && !vha->vp_idx) complete(&ha->lb_portup_comp); /* Fallthru */ case MBA_IDC_TIME_EXT: - if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) + if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || + IS_QLA8044(ha)) qla81xx_idc_event(vha, mb[0], mb[1]); break; @@ -1065,9 +1103,9 @@ skip_rio: * @ha: SCSI driver HA context * @index: SRB index */ -static void +void qla2x00_process_completed_request(struct scsi_qla_host *vha, - struct req_que *req, uint32_t index) + struct req_que *req, uint32_t index) { srb_t *sp; struct qla_hw_data *ha = vha->hw; @@ -1077,7 +1115,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, ql_log(ql_log_warn, vha, 0x3014, "Invalid SCSI command index (%x).\n", index); - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); @@ -1094,14 +1132,14 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, } else { ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } } -static srb_t * +srb_t * qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, struct req_que *req, void *iocb) { @@ -1114,7 +1152,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, if (index >= req->num_outstanding_cmds) { ql_log(ql_log_warn, vha, 0x5031, "Invalid command index (%x).\n", index); - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); @@ -1460,8 +1498,7 @@ logio_done: } static void -qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, - struct tsk_mgmt_entry *tsk) +qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) { const char func[] = "TMF-IOCB"; const char *type; @@ -1469,7 +1506,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, srb_t *sp; struct srb_iocb *iocb; struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; - int error = 1; sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); if (!sp) @@ -1478,37 +1514,35 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, iocb = &sp->u.iocb_cmd; type = sp->name; fcport = sp->fcport; + iocb->u.tmf.data = QLA_SUCCESS; if (sts->entry_status) { ql_log(ql_log_warn, fcport->vha, 0x5038, "Async-%s error - hdl=%x entry-status(%x).\n", type, sp->handle, sts->entry_status); + iocb->u.tmf.data = QLA_FUNCTION_FAILED; } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { ql_log(ql_log_warn, fcport->vha, 0x5039, "Async-%s error -
hdl=%x completion status(%x).\n", type, sp->handle, sts->comp_status); - } else if (!(le16_to_cpu(sts->scsi_status) & + iocb->u.tmf.data = QLA_FUNCTION_FAILED; + } else if ((le16_to_cpu(sts->scsi_status) & SS_RESPONSE_INFO_LEN_VALID)) { - ql_log(ql_log_warn, fcport->vha, 0x503a, - "Async-%s error - hdl=%x no response info(%x).\n", - type, sp->handle, sts->scsi_status); - } else if (le32_to_cpu(sts->rsp_data_len) < 4) { - ql_log(ql_log_warn, fcport->vha, 0x503b, - "Async-%s error - hdl=%x not enough response(%d).\n", - type, sp->handle, sts->rsp_data_len); - } else if (sts->data[3]) { - ql_log(ql_log_warn, fcport->vha, 0x503c, - "Async-%s error - hdl=%x response(%x).\n", - type, sp->handle, sts->data[3]); - } else { - error = 0; + if (le32_to_cpu(sts->rsp_data_len) < 4) { + ql_log(ql_log_warn, fcport->vha, 0x503b, + "Async-%s error - hdl=%x not enough response(%d).\n", + type, sp->handle, sts->rsp_data_len); + } else if (sts->data[3]) { + ql_log(ql_log_warn, fcport->vha, 0x503c, + "Async-%s error - hdl=%x response(%x).\n", + type, sp->handle, sts->data[3]); + iocb->u.tmf.data = QLA_FUNCTION_FAILED; + } } - if (error) { - iocb->u.tmf.data = error; + if (iocb->u.tmf.data != QLA_SUCCESS) ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, (uint8_t *)sts, sizeof(*sts)); - } sp->done(vha, sp, 0); } @@ -1819,6 +1853,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, if (scsi_status == 0) { bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; + vha->qla_stats.input_bytes += + bsg_job->reply->reply_payload_rcv_len; + vha->qla_stats.input_requests++; rval = EXT_STATUS_OK; } goto done; @@ -1953,6 +1990,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) que = MSW(sts->handle); req = ha->req_q_map[que]; + /* Check for invalid queue pointer */ + if (req == NULL || + que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { + ql_dbg(ql_dbg_io, vha, 0x3059, + "Invalid status handle (0x%x): Bad req pointer. req=%p, " + "que=%u.\n", sts->handle, req, que); + return; + } + /* Validate handle. */ if (handle < req->num_outstanding_cmds) sp = req->outstanding_cmds[handle]; @@ -1963,11 +2009,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) ql_dbg(ql_dbg_io, vha, 0x3017, "Invalid status handle (0x%x).\n", sts->handle); - if (IS_QLA82XX(ha)) - set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); - else - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - qla2xxx_wake_dpc(vha); + if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { + if (IS_P3P_TYPE(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } return; } @@ -1976,9 +2024,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) return; } + /* Task Management completion. */ + if (sp->type == SRB_TM_CMD) { + qla24xx_tm_iocb_entry(vha, req, pkt); + return; + } + /* Fast path completion. 
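The status-entry path above recovers the request queue and the command slot from the 32-bit completion handle: the MSW selects the queue in req_q_map and the LSW indexes outstanding_cmds, and both are validated before use. A minimal sketch of that decode-and-validate pattern, with simplified stand-in types rather than the driver's real structures:

#include <linux/types.h>

/*
 * Sketch only: decode a handle into (queue, index) and validate both
 * before touching the outstanding-command table, as the status handler
 * above does. MSW/LSW follow the driver's convention.
 */
#define DEMO_MSW(x)	((u16)((u32)(x) >> 16))
#define DEMO_LSW(x)	((u16)(x))

struct demo_req {
	u16 num_outstanding_cmds;
	void **outstanding_cmds;
};

static void *demo_sp_from_handle(struct demo_req **req_q_map,
				 unsigned int max_queues, u32 handle)
{
	u16 que = DEMO_MSW(handle);
	u16 idx = DEMO_LSW(handle);
	struct demo_req *req;

	if (que >= max_queues || !(req = req_q_map[que]))
		return NULL;	/* bad queue: caller schedules an ISP abort */
	if (idx >= req->num_outstanding_cmds)
		return NULL;	/* bad index: likewise */
	return req->outstanding_cmds[idx];
}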
*/ if (comp_status == CS_COMPLETE && scsi_status == 0) { - qla2x00_do_host_ramp_up(vha); qla2x00_process_completed_request(vha, req, handle); return; @@ -1994,7 +2047,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) return; } - lscsi_status = scsi_status & STATUS_MASK; + lscsi_status = scsi_status & STATUS_MASK; fcport = sp->fcport; @@ -2190,8 +2243,10 @@ check_scsi_status: } ql_dbg(ql_dbg_io, fcport->vha, 0x3021, - "Port down status: port-state=0x%x.\n", - atomic_read(&fcport->state)); + "Port to be marked lost on fcport=%02x%02x%02x, current " + "port state= %s.\n", fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa, + port_state_str[atomic_read(&fcport->state)]); if (atomic_read(&fcport->state) == FCS_ONLINE) qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); @@ -2226,21 +2281,15 @@ check_scsi_status: out: if (logit) ql_dbg(ql_dbg_io, fcport->vha, 0x3022, - "FCP command status: 0x%x-0x%x (0x%x) " - "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x " - "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x " + "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d " + "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", comp_status, scsi_status, res, vha->host_no, cp->device->id, cp->device->lun, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, - cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3], - cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7], - cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len, + cp->cmnd, scsi_bufflen(cp), rsp_info_len, resid_len, fw_resid_len); - if (!res) - qla2x00_do_host_ramp_up(vha); - if (rsp->status_srb == NULL) sp->done(ha, sp, res); } @@ -2338,7 +2387,7 @@ fatal: ql_log(ql_log_warn, vha, 0x5030, "Error entry - invalid handle/queue.\n"); - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); @@ -2381,6 +2430,23 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) } } +static void +qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct abort_entry_24xx *pkt) +{ + const char func[] = "ABT_IOCB"; + srb_t *sp; + struct srb_iocb *abt; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + abt = &sp->u.iocb_cmd; + abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); + sp->done(vha, sp, 0); +} + /** * qla24xx_process_response_queue() - Process response queue entries. 
* @ha: SCSI driver HA context @@ -2408,12 +2474,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (pkt->entry_status != 0) { qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); - (void)qlt_24xx_process_response_error(vha, pkt); + if (qlt_24xx_process_response_error(vha, pkt)) + goto process_err; ((response_t *)pkt)->signature = RESPONSE_PROCESSED; wmb(); continue; } +process_err: switch (pkt->entry_type) { case STATUS_TYPE: @@ -2430,14 +2498,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, qla24xx_logio_entry(vha, rsp->req, (struct logio_entry_24xx *)pkt); break; - case TSK_MGMT_IOCB_TYPE: - qla24xx_tm_iocb_entry(vha, rsp->req, - (struct tsk_mgmt_entry *)pkt); - break; - case CT_IOCB_TYPE: + case CT_IOCB_TYPE: qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); break; - case ELS_IOCB_TYPE: + case ELS_IOCB_TYPE: qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); break; case ABTS_RECV_24XX: @@ -2446,6 +2510,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, case ABTS_RESP_24XX: case CTIO_TYPE7: case NOTIFY_ACK_TYPE: + case CTIO_CRC2: qlt_response_pkt_all_vps(vha, (response_t *)pkt); break; case MARKER_TYPE: @@ -2453,6 +2518,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, * from falling into default case */ break; + case ABORT_IOCB_TYPE: + qla24xx_abort_iocb_entry(vha, rsp->req, + (struct abort_entry_24xx *)pkt); + break; default: /* Type Not Supported. */ ql_dbg(ql_dbg_async, vha, 0x5042, @@ -2466,7 +2535,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, } /* Adjust ring index */ - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index); } else @@ -2481,7 +2550,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) + if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha)) return; rval = QLA_SUCCESS; @@ -2499,6 +2569,7 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha) if (rval == QLA_SUCCESS) goto next_test; + rval = QLA_SUCCESS; WRT_REG_DWORD(&reg->iobase_window, 0x0003); for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 && rval == QLA_SUCCESS; cnt--) { @@ -2562,6 +2633,8 @@ qla24xx_intr_handler(int irq, void *dev_id) vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(&reg->host_status); + if (qla2x00_check_reg_for_disconnect(vha, stat)) + break; if (stat & HSRX_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; @@ -2617,14 +2690,9 @@ qla24xx_intr_handler(int irq, void *dev_id) if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) ndelay(3500); } + qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && - (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } - return IRQ_HANDLED; } @@ -2636,6 +2704,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) struct device_reg_24xx __iomem *reg; struct scsi_qla_host *vha; unsigned long flags; + uint32_t stat = 0; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -2649,11 +2718,19 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); + /* + * Use host_status register to check for PCI
disconnection before + * we process the response queue. + */ + stat = RD_REG_DWORD(&reg->host_status); + if (qla2x00_check_reg_for_disconnect(vha, stat)) + goto out; qla24xx_process_response_queue(vha, rsp); if (!ha->flags.disable_msix_handshake) { WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(&reg->hccr); } +out: spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; @@ -2663,9 +2740,11 @@ static irqreturn_t qla25xx_msix_rsp_q(int irq, void *dev_id) { struct qla_hw_data *ha; + scsi_qla_host_t *vha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; unsigned long flags; + uint32_t hccr = 0; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -2674,17 +2753,21 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) return IRQ_NONE; } ha = rsp->hw; + vha = pci_get_drvdata(ha->pdev); /* Clear the interrupt, if enabled, for this response queue */ if (!ha->flags.disable_msix_handshake) { reg = &ha->iobase->isp24; spin_lock_irqsave(&ha->hardware_lock, flags); WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); - RD_REG_DWORD_RELAXED(&reg->hccr); + hccr = RD_REG_DWORD_RELAXED(&reg->hccr); spin_unlock_irqrestore(&ha->hardware_lock, flags); } + if (qla2x00_check_reg_for_disconnect(vha, hccr)) + goto out; queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); +out: return IRQ_HANDLED; } @@ -2715,6 +2798,8 @@ qla24xx_msix_default(int irq, void *dev_id) vha = pci_get_drvdata(ha->pdev); do { stat = RD_REG_DWORD(&reg->host_status); + if (qla2x00_check_reg_for_disconnect(vha, stat)) + break; if (stat & HSRX_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; @@ -2767,13 +2852,9 @@ qla24xx_msix_default(int irq, void *dev_id) } WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); } while (0); + qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && - (status & MBX_INTERRUPT) && ha->flags.mbox_int) { - set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } return IRQ_HANDLED; } @@ -2825,6 +2906,7 @@ static int qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) { #define MIN_MSIX_COUNT 2 +#define ATIO_VECTOR 2 int i, ret; struct msix_entry *entries; struct qla_msix_entry *qentry; @@ -2881,36 +2963,49 @@ msix_failed: } /* Enable MSI-X vectors for the base queue */ - for (i = 0; i < ha->msix_count; i++) { + for (i = 0; i < 2; i++) { qentry = &ha->msix_entries[i]; - if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { - ret = request_irq(qentry->vector, - qla83xx_msix_entries[i].handler, - 0, qla83xx_msix_entries[i].name, rsp); - } else if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) ret = request_irq(qentry->vector, qla82xx_msix_entries[i].handler, 0, qla82xx_msix_entries[i].name, rsp); - } else { + else ret = request_irq(qentry->vector, msix_entries[i].handler, 0, msix_entries[i].name, rsp); - } - if (ret) { - ql_log(ql_log_fatal, vha, 0x00cb, - "MSI-X: unable to register handler -- %x/%d.\n", - qentry->vector, ret); - qla24xx_disable_msix(ha); - ha->mqenable = 0; - goto msix_out; - } + if (ret) + goto msix_register_fail; + qentry->have_irq = 1; + qentry->rsp = rsp; + rsp->msix = qentry; + } + + /* + * If target mode is enabled, also request the vector for the ATIO + * queue. 
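The reworked loop above claims the two base MSI-X vectors one request_irq() at a time and funnels any failure to a single msix_register_fail label. The same claim-or-unwind shape in isolation (the vector count and handler are illustrative, not the driver's tables):

#include <linux/interrupt.h>
#include <linux/pci.h>

/*
 * Illustrative only: request 'count' MSI-X vectors; on failure release
 * the vectors already claimed so the caller can fall back to MSI/INTx.
 */
static int demo_request_vectors(struct msix_entry *entries, int count,
				irq_handler_t handler, void *data)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = request_irq(entries[i].vector, handler, 0,
				  "demo-msix", data);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		free_irq(entries[i].vector, data);
	return ret;
}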
+ */ + if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { + qentry = &ha->msix_entries[ATIO_VECTOR]; + ret = request_irq(qentry->vector, + qla83xx_msix_entries[ATIO_VECTOR].handler, + 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); qentry->have_irq = 1; qentry->rsp = rsp; rsp->msix = qentry; } +msix_register_fail: + if (ret) { + ql_log(ql_log_fatal, vha, 0x00cb, + "MSI-X: unable to register handler -- %x/%d.\n", + qentry->vector, ret); + qla24xx_disable_msix(ha); + ha->mqenable = 0; + goto msix_out; + } + /* Enable MSI-X vector for response queue update for queue 0 */ - if (IS_QLA83XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { if (ha->msixbase && ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) ha->mqenable = 1; @@ -2933,13 +3028,14 @@ msix_out: int qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) { - int ret; - device_reg_t __iomem *reg = ha->iobase; + int ret = QLA_FUNCTION_FAILED; + device_reg_t *reg = ha->iobase; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); /* If possible, enable MSI-X. */ if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && - !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) + !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) && + !IS_QLA27XX(ha)) goto skip_msi; if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && @@ -2967,12 +3063,15 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) ha->chip_revision, ha->fw_attributes); goto clear_risc_ints; } - ql_log(ql_log_info, vha, 0x0037, - "MSI-X Falling back-to MSI mode -%d.\n", ret); + skip_msix: + ql_log(ql_log_info, vha, 0x0037, + "Falling back-to MSI mode -%d.\n", ret); + if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && - !IS_QLA8001(ha) && !IS_QLA82XX(ha)) + !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && + !IS_QLA27XX(ha)) goto skip_msi; ret = pci_enable_msi(ha->pdev); @@ -2982,14 +3081,13 @@ skip_msix: ha->flags.msi_enabled = 1; } else ql_log(ql_log_warn, vha, 0x0039, - "MSI-X; Falling back-to INTa mode -- %d.\n", ret); + "Falling back-to INTa mode -- %d.\n", ret); +skip_msi: /* Skip INTx on ISP82xx. */ if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) return QLA_FUNCTION_FAILED; -skip_msi: - ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, ha->flags.msi_enabled ? 0 : IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp); @@ -2998,9 +3096,11 @@ skip_msi: "Failed to reserve interrupt %d already in use.\n", ha->pdev->irq); goto fail; - } else if (!ha->flags.msi_enabled) + } else if (!ha->flags.msi_enabled) { ql_dbg(ql_dbg_init, vha, 0x0125, "INTa mode: Enabled.\n"); + ha->flags.mr_intr_valid = 1; + } clear_risc_ints: diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 186dd59ce4f..1c33a77db5c 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
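qla2x00_request_irqs() above walks down a ladder: try MSI-X, fall back to MSI, and finally take a shared legacy INTx line. A condensed sketch of that ladder, using the era-appropriate pci_enable_msix()/pci_enable_msi() calls and a hypothetical device name:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch of the MSI-X -> MSI -> INTx fallback; error paths condensed. */
static int demo_setup_irq(struct pci_dev *pdev, irq_handler_t handler,
			  void *data, bool try_msix)
{
	if (try_msix) {
		struct msix_entry ent = { .entry = 0 };

		if (!pci_enable_msix(pdev, &ent, 1))
			return request_irq(ent.vector, handler, 0,
					   "demo", data);
	}
	if (!pci_enable_msi(pdev))	/* MSI next */
		return request_irq(pdev->irq, handler, 0, "demo", data);

	/* Legacy INTx may be shared with other devices. */
	return request_irq(pdev->irq, handler, IRQF_SHARED, "demo", data);
}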
*/ @@ -35,7 +35,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) { int rval; unsigned long flags = 0; - device_reg_t __iomem *reg; + device_reg_t *reg; uint8_t abort_active; uint8_t io_lock_on; uint16_t command = 0; @@ -75,7 +75,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) return QLA_FUNCTION_TIMEOUT; } - if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) { + if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; ql_log(ql_log_warn, vha, 0x1004, @@ -106,9 +106,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) spin_lock_irqsave(&ha->hardware_lock, flags); /* Load mailbox registers. */ - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) optr = (uint16_t __iomem *)®->isp82.mailbox_in[0]; - else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha)) + else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) optr = (uint16_t __iomem *)®->isp24.mailbox0; else optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0); @@ -117,33 +117,25 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) command = mcp->mb[0]; mboxes = mcp->out_mb; + ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111, + "Mailbox registers (OUT):\n"); for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (IS_QLA2200(ha) && cnt == 8) optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8); - if (mboxes & BIT_0) + if (mboxes & BIT_0) { + ql_dbg(ql_dbg_mbx, vha, 0x1112, + "mbox[%d]<-0x%04x\n", cnt, *iptr); WRT_REG_WORD(optr, *iptr); + } mboxes >>= 1; optr++; iptr++; } - ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111, - "Loaded MBX registers (displayed in bytes) =.\n"); - ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112, - (uint8_t *)mcp->mb, 16); - ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113, - ".\n"); - ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114, - ((uint8_t *)mcp->mb + 0x10), 16); - ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115, - ".\n"); - ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116, - ((uint8_t *)mcp->mb + 0x20), 8); ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117, "I/O Address = %p.\n", optr); - ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e); /* Issue set host interrupt command to send cmd out. 
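The load loop above writes only the mailbox registers selected by mcp->out_mb, testing bit 0 of the mask and shifting it right once per register. The same walk in isolation, where writew() stands in for the driver's WRT_REG_WORD() accessor:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>

/*
 * Sketch: copy mb[0..count) to the chip, skipping registers whose bit
 * is clear in 'mboxes'; BIT(0) tracks the current register as the mask
 * shifts, mirroring the out_mb walk above.
 */
static void demo_load_mailboxes(u16 __iomem *optr, const u16 *mb,
				u32 mboxes, int count)
{
	int cnt;

	for (cnt = 0; cnt < count; cnt++) {
		if (mboxes & BIT(0))
			writew(mb[cnt], optr);	/* WRT_REG_WORD() here */
		mboxes >>= 1;
		optr++;
	}
}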
*/ ha->flags.mbox_int = 0; @@ -159,7 +151,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { if (RD_REG_DWORD(®->isp82.hint) & HINT_MBX_INT_PENDING) { spin_unlock_irqrestore(&ha->hardware_lock, @@ -177,15 +169,19 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); - wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ); - - clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); - + if (!wait_for_completion_timeout(&ha->mbx_intr_comp, + mcp->tov * HZ)) { + ql_dbg(ql_dbg_mbx, vha, 0x117a, + "cmd=%x Timeout.\n", command); + spin_lock_irqsave(&ha->hardware_lock, flags); + clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + } } else { ql_dbg(ql_dbg_mbx, vha, 0x1011, "Cmd=%x Polling Mode.\n", command); - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { if (RD_REG_DWORD(®->isp82.hint) & HINT_MBX_INT_PENDING) { spin_unlock_irqrestore(&ha->hardware_lock, @@ -232,7 +228,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) { + if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) { ha->flags.mbox_busy = 0; /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; @@ -250,9 +246,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) iptr2 = mcp->mb; iptr = (uint16_t *)&ha->mailbox_out[0]; mboxes = mcp->in_mb; + + ql_dbg(ql_dbg_mbx, vha, 0x1113, + "Mailbox registers (IN):\n"); for (cnt = 0; cnt < ha->mbx_count; cnt++) { - if (mboxes & BIT_0) + if (mboxes & BIT_0) { *iptr2 = *iptr; + ql_dbg(ql_dbg_mbx, vha, 0x1114, + "mbox[%d]->0x%04x\n", cnt, *iptr2); + } mboxes >>= 1; iptr2++; @@ -277,9 +279,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) /* * Attempt to capture a firmware dump for further analysis - * of the current firmware state + * of the current firmware state. We do not need to do this + * if we are intentionally generating a dump. */ - ha->isp_ops->fw_dump(vha, 0); + if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) + ha->isp_ops->fw_dump(vha, 0); rval = QLA_FUNCTION_TIMEOUT; } @@ -464,7 +468,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) mcp->mb[1] = MSW(risc_addr); mcp->mb[2] = LSW(risc_addr); mcp->mb[3] = 0; - if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) { + if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || + IS_QLA27XX(ha)) { struct nvram_81xx *nv = ha->nvram; mcp->mb[4] = (nv->enhanced_features & EXTENDED_BB_CREDITS); @@ -531,10 +536,12 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; mcp->out_mb = MBX_0; mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; - if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha)) + if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha)) mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; if (IS_FWI2_CAPABLE(ha)) mcp->in_mb |= MBX_17|MBX_16|MBX_15; + if (IS_QLA27XX(ha)) + mcp->in_mb |= MBX_21|MBX_20|MBX_19|MBX_18; mcp->flags = 0; mcp->tov = MBX_TOV_SECONDS; rval = qla2x00_mailbox_command(vha, mcp); @@ -550,7 +557,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. 
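Returned values wider than 16 bits, fw_memory_size just below and the new fw_shared_ram_start/end bounds among them, are assembled from adjacent mailbox registers as (high << 16) | low. A tiny helper naming the convention (hypothetical; the driver open-codes the expression):

#include <linux/types.h>

/* Sketch: mailbox pairs carry 32-bit values as (high << 16) | low. */
static inline u32 demo_mbx_pair(u16 high, u16 low)
{
	return ((u32)high << 16) | low;
}

/* e.g. fw_memory_size would be demo_mbx_pair(mb[5], mb[4]). */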
*/ else ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4]; - if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) { + if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { ha->mpi_version[0] = mcp->mb[10] & 0xff; ha->mpi_version[1] = mcp->mb[11] >> 8; ha->mpi_version[2] = mcp->mb[11] & 0xff; @@ -570,6 +577,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", __func__, mcp->mb[17], mcp->mb[16]); } + if (IS_QLA27XX(ha)) { + ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18]; + ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20]; + } failed: if (rval != QLA_SUCCESS) { @@ -1195,7 +1206,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c, "Entered %s.\n", __func__); - if (IS_QLA82XX(ha) && ql2xdbwr) + if (IS_P3P_TYPE(ha) && ql2xdbwr) qla82xx_wr_32(ha, ha->nxdb_wr_ptr, (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16))); @@ -1210,7 +1221,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) mcp->mb[6] = MSW(MSD(ha->init_cb_dma)); mcp->mb[7] = LSW(MSD(ha->init_cb_dma)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; - if ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) && ha->ex_init_cb->ex_version) { + if (ha->ex_init_cb && ha->ex_init_cb->ex_version) { mcp->mb[1] = BIT_0; mcp->mb[10] = MSW(ha->ex_init_cb_dma); mcp->mb[11] = LSW(ha->ex_init_cb_dma); @@ -1221,7 +1232,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) } /* 1 and 2 should normally be captured. */ mcp->in_mb = MBX_2|MBX_1|MBX_0; - if (IS_QLA83XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) /* mb3 is additional info about the installed SFP. */ mcp->in_mb |= MBX_3; mcp->buf_size = size; @@ -1308,7 +1319,7 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len) left = 0; - list = kzalloc(dma_size, GFP_KERNEL); + list = kmemdup(pmap, dma_size, GFP_KERNEL); if (!list) { ql_log(ql_log_warn, vha, 0x1140, "%s(%ld): failed to allocate node names list " @@ -1317,7 +1328,6 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len) goto out_free; } - memcpy(list, pmap, dma_size); restart: dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); } @@ -1661,7 +1671,11 @@ qla24xx_link_initialize(scsi_qla_host_t *vha) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_LINK_INITIALIZATION; - mcp->mb[1] = BIT_6|BIT_4; + mcp->mb[1] = BIT_4; + if (vha->hw->operating_mode == LOOP) + mcp->mb[1] |= BIT_6; + else + mcp->mb[1] |= BIT_5; mcp->mb[2] = 0; mcp->mb[3] = 0; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; @@ -2341,7 +2355,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; mcp->out_mb = MBX_0; mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; - if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) + if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw)) mcp->in_mb |= MBX_12; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; @@ -2582,6 +2596,9 @@ qla24xx_abort_command(srb_t *sp) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, "Entered %s.\n", __func__); + if (ql2xasynctmfenable) + return qla24xx_async_abort_command(sp); + spin_lock_irqsave(&ha->hardware_lock, flags); for (handle = 1; handle < req->num_outstanding_cmds; handle++) { if (req->outstanding_cmds[handle] == sp) @@ -2626,7 +2643,10 @@ qla24xx_abort_command(srb_t *sp) ql_dbg(ql_dbg_mbx, vha, 0x1090, "Failed to complete IOCB -- completion status (%x).\n", le16_to_cpu(abt->nport_handle)); - rval = QLA_FUNCTION_FAILED; + if 
(abt->nport_handle == CS_IOCB_ERROR) + rval = QLA_FUNCTION_PARAMETER_ERROR; + else + rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, "Done %s.\n", __func__); @@ -2792,6 +2812,147 @@ qla2x00_system_error(scsi_qla_host_t *vha) return rval; } +int +qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA2031(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_WRITE_SERDES; + mcp->mb[1] = addr; + mcp->mb[2] = data & 0xff; + mcp->mb[3] = 0; + mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1183, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA2031(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_READ_SERDES; + mcp->mb[1] = addr; + mcp->mb[3] = 0; + mcp->out_mb = MBX_3|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + *data = mcp->mb[1] & 0xff; + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1186, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA8044(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1186, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; + mcp->mb[1] = HCS_WRITE_SERDES; + mcp->mb[3] = LSW(addr); + mcp->mb[4] = MSW(addr); + mcp->mb[5] = LSW(data); + mcp->mb[6] = MSW(data); + mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1187, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA8044(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; + mcp->mb[1] = HCS_READ_SERDES; + mcp->mb[3] = LSW(addr); + mcp->mb[4] = MSW(addr); + mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + *data = mcp->mb[2] << 16 | mcp->mb[1]; + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x118a, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, + "Done %s.\n", __func__); + } + + return rval; +} + /** * qla2x00_set_serdes_params() - * 
@ha: HA context @@ -2955,7 +3116,7 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, "Entered %s.\n", __func__); if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && - !IS_QLA83XX(vha->hw)) + !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) @@ -3568,12 +3729,14 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) unsigned long flags; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - struct device_reg_25xxmq __iomem *reg; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, "Entered %s.\n", __func__); + if (IS_SHADOW_REG_CAPABLE(ha)) + req->options |= BIT_13; + mcp->mb[0] = MBC_INITIALIZE_MULTIQ; mcp->mb[1] = req->options; mcp->mb[2] = MSW(LSD(req->dma)); @@ -3586,26 +3749,23 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) mcp->mb[12] = req->qos; mcp->mb[11] = req->vp_idx; mcp->mb[13] = req->rid; - if (IS_QLA83XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) mcp->mb[15] = 0; - reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) + - QLA_QUE_PAGE * req->id); - mcp->mb[4] = req->id; /* que in ptr index */ mcp->mb[8] = 0; /* que out ptr index */ - mcp->mb[9] = 0; + mcp->mb[9] = *req->out_ptr = 0; mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->flags = MBX_DMA_OUT; mcp->tov = MBX_TOV_SECONDS * 2; - if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) + if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) mcp->in_mb |= MBX_1; - if (IS_QLA83XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { mcp->out_mb |= MBX_15; /* debug q create issue in SR-IOV */ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; @@ -3613,12 +3773,10 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) spin_lock_irqsave(&ha->hardware_lock, flags); if (!(req->options & BIT_0)) { - WRT_REG_DWORD(®->req_q_in, 0); - if (!IS_QLA83XX(ha)) - WRT_REG_DWORD(®->req_q_out, 0); + WRT_REG_DWORD(req->req_q_in, 0); + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) + WRT_REG_DWORD(req->req_q_out, 0); } - req->req_q_in = ®->req_q_in; - req->req_q_out = ®->req_q_out; spin_unlock_irqrestore(&ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); @@ -3640,12 +3798,14 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) unsigned long flags; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - struct device_reg_25xxmq __iomem *reg; struct qla_hw_data *ha = vha->hw; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, "Entered %s.\n", __func__); + if (IS_SHADOW_REG_CAPABLE(ha)) + rsp->options |= BIT_13; + mcp->mb[0] = MBC_INITIALIZE_MULTIQ; mcp->mb[1] = rsp->options; mcp->mb[2] = MSW(LSD(rsp->dma)); @@ -3655,15 +3815,12 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) mcp->mb[5] = rsp->length; mcp->mb[14] = rsp->msix->entry; mcp->mb[13] = rsp->rid; - if (IS_QLA83XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) mcp->mb[15] = 0; - reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) + - QLA_QUE_PAGE * rsp->id); - mcp->mb[4] = rsp->id; /* que in ptr index */ - mcp->mb[8] = 0; + mcp->mb[8] = *rsp->in_ptr = 0; /* que out ptr index */ mcp->mb[9] = 0; mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 @@ -3675,7 +3832,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) if (IS_QLA81XX(ha)) { mcp->out_mb |= MBX_12|MBX_11|MBX_10; mcp->in_mb |= MBX_1; - } else if (IS_QLA83XX(ha)) { + } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 
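MBC_INITIALIZE_MULTIQ above hands each queue's 64-bit DMA base to firmware as four 16-bit mailboxes, sliced with the MSW/LSW and MSD/LSD macros. The packing on its own, with the macros redefined locally for clarity (the driver defines them in its headers):

#include <linux/types.h>

/* Sketch of the 64-bit DMA address split used by the init-queue command. */
#define DEMO_LSW(x)	((u16)(x))
#define DEMO_MSW(x)	((u16)((u32)(x) >> 16))
#define DEMO_LSD(x)	((u32)((u64)(x)))
#define DEMO_MSD(x)	((u32)((u64)(x) >> 32))

static void demo_pack_dma(u16 *mb, u64 dma)
{
	mb[2] = DEMO_MSW(DEMO_LSD(dma));	/* bits 31..16 */
	mb[3] = DEMO_LSW(DEMO_LSD(dma));	/* bits 15..0  */
	mb[6] = DEMO_MSW(DEMO_MSD(dma));	/* bits 63..48 */
	mb[7] = DEMO_LSW(DEMO_MSD(dma));	/* bits 47..32 */
}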
mcp->in_mb |= MBX_1; /* debug q create issue in SR-IOV */ @@ -3684,9 +3841,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) spin_lock_irqsave(&ha->hardware_lock, flags); if (!(rsp->options & BIT_0)) { - WRT_REG_DWORD(®->rsp_q_out, 0); + WRT_REG_DWORD(rsp->rsp_q_out, 0); if (!IS_QLA83XX(ha)) - WRT_REG_DWORD(®->rsp_q_in, 0); + WRT_REG_DWORD(rsp->rsp_q_in, 0); } spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -3742,7 +3899,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, "Entered %s.\n", __func__); - if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) + if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && + !IS_QLA27XX(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; @@ -3773,7 +3931,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) + if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && + !IS_QLA27XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, @@ -3807,7 +3966,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) + if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && + !IS_QLA27XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, @@ -3867,7 +4027,54 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) } int -qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version) +qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int i; + int len; + uint16_t *str; + struct qla_hw_data *ha = vha->hw; + + if (!IS_P3P_TYPE(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, + "Entered %s.\n", __func__); + + str = (void *)version; + len = strlen(version); + + mcp->mb[0] = MBC_SET_RNID_PARAMS; + mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; + mcp->out_mb = MBX_1|MBX_0; + for (i = 4; i < 16 && len; i++, str++, len -= 2) { + mcp->mb[i] = cpu_to_le16p(str); + mcp->out_mb |= 1<<i; + } + for (; i < 16; i++) { + mcp->mb[i] = 0; + mcp->out_mb |= 1<<i; + } + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x117c, + "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version) { int rval; mbx_cmd_t mc; @@ -3878,22 +4085,23 @@ qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version) dma_addr_t str_dma; struct qla_hw_data *ha = vha->hw; - if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha)) + if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) || + IS_P3P_TYPE(ha)) return QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e, "Entered %s.\n", __func__); str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); if (!str) { - ql_log(ql_log_warn, vha, 0x1156, + ql_log(ql_log_warn, vha, 0x117f, "Failed to allocate driver version param.\n"); return QLA_MEMORY_ALLOC_FAILED; } memcpy(str, "\x7\x3\x11\x0", 4); dwlen = str[0]; - len = dwlen * sizeof(uint32_t) - 4; + len = dwlen * 4 - 4; memset(str + 4, 0, len); if 
(len > strlen(version)) len = strlen(version); @@ -3906,16 +4114,16 @@ qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version) mcp->mb[6] = MSW(MSD(str_dma)); mcp->mb[7] = LSW(MSD(str_dma)); mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; - mcp->in_mb = MBX_0; + mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_mbx, vha, 0x1157, - "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + ql_dbg(ql_dbg_mbx, vha, 0x1180, + "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]); } else { - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158, + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181, "Done %s.\n", __func__); } @@ -4171,7 +4379,6 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - uint32_t iter_cnt = 0x1; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7, "Entered %s.\n", __func__); @@ -4197,8 +4404,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq, mcp->mb[7] = MSW(MSD(mreq->rcv_dma)); /* Iteration count */ - mcp->mb[18] = LSW(iter_cnt); - mcp->mb[19] = MSW(iter_cnt); + mcp->mb[18] = LSW(mreq->iteration_count); + mcp->mb[19] = MSW(mreq->iteration_count); mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; @@ -4431,7 +4638,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha) mcp->mb[1] = 0; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; - if (IS_QLA83XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) mcp->in_mb |= MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; @@ -4460,7 +4667,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, "Entered %s.\n", __func__); - if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) + if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && + !IS_QLA27XX(ha)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_GET_PORT_CONFIG; mcp->out_mb = MBX_0; @@ -4565,39 +4773,43 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp) struct qla_hw_data *ha = vha->hw; uint8_t byte; - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca, - "Entered %s.\n", __func__); - - if (ha->thermal_support & THERMAL_SUPPORT_I2C) { - rval = qla2x00_read_sfp(vha, 0, &byte, - 0x98, 0x1, 1, BIT_13|BIT_12|BIT_0); - *temp = byte; - if (rval == QLA_SUCCESS) - goto done; - - ql_log(ql_log_warn, vha, 0x10c9, - "Thermal not supported by I2C.\n"); - ha->thermal_support &= ~THERMAL_SUPPORT_I2C; + if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) { + ql_dbg(ql_dbg_mbx, vha, 0x1150, + "Thermal not supported by this card.\n"); + return rval; } - if (ha->thermal_support & THERMAL_SUPPORT_ISP) { - rval = qla2x00_read_asic_temperature(vha, temp); - if (rval == QLA_SUCCESS) - goto done; - - ql_log(ql_log_warn, vha, 0x1019, - "Thermal not supported by ISP.\n"); - ha->thermal_support &= ~THERMAL_SUPPORT_ISP; + if (IS_QLA25XX(ha)) { + if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && + ha->pdev->subsystem_device == 0x0175) { + rval = qla2x00_read_sfp(vha, 0, &byte, + 0x98, 0x1, 1, BIT_13|BIT_0); + *temp = byte; + return rval; + } + if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && + ha->pdev->subsystem_device == 0x338e) { + rval = qla2x00_read_sfp(vha, 0, &byte, + 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0); + *temp = byte; + return rval; + } + ql_dbg(ql_dbg_mbx, vha, 0x10c9, + "Thermal not supported by this card.\n"); + return rval; } - ql_log(ql_log_warn, vha, 
0x1150, - "Thermal not supported by this card " - "(ignoring further requests).\n"); - return rval; + if (IS_QLA82XX(ha)) { + *temp = qla82xx_read_temperature(vha); + rval = QLA_SUCCESS; + return rval; + } else if (IS_QLA8044(ha)) { + *temp = qla8044_read_temperature(vha); + rval = QLA_SUCCESS; + return rval; + } -done: - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018, - "Done %s.\n", __func__); + rval = qla2x00_read_asic_temperature(vha, temp); return rval; } @@ -4647,7 +4859,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d, "Entered %s.\n", __func__); - if (!IS_QLA82XX(ha)) + if (!IS_P3P_TYPE(ha)) return QLA_FUNCTION_FAILED; memset(mcp, 0, sizeof(mbx_cmd_t)); @@ -4765,6 +4977,60 @@ qla82xx_md_get_template(scsi_qla_host_t *vha) } int +qla8044_md_get_template(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval = QLA_FUNCTION_FAILED; + int offset = 0, size = MINIDUMP_SIZE_36K; + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f, + "Entered %s.\n", __func__); + + ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, + ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); + if (!ha->md_tmplt_hdr) { + ql_log(ql_log_warn, vha, 0xb11b, + "Unable to allocate memory for Minidump template.\n"); + return rval; + } + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + while (offset < ha->md_template_size) { + mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[2] = LSW(RQST_TMPLT); + mcp->mb[3] = MSW(RQST_TMPLT); + mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset)); + mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset)); + mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset)); + mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset)); + mcp->mb[8] = LSW(size); + mcp->mb[9] = MSW(size); + mcp->mb[10] = offset & 0x0000FFFF; + mcp->mb[11] = offset & 0xFFFF0000; + mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; + mcp->tov = MBX_TOV_SECONDS; + mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| + MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xb11c, + "mailbox command FAILED=0x%x, subcode=%x.\n", + ((mcp->mb[1] << 16) | mcp->mb[0]), + ((mcp->mb[3] << 16) | mcp->mb[2])); + return rval; + } else + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d, + "Done %s.\n", __func__); + offset = offset + size; + } + return rval; +} + +int qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) { int rval; @@ -4860,7 +5126,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - if (!IS_QLA82XX(ha)) + if (!IS_P3P_TYPE(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127, @@ -4898,7 +5164,7 @@ qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, @@ -4973,7 +5239,7 @@ qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) struct qla_hw_data *ha = vha->hw; unsigned long retry_max_time = jiffies + (2 * HZ); - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 
f868a9f98af..89998244f48 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -630,7 +630,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, struct req_que *req = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); uint16_t que_id = 0; - device_reg_t __iomem *reg; + device_reg_t *reg; uint32_t cnt; req = kzalloc(sizeof(struct req_que), GFP_KERNEL); @@ -699,6 +699,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, req->cnt = req->length; req->id = que_id; reg = ISP_QUE_REG(ha, que_id); + req->req_q_in = &reg->isp25mq.req_q_in; + req->req_q_out = &reg->isp25mq.req_q_out; req->max_q_depth = ha->req_q_map[0]->max_q_depth; mutex_unlock(&ha->vport_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc004, @@ -752,7 +754,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, struct rsp_que *rsp = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); uint16_t que_id = 0; - device_reg_t __iomem *reg; + device_reg_t *reg; rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); if (rsp == NULL) { diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c new file mode 100644 index 00000000000..abeb3901498 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -0,0 +1,3472 @@ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. + */ +#include "qla_def.h" +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/ratelimit.h> +#include <linux/vmalloc.h> +#include <scsi/scsi_tcq.h> +#include <linux/utsname.h> + + +/* QLAFX00 specific Mailbox implementation functions */ + +/* + * qlafx00_mailbox_command + * Issues a mailbox command and waits for completion. + * + * Input: + * ha = adapter block pointer. + * mcp = driver internal mbx struct pointer. + * + * Output: + * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data. + * + * Returns: + * 0 : QLA_SUCCESS = cmd performed successfully + * 1 : QLA_FUNCTION_FAILED (error encountered) + * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered) + * + * Context: + * Kernel context. 
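qlafx00_mailbox_command() below serializes callers on the mbx_cmd_comp completion rather than a mutex, so a contender that cannot get the mailbox within tov seconds fails with QLA_FUNCTION_TIMEOUT instead of blocking indefinitely. The gate reduced to its two operations, with hypothetical names:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/*
 * Sketch: one command owns the mailbox at a time; a waiter that times
 * out after 'tov' seconds bails instead of sleeping forever.
 */
static int demo_mbx_enter(struct completion *gate, unsigned long tov)
{
	if (!wait_for_completion_timeout(gate, tov * HZ))
		return -ETIMEDOUT;	/* QLA_FUNCTION_TIMEOUT in the driver */
	return 0;
}

static void demo_mbx_exit(struct completion *gate)
{
	complete(gate);	/* admit the next mailbox command */
}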
+ */ +static int +qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp) + +{ + int rval; + unsigned long flags = 0; + device_reg_t *reg; + uint8_t abort_active; + uint8_t io_lock_on; + uint16_t command = 0; + uint32_t *iptr; + uint32_t __iomem *optr; + uint32_t cnt; + uint32_t mboxes; + unsigned long wait_time; + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + if (ha->pdev->error_state > pci_channel_io_frozen) { + ql_log(ql_log_warn, vha, 0x115c, + "error_state is greater than pci_channel_io_frozen, " + "exiting.\n"); + return QLA_FUNCTION_TIMEOUT; + } + + if (vha->device_flags & DFLG_DEV_FAILED) { + ql_log(ql_log_warn, vha, 0x115f, + "Device in failed state, exiting.\n"); + return QLA_FUNCTION_TIMEOUT; + } + + reg = ha->iobase; + io_lock_on = base_vha->flags.init_done; + + rval = QLA_SUCCESS; + abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + + if (ha->flags.pci_channel_io_perm_failure) { + ql_log(ql_log_warn, vha, 0x1175, + "Perm failure on EEH timeout MBX, exiting.\n"); + return QLA_FUNCTION_TIMEOUT; + } + + if (ha->flags.isp82xx_fw_hung) { + /* Setting Link-Down error */ + mcp->mb[0] = MBS_LINK_DOWN_ERROR; + ql_log(ql_log_warn, vha, 0x1176, + "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); + rval = QLA_FUNCTION_FAILED; + goto premature_exit; + } + + /* + * Wait for active mailbox commands to finish by waiting at most tov + * seconds. This is to serialize actual issuing of mailbox cmds during + * non ISP abort time. + */ + if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { + /* Timeout occurred. Return error. */ + ql_log(ql_log_warn, vha, 0x1177, + "Cmd access timeout, cmd=0x%x, Exiting.\n", + mcp->mb[0]); + return QLA_FUNCTION_TIMEOUT; + } + + ha->flags.mbox_busy = 1; + /* Save mailbox command for debug */ + ha->mcp32 = mcp; + + ql_dbg(ql_dbg_mbx, vha, 0x1178, + "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); + + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Load mailbox registers. */ + optr = (uint32_t __iomem *)®->ispfx00.mailbox0; + + iptr = mcp->mb; + command = mcp->mb[0]; + mboxes = mcp->out_mb; + + for (cnt = 0; cnt < ha->mbx_count; cnt++) { + if (mboxes & BIT_0) + WRT_REG_DWORD(optr, *iptr); + + mboxes >>= 1; + optr++; + iptr++; + } + + /* Issue set host interrupt command to send cmd out. */ + ha->flags.mbox_int = 0; + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172, + (uint8_t *)mcp->mb, 16); + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173, + ((uint8_t *)mcp->mb + 0x10), 16); + ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174, + ((uint8_t *)mcp->mb + 0x20), 8); + + /* Unlock mbx registers and wait for interrupt */ + ql_dbg(ql_dbg_mbx, vha, 0x1179, + "Going to unlock irq & waiting for interrupts. 
" + "jiffies=%lx.\n", jiffies); + + /* Wait for mbx cmd completion until timeout */ + if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { + set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + + QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x112c, + "Cmd=%x Polling Mode.\n", command); + + QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ + while (!ha->flags.mbox_int) { + if (time_after(jiffies, wait_time)) + break; + + /* Check for pending interrupts. */ + qla2x00_poll(ha->rsp_q_map[0]); + + if (!ha->flags.mbox_int && + !(IS_QLA2200(ha) && + command == MBC_LOAD_RISC_RAM_EXTENDED)) + usleep_range(10000, 11000); + } /* while */ + ql_dbg(ql_dbg_mbx, vha, 0x112d, + "Waited %d sec.\n", + (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); + } + + /* Check whether we timed out */ + if (ha->flags.mbox_int) { + uint32_t *iptr2; + + ql_dbg(ql_dbg_mbx, vha, 0x112e, + "Cmd=%x completed.\n", command); + + /* Got interrupt. Clear the flag. */ + ha->flags.mbox_int = 0; + clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE) + rval = QLA_FUNCTION_FAILED; + + /* Load return mailbox registers. */ + iptr2 = mcp->mb; + iptr = (uint32_t *)&ha->mailbox_out32[0]; + mboxes = mcp->in_mb; + for (cnt = 0; cnt < ha->mbx_count; cnt++) { + if (mboxes & BIT_0) + *iptr2 = *iptr; + + mboxes >>= 1; + iptr2++; + iptr++; + } + } else { + + rval = QLA_FUNCTION_TIMEOUT; + } + + ha->flags.mbox_busy = 0; + + /* Clean up */ + ha->mcp32 = NULL; + + if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { + ql_dbg(ql_dbg_mbx, vha, 0x113a, + "checking for additional resp interrupt.\n"); + + /* polling mode for non isp_abort commands. */ + qla2x00_poll(ha->rsp_q_map[0]); + } + + if (rval == QLA_FUNCTION_TIMEOUT && + mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) { + if (!io_lock_on || (mcp->flags & IOCTL_CMD) || + ha->flags.eeh_busy) { + /* not in dpc. schedule it for dpc to take over. */ + ql_dbg(ql_dbg_mbx, vha, 0x115d, + "Timeout, schedule isp_abort_needed.\n"); + + if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && + !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && + !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { + + ql_log(ql_log_info, base_vha, 0x115e, + "Mailbox cmd timeout occurred, cmd=0x%x, " + "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " + "abort.\n", command, mcp->mb[0], + ha->flags.eeh_busy); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + } else if (!abort_active) { + /* call abort directly since we are in the DPC thread */ + ql_dbg(ql_dbg_mbx, vha, 0x1160, + "Timeout, calling abort_isp.\n"); + + if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && + !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && + !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { + + ql_log(ql_log_info, base_vha, 0x1161, + "Mailbox cmd timeout occurred, cmd=0x%x, " + "mb[0]=0x%x. Scheduling ISP abort ", + command, mcp->mb[0]); + + set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); + clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + if (ha->isp_ops->abort_isp(vha)) { + /* Failed. retry later. */ + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + } + clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); + ql_dbg(ql_dbg_mbx, vha, 0x1162, + "Finished abort_isp.\n"); + } + } + } + +premature_exit: + /* Allow next mbx cmd to come in. 
*/ + complete(&ha->mbx_cmd_comp); + + if (rval) { + ql_log(ql_log_warn, base_vha, 0x1163, + "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, " + "mb[3]=%x, cmd=%x ****.\n", + mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command); + } else { + ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qlafx00_driver_shutdown + * Indicate a driver shutdown to firmware. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * local function return status code. + * + * Context: + * Kernel context. + */ +int +qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo) +{ + int rval; + struct mbx_cmd_32 mc; + struct mbx_cmd_32 *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_MR_DRV_SHUTDOWN; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_0; + if (tmo) + mcp->tov = tmo; + else + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qlafx00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1167, + "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168, + "Done %s.\n", __func__); + } + + return rval; +} + +/* + * qlafx00_get_firmware_state + * Get adapter firmware state. + * + * Input: + * ha = adapter block pointer. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qla7xxx local function return status code. + * + * Context: + * Kernel context. + */ +static int +qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states) +{ + int rval; + struct mbx_cmd_32 mc; + struct mbx_cmd_32 *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_GET_FIRMWARE_STATE; + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qlafx00_mailbox_command(vha, mcp); + + /* Return firmware states. */ + states[0] = mcp->mb[1]; + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x116a, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b, + "Done %s.\n", __func__); + } + return rval; +} + +/* + * qlafx00_init_firmware + * Initialize adapter firmware. + * + * Input: + * ha = adapter block pointer. + * dptr = Initialization control block pointer. + * size = size of initialization control block. + * TARGET_QUEUE_LOCK must be released. + * ADAPTER_STATE_LOCK must be released. + * + * Returns: + * qlafx00 local function return status code. + * + * Context: + * Kernel context. 
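qlafx00_driver_shutdown() and qlafx00_get_firmware_state() above follow one recipe: put the opcode in mb[0], declare which registers travel in each direction through the out_mb/in_mb bitmaps, set a timeout, and hand the block to the mailbox engine. The skeleton of that recipe, with simplified types:

#include <linux/bitops.h>
#include <linux/types.h>

struct demo_mbx {
	u32 mb[32];
	u32 out_mb;		/* bitmap of registers sent to firmware */
	u32 in_mb;		/* bitmap of registers read back */
	unsigned long tov;	/* timeout, seconds */
};

/* Sketch: issue a no-argument command and return mb[1] as its result. */
static int demo_simple_cmd(u32 opcode, u32 *result,
			   int (*issue)(struct demo_mbx *))
{
	struct demo_mbx mc = {
		.mb[0]	= opcode,
		.out_mb	= BIT(0),
		.in_mb	= BIT(1) | BIT(0),
		.tov	= 30,	/* MBX_TOV_SECONDS in the driver */
	};
	int rval = issue(&mc);

	if (!rval)
		*result = mc.mb[1];
	return rval;
}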
+ */ +int +qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size) +{ + int rval; + struct mbx_cmd_32 mc; + struct mbx_cmd_32 *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; + + mcp->mb[1] = 0; + mcp->mb[2] = MSD(ha->init_cb_dma); + mcp->mb[3] = LSD(ha->init_cb_dma); + + mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->buf_size = size; + mcp->flags = MBX_DMA_OUT; + mcp->tov = MBX_TOV_SECONDS; + rval = qlafx00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x116d, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e, + "Done %s.\n", __func__); + } + return rval; +} + +/* + * qlafx00_mbx_reg_test + */ +static int +qlafx00_mbx_reg_test(scsi_qla_host_t *vha) +{ + int rval; + struct mbx_cmd_32 mc; + struct mbx_cmd_32 *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f, + "Entered %s.\n", __func__); + + + mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; + mcp->mb[1] = 0xAAAA; + mcp->mb[2] = 0x5555; + mcp->mb[3] = 0xAA55; + mcp->mb[4] = 0x55AA; + mcp->mb[5] = 0xA5A5; + mcp->mb[6] = 0x5A5A; + mcp->mb[7] = 0x2525; + mcp->mb[8] = 0xBBBB; + mcp->mb[9] = 0x6666; + mcp->mb[10] = 0xBB66; + mcp->mb[11] = 0x66BB; + mcp->mb[12] = 0xB6B6; + mcp->mb[13] = 0x6B6B; + mcp->mb[14] = 0x3636; + mcp->mb[15] = 0xCCCC; + + + mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| + MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| + MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->buf_size = 0; + mcp->flags = MBX_DMA_OUT; + mcp->tov = MBX_TOV_SECONDS; + rval = qlafx00_mailbox_command(vha, mcp); + if (rval == QLA_SUCCESS) { + if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 || + mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA) + rval = QLA_FUNCTION_FAILED; + if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A || + mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB) + rval = QLA_FUNCTION_FAILED; + if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 || + mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6) + rval = QLA_FUNCTION_FAILED; + if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 || + mcp->mb[31] != 0xCCCC) + rval = QLA_FUNCTION_FAILED; + } + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1170, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171, + "Done %s.\n", __func__); + } + return rval; +} + +/** + * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers. + * @ha: HA context + * + * Returns 0 on success. + */ +int +qlafx00_pci_config(scsi_qla_host_t *vha) +{ + uint16_t w; + struct qla_hw_data *ha = vha->hw; + + pci_set_master(ha->pdev); + pci_try_set_mwi(ha->pdev); + + pci_read_config_word(ha->pdev, PCI_COMMAND, &w); + w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); + w &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(ha->pdev, PCI_COMMAND, w); + + /* PCIe -- adjust Maximum Read Request Size (2048). */ + if (pci_is_pcie(ha->pdev)) + pcie_set_readrq(ha->pdev, 2048); + + ha->chip_revision = ha->pdev->revision; + + return QLA_SUCCESS; +} + +/** + * qlafx00_warm_reset() - Perform warm reset of iSA(CPUs being reset on SOC). 
+ * @ha: HA context + * + */ +static inline void +qlafx00_soc_cpu_reset(scsi_qla_host_t *vha) +{ + unsigned long flags = 0; + struct qla_hw_data *ha = vha->hw; + int i, core; + uint32_t cnt; + uint32_t reg_val; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0); + QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0); + + /* stop the XOR DMA engines */ + QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02); + QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02); + QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02); + QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02); + + /* stop the IDMA engines */ + reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840); + reg_val &= ~(1<<12); + QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val); + + reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844); + reg_val &= ~(1<<12); + QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val); + + reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848); + reg_val &= ~(1<<12); + QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val); + + reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C); + reg_val &= ~(1<<12); + QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val); + + for (i = 0; i < 100000; i++) { + if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 && + (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0) + break; + udelay(100); + } + + /* Set all 4 cores in reset */ + for (i = 0; i < 4; i++) { + QLAFX00_SET_HBA_SOC_REG(ha, + (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01)); + QLAFX00_SET_HBA_SOC_REG(ha, + (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101)); + } + + /* Reset all units in Fabric */ + QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101)); + + /* */ + QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1); + QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0); + + /* Set all 4 core Memory Power Down Registers */ + for (i = 0; i < 5; i++) { + QLAFX00_SET_HBA_SOC_REG(ha, + (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0)); + } + + /* Reset all interrupt control registers */ + for (i = 0; i < 115; i++) { + QLAFX00_SET_HBA_SOC_REG(ha, + (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0)); + } + + /* Reset Timers control registers. per core */ + for (core = 0; core < 4; core++) + for (i = 0; i < 8; i++) + QLAFX00_SET_HBA_SOC_REG(ha, + (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0)); + + /* Reset per core IRQ ack register */ + for (core = 0; core < 4; core++) + QLAFX00_SET_HBA_SOC_REG(ha, + (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF)); + + /* Set Fabric control and config to defaults */ + QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2)); + QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3)); + + /* Kick in Fabric units */ + QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0)); + + /* Kick in Core0 to start boot process */ + QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00)); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* Wait 10secs for soft-reset to complete. */ + for (cnt = 10; cnt; cnt--) { + msleep(1000); + barrier(); + } +} + +/** + * qlafx00_soft_reset() - Soft Reset ISPFx00. + * @ha: HA context + * + * Returns 0 on success. + */ +void +qlafx00_soft_reset(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (unlikely(pci_channel_offline(ha->pdev) && + ha->flags.pci_channel_io_perm_failure)) + return; + + ha->isp_ops->disable_intrs(ha); + qlafx00_soc_cpu_reset(vha); +} + +/** + * qlafx00_chip_diag() - Test ISPFx00 for proper operation. + * @ha: HA context + * + * Returns 0 on success. 
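+ *
+ * The mailbox register test it performs writes fixed patterns into
+ * mailboxes 1-15 and expects the firmware to echo them back in
+ * mailboxes 17-31: the 0xAAAA written to mb[1] must read back from
+ * mb[17], the 0x5555 in mb[2] from mb[18], and so on. Any mismatch
+ * fails the diagnostic with QLA_FUNCTION_FAILED.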
+ */
+int
+qlafx00_chip_diag(scsi_qla_host_t *vha)
+{
+	int rval = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+
+	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
+
+	rval = qlafx00_mbx_reg_test(vha);
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x1165,
+		    "Failed mailbox send register test\n");
+	} else {
+		/* Flag a successful rval */
+		rval = QLA_SUCCESS;
+	}
+	return rval;
+}
+
+void
+qlafx00_config_rings(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+
+	WRT_REG_DWORD(&reg->req_q_in, 0);
+	WRT_REG_DWORD(&reg->req_q_out, 0);
+
+	WRT_REG_DWORD(&reg->rsp_q_in, 0);
+	WRT_REG_DWORD(&reg->rsp_q_out, 0);
+
+	/* PCI posting */
+	RD_REG_DWORD(&reg->rsp_q_out);
+}
+
+char *
+qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (pci_is_pcie(ha->pdev)) {
+		strcpy(str, "PCIe iSA");
+		return str;
+	}
+	return str;
+}
+
+char *
+qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	sprintf(str, "%s", ha->mr.fw_version);
+	return str;
+}
+
+void
+qlafx00_enable_intrs(struct qla_hw_data *ha)
+{
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->interrupts_on = 1;
+	QLAFX00_ENABLE_ICNTRL_REG(ha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qlafx00_disable_intrs(struct qla_hw_data *ha)
+{
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->interrupts_on = 0;
+	QLAFX00_DISABLE_ICNTRL_REG(ha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+int
+qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
+{
+	return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
+}
+
+int
+qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
+{
+	return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
+}
+
+int
+qlafx00_loop_reset(scsi_qla_host_t *vha)
+{
+	int ret;
+	struct fc_port *fcport;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (ql2xtargetreset) {
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (fcport->port_type != FCT_TARGET)
+				continue;
+
+			ret = ha->isp_ops->target_reset(fcport, 0, 0);
+			if (ret != QLA_SUCCESS) {
+				ql_dbg(ql_dbg_taskm, vha, 0x803d,
+				    "Bus Reset failed: Reset=%d "
+				    "d_id=%x.\n", ret, fcport->d_id.b24);
+			}
+		}
+	}
+	return QLA_SUCCESS;
+}
+
+int
+qlafx00_iospace_config(struct qla_hw_data *ha)
+{
+	if (pci_request_selected_regions(ha->pdev, ha->bars,
+	    QLA2XXX_DRIVER_NAME)) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
+		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	/* Use MMIO operations for all accesses.
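+	 * The ISPFx00 exposes two memory BARs: BAR0 maps the SoC
+	 * control registers (ha->cregbase below) and BAR2 maps the
+	 * request/response queue window (ha->iobase); both are checked
+	 * for resource type and minimum length before being remapped.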
*/ + if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { + ql_log_pci(ql_log_warn, ha->pdev, 0x014f, + "Invalid pci I/O region size (%s).\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) { + ql_log_pci(ql_log_warn, ha->pdev, 0x0127, + "Invalid PCI mem BAR0 region size (%s), aborting\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + + ha->cregbase = + ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00); + if (!ha->cregbase) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x0128, + "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); + goto iospace_error_exit; + } + + if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) { + ql_log_pci(ql_log_warn, ha->pdev, 0x0129, + "region #2 not an MMIO resource (%s), aborting\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) { + ql_log_pci(ql_log_warn, ha->pdev, 0x012a, + "Invalid PCI mem BAR2 region size (%s), aborting\n", + pci_name(ha->pdev)); + goto iospace_error_exit; + } + + ha->iobase = + ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00); + if (!ha->iobase) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x012b, + "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); + goto iospace_error_exit; + } + + /* Determine queue resources */ + ha->max_req_queues = ha->max_rsp_queues = 1; + + ql_log_pci(ql_log_info, ha->pdev, 0x012c, + "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n", + ha->bars, ha->cregbase, ha->iobase); + + return 0; + +iospace_error_exit: + return -ENOMEM; +} + +static void +qlafx00_save_queue_ptrs(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + struct rsp_que *rsp = ha->rsp_q_map[0]; + + req->length_fx00 = req->length; + req->ring_fx00 = req->ring; + req->dma_fx00 = req->dma; + + rsp->length_fx00 = rsp->length; + rsp->ring_fx00 = rsp->ring; + rsp->dma_fx00 = rsp->dma; + + ql_dbg(ql_dbg_init, vha, 0x012d, + "req: %p, ring_fx00: %p, length_fx00: 0x%x," + "req->dma_fx00: 0x%llx\n", req, req->ring_fx00, + req->length_fx00, (u64)req->dma_fx00); + + ql_dbg(ql_dbg_init, vha, 0x012e, + "rsp: %p, ring_fx00: %p, length_fx00: 0x%x," + "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00, + rsp->length_fx00, (u64)rsp->dma_fx00); +} + +static int +qlafx00_config_queues(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; + struct rsp_que *rsp = ha->rsp_q_map[0]; + dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2); + + req->length = ha->req_que_len; + req->ring = (void *)ha->iobase + ha->req_que_off; + req->dma = bar2_hdl + ha->req_que_off; + if ((!req->ring) || (req->length == 0)) { + ql_log_pci(ql_log_info, ha->pdev, 0x012f, + "Unable to allocate memory for req_ring\n"); + return QLA_FUNCTION_FAILED; + } + + ql_dbg(ql_dbg_init, vha, 0x0130, + "req: %p req_ring pointer %p req len 0x%x " + "req off 0x%x\n, req->dma: 0x%llx", + req, req->ring, req->length, + ha->req_que_off, (u64)req->dma); + + rsp->length = ha->rsp_que_len; + rsp->ring = (void *)ha->iobase + ha->rsp_que_off; + rsp->dma = bar2_hdl + ha->rsp_que_off; + if ((!rsp->ring) || (rsp->length == 0)) { + ql_log_pci(ql_log_info, ha->pdev, 0x0131, + "Unable to allocate memory for rsp_ring\n"); + return QLA_FUNCTION_FAILED; + } + + ql_dbg(ql_dbg_init, vha, 0x0132, + "rsp: %p rsp_ring pointer %p rsp len 0x%x " + "rsp off 0x%x, rsp->dma: 0x%llx\n", + rsp, rsp->ring, rsp->length, + ha->rsp_que_off, (u64)rsp->dma); + + return 
QLA_SUCCESS;
+}
+
+static int
+qlafx00_init_fw_ready(scsi_qla_host_t *vha)
+{
+	int rval = 0;
+	unsigned long wtime;
+	uint16_t wait_time;	/* Wait time */
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+	uint32_t aenmbx, aenmbx7 = 0;
+	uint32_t pseudo_aen;
+	uint32_t state[5];
+	bool done = false;
+
+	/* 30 seconds wait - Adjust if required */
+	wait_time = 30;
+
+	pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
+	if (pseudo_aen == 1) {
+		aenmbx7 = RD_REG_DWORD(&reg->initval7);
+		ha->mbx_intr_code = MSW(aenmbx7);
+		ha->rqstq_intr_code = LSW(aenmbx7);
+		rval = qlafx00_driver_shutdown(vha, 10);
+		if (rval != QLA_SUCCESS)
+			qlafx00_soft_reset(vha);
+	}
+
+	/* wait time before firmware ready */
+	wtime = jiffies + (wait_time * HZ);
+	do {
+		aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
+		barrier();
+		ql_dbg(ql_dbg_mbx, vha, 0x0133,
+		    "aenmbx: 0x%x\n", aenmbx);
+
+		switch (aenmbx) {
+		case MBA_FW_NOT_STARTED:
+		case MBA_FW_STARTING:
+			break;
+
+		case MBA_SYSTEM_ERR:
+		case MBA_REQ_TRANSFER_ERR:
+		case MBA_RSP_TRANSFER_ERR:
+		case MBA_FW_INIT_FAILURE:
+			qlafx00_soft_reset(vha);
+			break;
+
+		case MBA_FW_RESTART_CMPLT:
+			/* Set the mbx and rqstq intr code */
+			aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+			ha->mbx_intr_code = MSW(aenmbx7);
+			ha->rqstq_intr_code = LSW(aenmbx7);
+			ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
+			ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
+			ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
+			ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+			WRT_REG_DWORD(&reg->aenmailbox0, 0);
+			RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
+			ql_dbg(ql_dbg_init, vha, 0x0134,
+			    "f/w returned mbx_intr_code: 0x%x, "
+			    "rqstq_intr_code: 0x%x\n",
+			    ha->mbx_intr_code, ha->rqstq_intr_code);
+			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+			rval = QLA_SUCCESS;
+			done = true;
+			break;
+
+		default:
+			if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
+				break;
+
+			/* The fw is apparently not ready. In order to
+			 * continue, we might need to issue a Mbox cmd, but
+			 * the problem is that the DoorBell vector values
+			 * that come with the 8060 AEN are most likely gone
+			 * by now (and thus no bell would be rung on the fw
+			 * side when the mbox cmd is issued). We therefore
+			 * have to grab the 8060 AEN shadow regs (filled in
+			 * by the FW when the last 8060 AEN was posted).
+			 * Do the following to determine what is needed in
+			 * order to get the FW ready:
+			 * 1. reload the 8060 AEN values from the shadow regs
+			 * 2. clear int status to get rid of possible pending
+			 *    interrupts
+			 * 3. issue Get FW State Mbox cmd to determine fw state
+			 * Set the mbx and rqstq intr code from Shadow Regs
+			 */
+			aenmbx7 = RD_REG_DWORD(&reg->initval7);
+			ha->mbx_intr_code = MSW(aenmbx7);
+			ha->rqstq_intr_code = LSW(aenmbx7);
+			ha->req_que_off = RD_REG_DWORD(&reg->initval1);
+			ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
+			ha->req_que_len = RD_REG_DWORD(&reg->initval5);
+			ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
+			ql_dbg(ql_dbg_init, vha, 0x0135,
+			    "f/w returned mbx_intr_code: 0x%x, "
+			    "rqstq_intr_code: 0x%x\n",
+			    ha->mbx_intr_code, ha->rqstq_intr_code);
+			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+			/* Get the FW state */
+			rval = qlafx00_get_firmware_state(vha, state);
+			if (rval != QLA_SUCCESS) {
+				/* Retry if timer has not expired */
+				break;
+			}
+
+			if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
+				/* Firmware is waiting to be
+				 * initialized by driver
+				 */
+				rval = QLA_SUCCESS;
+				done = true;
+				break;
+			}
+
+			/* Issue driver shutdown and wait until f/w recovers.
+			 * Driver should continue to poll until 8060 AEN is
+			 * received indicating firmware recovery.
+			 */
+			ql_dbg(ql_dbg_init, vha, 0x0136,
+			    "Sending Driver shutdown fw_state 0x%x\n",
+			    state[0]);
+
+			rval = qlafx00_driver_shutdown(vha, 10);
+			if (rval != QLA_SUCCESS) {
+				rval = QLA_FUNCTION_FAILED;
+				break;
+			}
+			msleep(500);
+
+			wtime = jiffies + (wait_time * HZ);
+			break;
+		}
+
+		if (!done) {
+			if (time_after_eq(jiffies, wtime)) {
+				ql_dbg(ql_dbg_init, vha, 0x0137,
+				    "Init f/w failed: aen[7]: 0x%x\n",
+				    RD_REG_DWORD(&reg->aenmailbox7));
+				rval = QLA_FUNCTION_FAILED;
+				done = true;
+				break;
+			}
+			/* Delay for a while */
+			msleep(500);
+		}
+	} while (!done);
+
+	if (rval)
+		ql_dbg(ql_dbg_init, vha, 0x0138,
+		    "%s **** FAILED ****.\n", __func__);
+	else
+		ql_dbg(ql_dbg_init, vha, 0x0139,
+		    "%s **** SUCCESS ****.\n", __func__);
+
+	return rval;
+}
+
+/*
+ * qlafx00_fw_ready() - Waits for firmware ready.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_fw_ready(scsi_qla_host_t *vha)
+{
+	int rval;
+	unsigned long wtime;
+	uint16_t wait_time;	/* Wait time if loop is coming ready */
+	uint32_t state[5];
+
+	rval = QLA_SUCCESS;
+
+	wait_time = 10;
+
+	/* wait time before firmware ready */
+	wtime = jiffies + (wait_time * HZ);
+
+	/* Wait for ISP to finish init */
+	if (!vha->flags.init_done)
+		ql_dbg(ql_dbg_init, vha, 0x013a,
+		    "Waiting for init to complete...\n");
+
+	do {
+		rval = qlafx00_get_firmware_state(vha, state);
+
+		if (rval == QLA_SUCCESS) {
+			if (state[0] == FSTATE_FX00_INITIALIZED) {
+				ql_dbg(ql_dbg_init, vha, 0x013b,
+				    "fw_state=%x\n", state[0]);
+				rval = QLA_SUCCESS;
+				break;
+			}
+		}
+		rval = QLA_FUNCTION_FAILED;
+
+		if (time_after_eq(jiffies, wtime))
+			break;
+
+		/* Delay for a while */
+		msleep(500);
+
+		ql_dbg(ql_dbg_init, vha, 0x013c,
+		    "fw_state=%x curr time=%lx.\n", state[0], jiffies);
+	} while (1);
+
+	if (rval)
+		ql_dbg(ql_dbg_init, vha, 0x013d,
+		    "Firmware ready **** FAILED ****.\n");
+	else
+		ql_dbg(ql_dbg_init, vha, 0x013e,
+		    "Firmware ready **** SUCCESS ****.\n");
+
+	return rval;
+}
+
+static int
+qlafx00_find_all_targets(scsi_qla_host_t *vha,
+	struct list_head *new_fcports)
+{
+	int rval;
+	uint16_t tgt_id;
+	fc_port_t *fcport, *new_fcport;
+	int found;
+	struct qla_hw_data *ha = vha->hw;
+
+	rval = QLA_SUCCESS;
+
+	if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
+		return QLA_FUNCTION_FAILED;
+
+	if ((atomic_read(&vha->loop_down_timer) ||
+	    STATE_TRANSITION(vha))) {
+		atomic_set(&vha->loop_down_timer, 0);
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
+	    "Listing Target bit map...\n");
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
+	    0x2089, (uint8_t *)ha->gid_list, 32);
+
+	/* Allocate temporary rmtport for any new rmtports discovered. */
+	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (new_fcport == NULL)
+		return QLA_MEMORY_ALLOC_FAILED;
+
+	for_each_set_bit(tgt_id, (void *)ha->gid_list,
+	    QLAFX00_TGT_NODE_LIST_SIZE) {
+
+		/* Send get target node info */
+		new_fcport->tgt_id = tgt_id;
+		rval = qlafx00_fx_disc(vha, new_fcport,
+		    FXDISC_GET_TGT_NODE_INFO);
+		if (rval != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x208a,
+			    "Target info scan failed -- assuming zero-entry "
+			    "result...\n");
+			continue;
+		}
+
+		/* Locate matching device in database.
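+		 * Matching is by WWPN only; a port that comes back with
+		 * the same WWPN but a new tgt_id (and is no longer
+		 * FCS_ONLINE) is updated in place rather than treated as
+		 * a brand new device.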
*/ + found = 0; + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (memcmp(new_fcport->port_name, + fcport->port_name, WWN_SIZE)) + continue; + + found++; + + /* + * If tgt_id is same and state FCS_ONLINE, nothing + * changed. + */ + if (fcport->tgt_id == new_fcport->tgt_id && + atomic_read(&fcport->state) == FCS_ONLINE) + break; + + /* + * Tgt ID changed or device was marked to be updated. + */ + ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b, + "TGT-ID Change(%s): Present tgt id: " + "0x%x state: 0x%x " + "wwnn = %llx wwpn = %llx.\n", + __func__, fcport->tgt_id, + atomic_read(&fcport->state), + (unsigned long long)wwn_to_u64(fcport->node_name), + (unsigned long long)wwn_to_u64(fcport->port_name)); + + ql_log(ql_log_info, vha, 0x208c, + "TGT-ID Announce(%s): Discovered tgt " + "id 0x%x wwnn = %llx " + "wwpn = %llx.\n", __func__, new_fcport->tgt_id, + (unsigned long long) + wwn_to_u64(new_fcport->node_name), + (unsigned long long) + wwn_to_u64(new_fcport->port_name)); + + if (atomic_read(&fcport->state) != FCS_ONLINE) { + fcport->old_tgt_id = fcport->tgt_id; + fcport->tgt_id = new_fcport->tgt_id; + ql_log(ql_log_info, vha, 0x208d, + "TGT-ID: New fcport Added: %p\n", fcport); + qla2x00_update_fcport(vha, fcport); + } else { + ql_log(ql_log_info, vha, 0x208e, + " Existing TGT-ID %x did not get " + " offline event from firmware.\n", + fcport->old_tgt_id); + qla2x00_mark_device_lost(vha, fcport, 0, 0); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + kfree(new_fcport); + return rval; + } + break; + } + + if (found) + continue; + + /* If device was not in our fcports list, then add it. */ + list_add_tail(&new_fcport->list, new_fcports); + + /* Allocate a new replacement fcport. */ + new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (new_fcport == NULL) + return QLA_MEMORY_ALLOC_FAILED; + } + + kfree(new_fcport); + return rval; +} + +/* + * qlafx00_configure_all_targets + * Setup target devices with node ID's. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success. + * BIT_0 = error + */ +static int +qlafx00_configure_all_targets(scsi_qla_host_t *vha) +{ + int rval; + fc_port_t *fcport, *rmptemp; + LIST_HEAD(new_fcports); + + rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport, + FXDISC_GET_TGT_NODE_LIST); + if (rval != QLA_SUCCESS) { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + return rval; + } + + rval = qlafx00_find_all_targets(vha, &new_fcports); + if (rval != QLA_SUCCESS) { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + return rval; + } + + /* + * Delete all previous devices marked lost. + */ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + break; + + if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { + if (fcport->port_type != FCT_INITIATOR) + qla2x00_mark_device_lost(vha, fcport, 0, 0); + } + } + + /* + * Add the new devices to our devices list. + */ + list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) { + if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + break; + + qla2x00_update_fcport(vha, fcport); + list_move_tail(&fcport->list, &vha->vp_fcports); + ql_log(ql_log_info, vha, 0x208f, + "Attach new target id 0x%x wwnn = %llx " + "wwpn = %llx.\n", + fcport->tgt_id, + (unsigned long long)wwn_to_u64(fcport->node_name), + (unsigned long long)wwn_to_u64(fcport->port_name)); + } + + /* Free all new device structures not processed. 
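+	 * The walk below must use list_for_each_entry_safe() since each
+	 * entry is unlinked and kfree()d while iterating.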
+	 */
+	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
+		list_del(&fcport->list);
+		kfree(fcport);
+	}
+
+	return rval;
+}
+
+/*
+ * qlafx00_configure_devices
+ *	Updates Fibre Channel Device Database with what is actually on loop.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *
+ * Returns:
+ *	0 = success.
+ *	1 = error.
+ *	2 = database was full and device was not configured.
+ */
+int
+qlafx00_configure_devices(scsi_qla_host_t *vha)
+{
+	int rval;
+	unsigned long flags, save_flags;
+	rval = QLA_SUCCESS;
+
+	save_flags = flags = vha->dpc_flags;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2090,
+	    "Configure devices -- dpc flags = 0x%lx\n", flags);
+
+	rval = qlafx00_configure_all_targets(vha);
+
+	if (rval == QLA_SUCCESS) {
+		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+			rval = QLA_FUNCTION_FAILED;
+		} else {
+			atomic_set(&vha->loop_state, LOOP_READY);
+			ql_log(ql_log_info, vha, 0x2091,
+			    "Device Ready\n");
+		}
+	}
+
+	if (rval) {
+		ql_dbg(ql_dbg_disc, vha, 0x2092,
+		    "%s *** FAILED ***.\n", __func__);
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x2093,
+		    "%s: exiting normally.\n", __func__);
+	}
+	return rval;
+}
+
+static void
+qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
+{
+	struct qla_hw_data *ha = vha->hw;
+	fc_port_t *fcport;
+
+	vha->flags.online = 0;
+	ha->mr.fw_hbt_en = 0;
+
+	if (!critemp) {
+		ha->flags.chip_reset_done = 0;
+		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		vha->qla_stats.total_isp_aborts++;
+		ql_log(ql_log_info, vha, 0x013f,
+		    "Performing ISP error recovery - ha = %p.\n", ha);
+		ha->isp_ops->reset_chip(vha);
+	}
+
+	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+		atomic_set(&vha->loop_state, LOOP_DOWN);
+		atomic_set(&vha->loop_down_timer,
+		    QLAFX00_LOOP_DOWN_TIME);
+	} else {
+		if (!atomic_read(&vha->loop_down_timer))
+			atomic_set(&vha->loop_down_timer,
+			    QLAFX00_LOOP_DOWN_TIME);
+	}
+
+	/* Clear all async request states across all VPs. */
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		fcport->flags = 0;
+		if (atomic_read(&fcport->state) == FCS_ONLINE)
+			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+	}
+
+	if (!ha->flags.eeh_busy) {
+		if (critemp) {
+			qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+		} else {
+			/* Requeue all commands in outstanding command list. */
+			qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+		}
+	}
+
+	qla2x00_free_irqs(vha);
+	if (critemp)
+		set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
+	else
+		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+
+	/* Clear the Interrupts */
+	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+	ql_log(ql_log_info, vha, 0x0140,
+	    "%s Done - ha=%p.\n", __func__, ha);
+}
+
+/**
+ * qlafx00_init_response_q_entries() - Initializes response queue entries.
+ * @rsp: response queue
+ *
+ * Beginning of request ring has initialization control block already built
+ * by nvram config routine.
+ *
+ * Returns 0 on success.
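+ *
+ * Each slot is pre-stamped with RESPONSE_PROCESSED, both in memory and
+ * through the MMIO window, so that once the ring wraps a consumer can
+ * tell freshly written entries apart from slots it has already handled.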
+ */
+void
+qlafx00_init_response_q_entries(struct rsp_que *rsp)
+{
+	uint16_t cnt;
+	response_t *pkt;
+
+	rsp->ring_ptr = rsp->ring;
+	rsp->ring_index = 0;
+	rsp->status_srb = NULL;
+	pkt = rsp->ring_ptr;
+	for (cnt = 0; cnt < rsp->length; cnt++) {
+		pkt->signature = RESPONSE_PROCESSED;
+		WRT_REG_DWORD((void __iomem *)&pkt->signature,
+		    RESPONSE_PROCESSED);
+		pkt++;
+	}
+}
+
+int
+qlafx00_rescan_isp(scsi_qla_host_t *vha)
+{
+	uint32_t status = QLA_FUNCTION_FAILED;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+	uint32_t aenmbx7;
+
+	qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
+
+	aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+	ha->mbx_intr_code = MSW(aenmbx7);
+	ha->rqstq_intr_code = LSW(aenmbx7);
+	ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
+	ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
+	ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
+	ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+
+	ql_dbg(ql_dbg_disc, vha, 0x2094,
+	    "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
+	    " Req que offset 0x%x Rsp que offset 0x%x\n",
+	    ha->mbx_intr_code, ha->rqstq_intr_code,
+	    ha->req_que_off, ha->rsp_que_off);
+
+	/* Clear the Interrupts */
+	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+	status = qla2x00_init_rings(vha);
+	if (!status) {
+		vha->flags.online = 1;
+
+		/* if no cable then assume it's good */
+		if ((vha->device_flags & DFLG_NO_CABLE))
+			status = 0;
+		/* Register system information */
+		if (qlafx00_fx_disc(vha,
+		    &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
+			ql_dbg(ql_dbg_disc, vha, 0x2095,
+			    "failed to register host info\n");
+	}
+	scsi_unblock_requests(vha->host);
+	return status;
+}
+
+void
+qlafx00_timer_routine(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t fw_heart_beat;
+	uint32_t aenmbx0;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+	uint32_t tempc;
+
+	/* Check firmware health */
+	if (ha->mr.fw_hbt_cnt)
+		ha->mr.fw_hbt_cnt--;
+	else {
+		if ((!ha->flags.mr_reset_hdlr_active) &&
+		    (!test_bit(UNLOADING, &vha->dpc_flags)) &&
+		    (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
+		    (ha->mr.fw_hbt_en)) {
+			fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
+			if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
+				ha->mr.old_fw_hbt_cnt = fw_heart_beat;
+				ha->mr.fw_hbt_miss_cnt = 0;
+			} else {
+				ha->mr.fw_hbt_miss_cnt++;
+				if (ha->mr.fw_hbt_miss_cnt ==
+				    QLAFX00_HEARTBEAT_MISS_CNT) {
+					set_bit(ISP_ABORT_NEEDED,
+					    &vha->dpc_flags);
+					qla2xxx_wake_dpc(vha);
+					ha->mr.fw_hbt_miss_cnt = 0;
+				}
+			}
+		}
+		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
+	}
+
+	if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
+		/* Reset recovery to be performed in timer routine */
+		aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
+		if (ha->mr.fw_reset_timer_exp) {
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+			ha->mr.fw_reset_timer_exp = 0;
+		} else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
+			/* Wake up DPC to rescan the targets */
+			set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
+			clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+		} else if ((aenmbx0 == MBA_FW_STARTING) &&
+		    (!ha->mr.fw_hbt_en)) {
+			ha->mr.fw_hbt_en = 1;
+		} else if (!ha->mr.fw_reset_timer_tick) {
+			if (aenmbx0 == ha->mr.old_aenmbx0_state)
+				ha->mr.fw_reset_timer_exp = 1;
+			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+		} else if (aenmbx0 == 0xFFFFFFFF) {
+			uint32_t data0, data1;
+
+			data0 = QLAFX00_RD_REG(ha,
+			    QLAFX00_BAR1_BASE_ADDR_REG);
+			data1 = QLAFX00_RD_REG(ha,
+			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG);
+
+			data0 &= 0xffff0000;
+			data1 &= 0x0000ffff;
+
+			QLAFX00_WR_REG(ha,
+			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
+			    (data0 | data1));
+		} else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
+			ha->mr.fw_reset_timer_tick =
+			    QLAFX00_MAX_RESET_INTERVAL;
+		} else if (aenmbx0 == MBA_FW_RESET_FCT) {
+			ha->mr.fw_reset_timer_tick =
+			    QLAFX00_MAX_RESET_INTERVAL;
+		}
+		ha->mr.old_aenmbx0_state = aenmbx0;
+		ha->mr.fw_reset_timer_tick--;
+	}
+	if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
+		/*
+		 * Critical temperature recovery to be
+		 * performed in timer routine
+		 */
+		if (ha->mr.fw_critemp_timer_tick == 0) {
+			tempc = QLAFX00_GET_TEMPERATURE(ha);
+			ql_dbg(ql_dbg_timer, vha, 0x6012,
+			    "ISPFx00(%s): Critical temp timer, "
+			    "current SOC temperature: %d\n",
+			    __func__, tempc);
+			if (tempc < ha->mr.critical_temperature) {
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+				clear_bit(FX00_CRITEMP_RECOVERY,
+				    &vha->dpc_flags);
+				qla2xxx_wake_dpc(vha);
+			}
+			ha->mr.fw_critemp_timer_tick =
+			    QLAFX00_CRITEMP_INTERVAL;
+		} else {
+			ha->mr.fw_critemp_timer_tick--;
+		}
+	}
+	if (ha->mr.host_info_resend) {
+		/*
+		 * Incomplete host info might be sent to firmware
+		 * during system boot - it should be resent.
+		 */
+		if (ha->mr.hinfo_resend_timer_tick == 0) {
+			ha->mr.host_info_resend = false;
+			set_bit(FX00_HOST_INFO_RESEND, &vha->dpc_flags);
+			ha->mr.hinfo_resend_timer_tick =
+			    QLAFX00_HINFO_RESEND_INTERVAL;
+			qla2xxx_wake_dpc(vha);
+		} else {
+			ha->mr.hinfo_resend_timer_tick--;
+		}
+	}
+}
+
+/*
+ * qlafx00_reset_initialize
+ *	Re-initialize after an iSA device reset.
+ *
+ * Input:
+ *	ha  = adapter block pointer.
+ *
+ * Returns:
+ *	0 = success
+ */
+int
+qlafx00_reset_initialize(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->device_flags & DFLG_DEV_FAILED) {
+		ql_dbg(ql_dbg_init, vha, 0x0142,
+		    "Device in failed state\n");
+		return QLA_SUCCESS;
+	}
+
+	ha->flags.mr_reset_hdlr_active = 1;
+
+	if (vha->flags.online) {
+		scsi_block_requests(vha->host);
+		qlafx00_abort_isp_cleanup(vha, false);
+	}
+
+	ql_log(ql_log_info, vha, 0x0143,
+	    "(%s): succeeded.\n", __func__);
+	ha->flags.mr_reset_hdlr_active = 0;
+	return QLA_SUCCESS;
+}
+
+/*
+ * qlafx00_abort_isp
+ *	Resets ISP and aborts all outstanding commands.
+ *
+ * Input:
+ *	ha  = adapter block pointer.
+ *
+ * Returns:
+ *	0 = success
+ */
+int
+qlafx00_abort_isp(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->flags.online) {
+		if (unlikely(pci_channel_offline(ha->pdev) &&
+		    ha->flags.pci_channel_io_perm_failure)) {
+			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+			return QLA_SUCCESS;
+		}
+
+		scsi_block_requests(vha->host);
+		qlafx00_abort_isp_cleanup(vha, false);
+	} else {
+		scsi_block_requests(vha->host);
+		clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		vha->qla_stats.total_isp_aborts++;
+		ha->isp_ops->reset_chip(vha);
+		set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+		/* Clear the Interrupts */
+		QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+	}
+
+	ql_log(ql_log_info, vha, 0x0145,
+	    "(%s): succeeded.\n", __func__);
+
+	return QLA_SUCCESS;
+}
+
+static inline fc_port_t*
+qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
+{
+	fc_port_t	*fcport;
+
+	/* Check for matching device in remote port list.
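+	 * This resolves by tgt_id rather than WWPN; the AEN path uses it
+	 * (via qlafx00_tgt_detach() below) to map the target id carried
+	 * in a port-update event back to its fc_port_t.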
*/ + fcport = NULL; + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->tgt_id == tgt_id) { + ql_dbg(ql_dbg_async, vha, 0x5072, + "Matching fcport(%p) found with TGT-ID: 0x%x " + "and Remote TGT_ID: 0x%x\n", + fcport, fcport->tgt_id, tgt_id); + break; + } + } + return fcport; +} + +static void +qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id) +{ + fc_port_t *fcport; + + ql_log(ql_log_info, vha, 0x5073, + "Detach TGT-ID: 0x%x\n", tgt_id); + + fcport = qlafx00_get_fcport(vha, tgt_id); + if (!fcport) + return; + + qla2x00_mark_device_lost(vha, fcport, 0, 0); + + return; +} + +int +qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt) +{ + int rval = 0; + uint32_t aen_code, aen_data; + + aen_code = FCH_EVT_VENDOR_UNIQUE; + aen_data = evt->u.aenfx.evtcode; + + switch (evt->u.aenfx.evtcode) { + case QLAFX00_MBA_PORT_UPDATE: /* Port database update */ + if (evt->u.aenfx.mbx[1] == 0) { + if (evt->u.aenfx.mbx[2] == 1) { + if (!vha->flags.fw_tgt_reported) + vha->flags.fw_tgt_reported = 1; + atomic_set(&vha->loop_down_timer, 0); + atomic_set(&vha->loop_state, LOOP_UP); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else if (evt->u.aenfx.mbx[2] == 2) { + qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]); + } + } else if (evt->u.aenfx.mbx[1] == 0xffff) { + if (evt->u.aenfx.mbx[2] == 1) { + if (!vha->flags.fw_tgt_reported) + vha->flags.fw_tgt_reported = 1; + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + } else if (evt->u.aenfx.mbx[2] == 2) { + vha->device_flags |= DFLG_NO_CABLE; + qla2x00_mark_all_devices_lost(vha, 1); + } + } + break; + case QLAFX00_MBA_LINK_UP: + aen_code = FCH_EVT_LINKUP; + aen_data = 0; + break; + case QLAFX00_MBA_LINK_DOWN: + aen_code = FCH_EVT_LINKDOWN; + aen_data = 0; + break; + case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */ + ql_log(ql_log_info, vha, 0x5082, + "Process critical temperature event " + "aenmb[0]: %x\n", + evt->u.aenfx.evtcode); + scsi_block_requests(vha->host); + qlafx00_abort_isp_cleanup(vha, true); + scsi_unblock_requests(vha->host); + break; + } + + fc_host_post_event(vha->host, fc_get_event_number(), + aen_code, aen_data); + + return rval; +} + +static void +qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo) +{ + u64 port_name = 0, node_name = 0; + + port_name = (unsigned long long)wwn_to_u64(pinfo->port_name); + node_name = (unsigned long long)wwn_to_u64(pinfo->node_name); + + fc_host_node_name(vha->host) = node_name; + fc_host_port_name(vha->host) = port_name; + if (!pinfo->port_type) + vha->hw->current_topology = ISP_CFG_F; + if (pinfo->link_status == QLAFX00_LINK_STATUS_UP) + atomic_set(&vha->loop_state, LOOP_READY); + else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN) + atomic_set(&vha->loop_state, LOOP_DOWN); + vha->hw->link_data_rate = (uint16_t)pinfo->link_config; +} + +static void +qla2x00_fxdisc_iocb_timeout(void *data) +{ + srb_t *sp = (srb_t *)data; + struct srb_iocb *lio = &sp->u.iocb_cmd; + + complete(&lio->u.fxiocb.fxiocb_comp); +} + +static void +qla2x00_fxdisc_sp_done(void *data, void *ptr, int res) +{ + srb_t *sp = (srb_t *)ptr; + struct srb_iocb *lio = &sp->u.iocb_cmd; + + complete(&lio->u.fxiocb.fxiocb_comp); +} + +int +qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) +{ + srb_t *sp; + struct srb_iocb *fdisc; + int rval = QLA_FUNCTION_FAILED; + struct qla_hw_data *ha = vha->hw; + struct host_system_info *phost_info; + struct register_host_info *preg_hsi; + struct new_utsname *p_sysid = NULL; + 
struct timeval tv; + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) + goto done; + + fdisc = &sp->u.iocb_cmd; + switch (fx_type) { + case FXDISC_GET_CONFIG_INFO: + fdisc->u.fxiocb.flags = + SRB_FXDISC_RESP_DMA_VALID; + fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data); + break; + case FXDISC_GET_PORT_INFO: + fdisc->u.fxiocb.flags = + SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID; + fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO; + fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id); + break; + case FXDISC_GET_TGT_NODE_INFO: + fdisc->u.fxiocb.flags = + SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID; + fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO; + fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id); + break; + case FXDISC_GET_TGT_NODE_LIST: + fdisc->u.fxiocb.flags = + SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID; + fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE; + break; + case FXDISC_REG_HOST_INFO: + fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID; + fdisc->u.fxiocb.req_len = sizeof(struct register_host_info); + p_sysid = utsname(); + if (!p_sysid) { + ql_log(ql_log_warn, vha, 0x303c, + "Not able to get the system information\n"); + goto done_free_sp; + } + break; + case FXDISC_ABORT_IOCTL: + default: + break; + } + + if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) { + fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev, + fdisc->u.fxiocb.req_len, + &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL); + if (!fdisc->u.fxiocb.req_addr) + goto done_free_sp; + + if (fx_type == FXDISC_REG_HOST_INFO) { + preg_hsi = (struct register_host_info *) + fdisc->u.fxiocb.req_addr; + phost_info = &preg_hsi->hsi; + memset(preg_hsi, 0, sizeof(struct register_host_info)); + phost_info->os_type = OS_TYPE_LINUX; + strncpy(phost_info->sysname, + p_sysid->sysname, SYSNAME_LENGTH); + strncpy(phost_info->nodename, + p_sysid->nodename, NODENAME_LENGTH); + if (!strcmp(phost_info->nodename, "(none)")) + ha->mr.host_info_resend = true; + strncpy(phost_info->release, + p_sysid->release, RELEASE_LENGTH); + strncpy(phost_info->version, + p_sysid->version, VERSION_LENGTH); + strncpy(phost_info->machine, + p_sysid->machine, MACHINE_LENGTH); + strncpy(phost_info->domainname, + p_sysid->domainname, DOMNAME_LENGTH); + strncpy(phost_info->hostdriver, + QLA2XXX_VERSION, VERSION_LENGTH); + do_gettimeofday(&tv); + preg_hsi->utc = (uint64_t)tv.tv_sec; + ql_dbg(ql_dbg_init, vha, 0x0149, + "ISP%04X: Host registration with firmware\n", + ha->pdev->device); + ql_dbg(ql_dbg_init, vha, 0x014a, + "os_type = '%d', sysname = '%s', nodname = '%s'\n", + phost_info->os_type, + phost_info->sysname, + phost_info->nodename); + ql_dbg(ql_dbg_init, vha, 0x014b, + "release = '%s', version = '%s'\n", + phost_info->release, + phost_info->version); + ql_dbg(ql_dbg_init, vha, 0x014c, + "machine = '%s' " + "domainname = '%s', hostdriver = '%s'\n", + phost_info->machine, + phost_info->domainname, + phost_info->hostdriver); + ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d, + (uint8_t *)phost_info, + sizeof(struct host_system_info)); + } + } + + if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) { + fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev, + fdisc->u.fxiocb.rsp_len, + &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL); + if (!fdisc->u.fxiocb.rsp_addr) + goto done_unmap_req; + } + + sp->type = SRB_FXIOCB_DCMD; + sp->name = "fxdisc"; + qla2x00_init_timer(sp, FXDISC_TIMEOUT); + fdisc->timeout = qla2x00_fxdisc_iocb_timeout; + fdisc->u.fxiocb.req_func_type = 
cpu_to_le16(fx_type); + sp->done = qla2x00_fxdisc_sp_done; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_unmap_dma; + + wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp); + + if (fx_type == FXDISC_GET_CONFIG_INFO) { + struct config_info_data *pinfo = + (struct config_info_data *) fdisc->u.fxiocb.rsp_addr; + strcpy(vha->hw->model_number, pinfo->model_num); + strcpy(vha->hw->model_desc, pinfo->model_description); + memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name, + sizeof(vha->hw->mr.symbolic_name)); + memcpy(&vha->hw->mr.serial_num, pinfo->serial_num, + sizeof(vha->hw->mr.serial_num)); + memcpy(&vha->hw->mr.hw_version, pinfo->hw_version, + sizeof(vha->hw->mr.hw_version)); + memcpy(&vha->hw->mr.fw_version, pinfo->fw_version, + sizeof(vha->hw->mr.fw_version)); + strim(vha->hw->mr.fw_version); + memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version, + sizeof(vha->hw->mr.uboot_version)); + memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num, + sizeof(vha->hw->mr.fru_serial_num)); + vha->hw->mr.critical_temperature = + (pinfo->nominal_temp_value) ? + pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD; + ha->mr.extended_io_enabled = (pinfo->enabled_capabilities & + QLAFX00_EXTENDED_IO_EN_MASK) != 0; + } else if (fx_type == FXDISC_GET_PORT_INFO) { + struct port_info_data *pinfo = + (struct port_info_data *) fdisc->u.fxiocb.rsp_addr; + memcpy(vha->node_name, pinfo->node_name, WWN_SIZE); + memcpy(vha->port_name, pinfo->port_name, WWN_SIZE); + vha->d_id.b.domain = pinfo->port_id[0]; + vha->d_id.b.area = pinfo->port_id[1]; + vha->d_id.b.al_pa = pinfo->port_id[2]; + qlafx00_update_host_attr(vha, pinfo); + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141, + (uint8_t *)pinfo, 16); + } else if (fx_type == FXDISC_GET_TGT_NODE_INFO) { + struct qlafx00_tgt_node_info *pinfo = + (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr; + memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE); + memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE); + fcport->port_type = FCT_TARGET; + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144, + (uint8_t *)pinfo, 16); + } else if (fx_type == FXDISC_GET_TGT_NODE_LIST) { + struct qlafx00_tgt_node_info *pinfo = + (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr; + ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146, + (uint8_t *)pinfo, 16); + memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE); + } else if (fx_type == FXDISC_ABORT_IOCTL) + fdisc->u.fxiocb.result = + (fdisc->u.fxiocb.result == + cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ? + cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED); + + rval = le32_to_cpu(fdisc->u.fxiocb.result); + +done_unmap_dma: + if (fdisc->u.fxiocb.rsp_addr) + dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len, + fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle); + +done_unmap_req: + if (fdisc->u.fxiocb.req_addr) + dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len, + fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle); +done_free_sp: + sp->free(vha, sp); +done: + return rval; +} + +/* + * qlafx00_initialize_adapter + * Initialize board. + * + * Input: + * ha = adapter block pointer. + * + * Returns: + * 0 = success + */ +int +qlafx00_initialize_adapter(scsi_qla_host_t *vha) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + uint32_t tempc; + + /* Clear adapter flags. 
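+	 * Start from a known-down state: the link is treated as absent
+	 * (DFLG_NO_CABLE, LOOP_DOWN) until the firmware handshake and
+	 * target discovery prove otherwise.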
*/ + vha->flags.online = 0; + ha->flags.chip_reset_done = 0; + vha->flags.reset_active = 0; + ha->flags.pci_channel_io_perm_failure = 0; + ha->flags.eeh_busy = 0; + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + atomic_set(&vha->loop_state, LOOP_DOWN); + vha->device_flags = DFLG_NO_CABLE; + vha->dpc_flags = 0; + vha->flags.management_server_logged_in = 0; + ha->isp_abort_cnt = 0; + ha->beacon_blink_led = 0; + + set_bit(0, ha->req_qid_map); + set_bit(0, ha->rsp_qid_map); + + ql_dbg(ql_dbg_init, vha, 0x0147, + "Configuring PCI space...\n"); + + rval = ha->isp_ops->pci_config(vha); + if (rval) { + ql_log(ql_log_warn, vha, 0x0148, + "Unable to configure PCI space.\n"); + return rval; + } + + rval = qlafx00_init_fw_ready(vha); + if (rval != QLA_SUCCESS) + return rval; + + qlafx00_save_queue_ptrs(vha); + + rval = qlafx00_config_queues(vha); + if (rval != QLA_SUCCESS) + return rval; + + /* + * Allocate the array of outstanding commands + * now that we know the firmware resources. + */ + rval = qla2x00_alloc_outstanding_cmds(ha, vha->req); + if (rval != QLA_SUCCESS) + return rval; + + rval = qla2x00_init_rings(vha); + ha->flags.chip_reset_done = 1; + + tempc = QLAFX00_GET_TEMPERATURE(ha); + ql_dbg(ql_dbg_init, vha, 0x0152, + "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n", + __func__, tempc); + + return rval; +} + +uint32_t +qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int rval = QLA_FUNCTION_FAILED; + uint32_t state[1]; + + if (qla2x00_reset_active(vha)) + ql_log(ql_log_warn, vha, 0x70ce, + "ISP reset active.\n"); + else if (!vha->hw->flags.eeh_busy) { + rval = qlafx00_get_firmware_state(vha, state); + } + if (rval != QLA_SUCCESS) + memset(state, -1, sizeof(state)); + + return state[0]; +} + +void +qlafx00_get_host_speed(struct Scsi_Host *shost) +{ + struct qla_hw_data *ha = ((struct scsi_qla_host *) + (shost_priv(shost)))->hw; + u32 speed = FC_PORTSPEED_UNKNOWN; + + switch (ha->link_data_rate) { + case QLAFX00_PORT_SPEED_2G: + speed = FC_PORTSPEED_2GBIT; + break; + case QLAFX00_PORT_SPEED_4G: + speed = FC_PORTSPEED_4GBIT; + break; + case QLAFX00_PORT_SPEED_8G: + speed = FC_PORTSPEED_8GBIT; + break; + case QLAFX00_PORT_SPEED_10G: + speed = FC_PORTSPEED_10GBIT; + break; + } + fc_host_speed(shost) = speed; +} + +/** QLAFX00 specific ISR implementation functions */ + +static inline void +qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, + uint32_t sense_len, struct rsp_que *rsp, int res) +{ + struct scsi_qla_host *vha = sp->fcport->vha; + struct scsi_cmnd *cp = GET_CMD_SP(sp); + uint32_t track_sense_len; + + SET_FW_SENSE_LEN(sp, sense_len); + + if (sense_len >= SCSI_SENSE_BUFFERSIZE) + sense_len = SCSI_SENSE_BUFFERSIZE; + + SET_CMD_SENSE_LEN(sp, sense_len); + SET_CMD_SENSE_PTR(sp, cp->sense_buffer); + track_sense_len = sense_len; + + if (sense_len > par_sense_len) + sense_len = par_sense_len; + + memcpy(cp->sense_buffer, sense_data, sense_len); + + SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len); + + SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); + track_sense_len -= sense_len; + SET_CMD_SENSE_LEN(sp, track_sense_len); + + ql_dbg(ql_dbg_io, vha, 0x304d, + "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n", + sense_len, par_sense_len, track_sense_len); + if (GET_FW_SENSE_LEN(sp) > 0) { + rsp->status_srb = sp; + cp->result = res; + } + + if (sense_len) { + ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039, + "Check condition 
Sense data, nexus%ld:%d:%d cmd=%p.\n", + sp->fcport->vha->host_no, cp->device->id, cp->device->lun, + cp); + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049, + cp->sense_buffer, sense_len); + } +} + +static void +qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp, + __le16 sstatus, __le16 cpstatus) +{ + struct srb_iocb *tmf; + + tmf = &sp->u.iocb_cmd; + if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) || + (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID))) + cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE); + tmf->u.tmf.comp_status = cpstatus; + sp->done(vha, sp, 0); +} + +static void +qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct abort_iocb_entry_fx00 *pkt) +{ + const char func[] = "ABT_IOCB"; + srb_t *sp; + struct srb_iocb *abt; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + abt = &sp->u.iocb_cmd; + abt->u.abt.comp_status = pkt->tgt_id_sts; + sp->done(vha, sp, 0); +} + +static void +qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct ioctl_iocb_entry_fx00 *pkt) +{ + const char func[] = "IOSB_IOCB"; + srb_t *sp; + struct fc_bsg_job *bsg_job; + struct srb_iocb *iocb_job; + int res; + struct qla_mt_iocb_rsp_fx00 fstatus; + uint8_t *fw_sts_ptr; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + if (sp->type == SRB_FXIOCB_DCMD) { + iocb_job = &sp->u.iocb_cmd; + iocb_job->u.fxiocb.seq_number = pkt->seq_no; + iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags; + iocb_job->u.fxiocb.result = pkt->status; + if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID) + iocb_job->u.fxiocb.req_data = + pkt->dataword_r; + } else { + bsg_job = sp->u.bsg_job; + + memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00)); + + fstatus.reserved_1 = pkt->reserved_0; + fstatus.func_type = pkt->comp_func_num; + fstatus.ioctl_flags = pkt->fw_iotcl_flags; + fstatus.ioctl_data = pkt->dataword_r; + fstatus.adapid = pkt->adapid; + fstatus.reserved_2 = pkt->dataword_r_extra; + fstatus.res_count = pkt->residuallen; + fstatus.status = pkt->status; + fstatus.seq_number = pkt->seq_no; + memcpy(fstatus.reserved_3, + pkt->reserved_2, 20 * sizeof(uint8_t)); + + fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + + sizeof(struct fc_bsg_reply); + + memcpy(fw_sts_ptr, (uint8_t *)&fstatus, + sizeof(struct qla_mt_iocb_rsp_fx00)); + bsg_job->reply_len = sizeof(struct fc_bsg_reply) + + sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t); + + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, + sp->fcport->vha, 0x5080, + (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00)); + + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, + sp->fcport->vha, 0x5074, + (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00)); + + res = bsg_job->reply->result = DID_OK << 16; + bsg_job->reply->reply_payload_rcv_len = + bsg_job->reply_payload.payload_len; + } + sp->done(vha, sp, res); +} + +/** + * qlafx00_status_entry() - Process a Status IOCB entry. 
+ * @ha: SCSI driver HA context + * @pkt: Entry pointer + */ +static void +qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) +{ + srb_t *sp; + fc_port_t *fcport; + struct scsi_cmnd *cp; + struct sts_entry_fx00 *sts; + __le16 comp_status; + __le16 scsi_status; + uint16_t ox_id; + __le16 lscsi_status; + int32_t resid; + uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, + fw_resid_len; + uint8_t *rsp_info = NULL, *sense_data = NULL; + struct qla_hw_data *ha = vha->hw; + uint32_t hindex, handle; + uint16_t que; + struct req_que *req; + int logit = 1; + int res = 0; + + sts = (struct sts_entry_fx00 *) pkt; + + comp_status = sts->comp_status; + scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK); + hindex = sts->handle; + handle = LSW(hindex); + + que = MSW(hindex); + req = ha->req_q_map[que]; + + /* Validate handle. */ + if (handle < req->num_outstanding_cmds) + sp = req->outstanding_cmds[handle]; + else + sp = NULL; + + if (sp == NULL) { + ql_dbg(ql_dbg_io, vha, 0x3034, + "Invalid status handle (0x%x).\n", handle); + + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + return; + } + + if (sp->type == SRB_TM_CMD) { + req->outstanding_cmds[handle] = NULL; + qlafx00_tm_iocb_entry(vha, req, pkt, sp, + scsi_status, comp_status); + return; + } + + /* Fast path completion. */ + if (comp_status == CS_COMPLETE && scsi_status == 0) { + qla2x00_process_completed_request(vha, req, handle); + return; + } + + req->outstanding_cmds[handle] = NULL; + cp = GET_CMD_SP(sp); + if (cp == NULL) { + ql_dbg(ql_dbg_io, vha, 0x3048, + "Command already returned (0x%x/%p).\n", + handle, sp); + + return; + } + + lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK); + + fcport = sp->fcport; + + ox_id = 0; + sense_len = par_sense_len = rsp_info_len = resid_len = + fw_resid_len = 0; + if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)) + sense_len = sts->sense_len; + if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER + | (uint16_t)SS_RESIDUAL_OVER))) + resid_len = le32_to_cpu(sts->residual_len); + if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN)) + fw_resid_len = le32_to_cpu(sts->residual_len); + rsp_info = sense_data = sts->data; + par_sense_len = sizeof(sts->data); + + /* Check for overrun. 
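+	 * A command that completed (CS_COMPLETE) but flags
+	 * SS_RESIDUAL_OVER in its SCSI status is reclassified as
+	 * CS_DATA_OVERRUN so it drops into the default error leg of the
+	 * switch below (DID_ERROR).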
*/ + if (comp_status == CS_COMPLETE && + scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER)) + comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN); + + /* + * Based on Host and scsi status generate status code for Linux + */ + switch (le16_to_cpu(comp_status)) { + case CS_COMPLETE: + case CS_QUEUE_FULL: + if (scsi_status == 0) { + res = DID_OK << 16; + break; + } + if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER + | (uint16_t)SS_RESIDUAL_OVER))) { + resid = resid_len; + scsi_set_resid(cp, resid); + + if (!lscsi_status && + ((unsigned)(scsi_bufflen(cp) - resid) < + cp->underflow)) { + ql_dbg(ql_dbg_io, fcport->vha, 0x3050, + "Mid-layer underflow " + "detected (0x%x of 0x%x bytes).\n", + resid, scsi_bufflen(cp)); + + res = DID_ERROR << 16; + break; + } + } + res = DID_OK << 16 | le16_to_cpu(lscsi_status); + + if (lscsi_status == + cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) { + ql_dbg(ql_dbg_io, fcport->vha, 0x3051, + "QUEUE FULL detected.\n"); + break; + } + logit = 0; + if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION)) + break; + + memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))) + break; + + qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len, + rsp, res); + break; + + case CS_DATA_UNDERRUN: + /* Use F/W calculated residual length. */ + if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) + resid = fw_resid_len; + else + resid = resid_len; + scsi_set_resid(cp, resid); + if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) { + if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) + && fw_resid_len != resid_len) { + ql_dbg(ql_dbg_io, fcport->vha, 0x3052, + "Dropped frame(s) detected " + "(0x%x of 0x%x bytes).\n", + resid, scsi_bufflen(cp)); + + res = DID_ERROR << 16 | + le16_to_cpu(lscsi_status); + goto check_scsi_status; + } + + if (!lscsi_status && + ((unsigned)(scsi_bufflen(cp) - resid) < + cp->underflow)) { + ql_dbg(ql_dbg_io, fcport->vha, 0x3053, + "Mid-layer underflow " + "detected (0x%x of 0x%x bytes, " + "cp->underflow: 0x%x).\n", + resid, scsi_bufflen(cp), cp->underflow); + + res = DID_ERROR << 16; + break; + } + } else if (lscsi_status != + cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) && + lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) { + /* + * scsi status of task set and busy are considered + * to be task not completed. + */ + + ql_dbg(ql_dbg_io, fcport->vha, 0x3054, + "Dropped frame(s) detected (0x%x " + "of 0x%x bytes).\n", resid, + scsi_bufflen(cp)); + + res = DID_ERROR << 16 | le16_to_cpu(lscsi_status); + goto check_scsi_status; + } else { + ql_dbg(ql_dbg_io, fcport->vha, 0x3055, + "scsi_status: 0x%x, lscsi_status: 0x%x\n", + scsi_status, lscsi_status); + } + + res = DID_OK << 16 | le16_to_cpu(lscsi_status); + logit = 0; + +check_scsi_status: + /* + * Check to see if SCSI Status is non zero. If so report SCSI + * Status. 
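+		 * The midlayer result packs the host byte with the SCSI
+		 * status byte, e.g. a CHECK CONDITION completion is
+		 * reported as (DID_OK << 16) | SAM_STAT_CHECK_CONDITION,
+		 * i.e. 0x02, with the sense data copied alongside.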
+ */ + if (lscsi_status != 0) { + if (lscsi_status == + cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) { + ql_dbg(ql_dbg_io, fcport->vha, 0x3056, + "QUEUE FULL detected.\n"); + logit = 1; + break; + } + if (lscsi_status != + cpu_to_le16((uint16_t)SS_CHECK_CONDITION)) + break; + + memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + if (!(scsi_status & + cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))) + break; + + qlafx00_handle_sense(sp, sense_data, par_sense_len, + sense_len, rsp, res); + } + break; + + case CS_PORT_LOGGED_OUT: + case CS_PORT_CONFIG_CHG: + case CS_PORT_BUSY: + case CS_INCOMPLETE: + case CS_PORT_UNAVAILABLE: + case CS_TIMEOUT: + case CS_RESET: + + /* + * We are going to have the fc class block the rport + * while we try to recover so instruct the mid layer + * to requeue until the class decides how to handle this. + */ + res = DID_TRANSPORT_DISRUPTED << 16; + + ql_dbg(ql_dbg_io, fcport->vha, 0x3057, + "Port down status: port-state=0x%x.\n", + atomic_read(&fcport->state)); + + if (atomic_read(&fcport->state) == FCS_ONLINE) + qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); + break; + + case CS_ABORTED: + res = DID_RESET << 16; + break; + + default: + res = DID_ERROR << 16; + break; + } + + if (logit) + ql_dbg(ql_dbg_io, fcport->vha, 0x3058, + "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d " + "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x " + "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, " + "par_sense_len=0x%x, rsp_info_len=0x%x\n", + comp_status, scsi_status, res, vha->host_no, + cp->device->id, cp->device->lun, fcport->tgt_id, + lscsi_status, cp->cmnd, scsi_bufflen(cp), + rsp_info_len, resid_len, fw_resid_len, sense_len, + par_sense_len, rsp_info_len); + + if (rsp->status_srb == NULL) + sp->done(ha, sp, res); +} + +/** + * qlafx00_status_cont_entry() - Process a Status Continuations entry. + * @ha: SCSI driver HA context + * @pkt: Entry pointer + * + * Extended sense data. + */ +static void +qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) +{ + uint8_t sense_sz = 0; + struct qla_hw_data *ha = rsp->hw; + struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); + srb_t *sp = rsp->status_srb; + struct scsi_cmnd *cp; + uint32_t sense_len; + uint8_t *sense_ptr; + + if (!sp) { + ql_dbg(ql_dbg_io, vha, 0x3037, + "no SP, sp = %p\n", sp); + return; + } + + if (!GET_FW_SENSE_LEN(sp)) { + ql_dbg(ql_dbg_io, vha, 0x304b, + "no fw sense data, sp = %p\n", sp); + return; + } + cp = GET_CMD_SP(sp); + if (cp == NULL) { + ql_log(ql_log_warn, vha, 0x303b, + "cmd is NULL: already returned to OS (sp=%p).\n", sp); + + rsp->status_srb = NULL; + return; + } + + if (!GET_CMD_SENSE_LEN(sp)) { + ql_dbg(ql_dbg_io, vha, 0x304c, + "no sense data, sp = %p\n", sp); + } else { + sense_len = GET_CMD_SENSE_LEN(sp); + sense_ptr = GET_CMD_SENSE_PTR(sp); + ql_dbg(ql_dbg_io, vha, 0x304f, + "sp=%p sense_len=0x%x sense_ptr=%p.\n", + sp, sense_len, sense_ptr); + + if (sense_len > sizeof(pkt->data)) + sense_sz = sizeof(pkt->data); + else + sense_sz = sense_len; + + /* Move sense data. */ + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e, + (uint8_t *)pkt, sizeof(sts_cont_entry_t)); + memcpy(sense_ptr, pkt->data, sense_sz); + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a, + sense_ptr, sense_sz); + + sense_len -= sense_sz; + sense_ptr += sense_sz; + + SET_CMD_SENSE_PTR(sp, sense_ptr); + SET_CMD_SENSE_LEN(sp, sense_len); + } + sense_len = GET_FW_SENSE_LEN(sp); + sense_len = (sense_len > sizeof(pkt->data)) ? 
+ (sense_len - sizeof(pkt->data)) : 0; + SET_FW_SENSE_LEN(sp, sense_len); + + /* Place command on done queue. */ + if (sense_len == 0) { + rsp->status_srb = NULL; + sp->done(ha, sp, cp->result); + } +} + +/** + * qlafx00_multistatus_entry() - Process Multi response queue entries. + * @ha: SCSI driver HA context + */ +static void +qlafx00_multistatus_entry(struct scsi_qla_host *vha, + struct rsp_que *rsp, void *pkt) +{ + srb_t *sp; + struct multi_sts_entry_fx00 *stsmfx; + struct qla_hw_data *ha = vha->hw; + uint32_t handle, hindex, handle_count, i; + uint16_t que; + struct req_que *req; + __le32 *handle_ptr; + + stsmfx = (struct multi_sts_entry_fx00 *) pkt; + + handle_count = stsmfx->handle_count; + + if (handle_count > MAX_HANDLE_COUNT) { + ql_dbg(ql_dbg_io, vha, 0x3035, + "Invalid handle count (0x%x).\n", handle_count); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + return; + } + + handle_ptr = &stsmfx->handles[0]; + + for (i = 0; i < handle_count; i++) { + hindex = le32_to_cpu(*handle_ptr); + handle = LSW(hindex); + que = MSW(hindex); + req = ha->req_q_map[que]; + + /* Validate handle. */ + if (handle < req->num_outstanding_cmds) + sp = req->outstanding_cmds[handle]; + else + sp = NULL; + + if (sp == NULL) { + ql_dbg(ql_dbg_io, vha, 0x3044, + "Invalid status handle (0x%x).\n", handle); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + return; + } + qla2x00_process_completed_request(vha, req, handle); + handle_ptr++; + } +} + +/** + * qlafx00_error_entry() - Process an error entry. + * @ha: SCSI driver HA context + * @pkt: Entry pointer + */ +static void +qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, + struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype) +{ + srb_t *sp; + struct qla_hw_data *ha = vha->hw; + const char func[] = "ERROR-IOCB"; + uint16_t que = 0; + struct req_que *req = NULL; + int res = DID_ERROR << 16; + + ql_dbg(ql_dbg_async, vha, 0x507f, + "type of error status in response: 0x%x\n", estatus); + + req = ha->req_q_map[que]; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (sp) { + sp->done(ha, sp, res); + return; + } + + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); +} + +/** + * qlafx00_process_response_queue() - Process response queue entries. 
+ * @ha: SCSI driver HA context
+ */
+static void
+qlafx00_process_response_queue(struct scsi_qla_host *vha,
+ struct rsp_que *rsp)
+{
+ struct sts_entry_fx00 *pkt;
+ response_t *lptr;
+ uint16_t lreq_q_in = 0;
+ uint16_t lreq_q_out = 0;
+
+ lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
+ lreq_q_out = rsp->ring_index;
+
+ while (lreq_q_in != lreq_q_out) {
+ lptr = rsp->ring_ptr;
+ memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
+ sizeof(rsp->rsp_pkt));
+ pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
+
+ rsp->ring_index++;
+ lreq_q_out++;
+ if (rsp->ring_index == rsp->length) {
+ lreq_q_out = 0;
+ rsp->ring_index = 0;
+ rsp->ring_ptr = rsp->ring;
+ } else {
+ rsp->ring_ptr++;
+ }
+
+ if (pkt->entry_status != 0 &&
+ pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
+ qlafx00_error_entry(vha, rsp,
+ (struct sts_entry_fx00 *)pkt, pkt->entry_status,
+ pkt->entry_type);
+ continue;
+ }
+
+ switch (pkt->entry_type) {
+ case STATUS_TYPE_FX00:
+ qlafx00_status_entry(vha, rsp, pkt);
+ break;
+
+ case STATUS_CONT_TYPE_FX00:
+ qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
+ break;
+
+ case MULTI_STATUS_TYPE_FX00:
+ qlafx00_multistatus_entry(vha, rsp, pkt);
+ break;
+
+ case ABORT_IOCB_TYPE_FX00:
+ qlafx00_abort_iocb_entry(vha, rsp->req,
+ (struct abort_iocb_entry_fx00 *)pkt);
+ break;
+
+ case IOCTL_IOSB_TYPE_FX00:
+ qlafx00_ioctl_iosb_entry(vha, rsp->req,
+ (struct ioctl_iocb_entry_fx00 *)pkt);
+ break;
+ default:
+ /* Type Not Supported. */
+ ql_dbg(ql_dbg_async, vha, 0x5081,
+ "Received unknown response pkt type %x "
+ "entry status=%x.\n",
+ pkt->entry_type, pkt->entry_status);
+ break;
+ }
+ }
+
+ /* Adjust ring index */
+ WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+}
+
+/**
+ * qlafx00_async_event() - Process asynchronous events.
+ * @ha: SCSI driver HA context
+ */
+static void
+qlafx00_async_event(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_fx00 __iomem *reg;
+ int data_size = 1;
+
+ reg = &ha->iobase->ispfx00;
+ /* Setup to process RIO completion.
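+ * aenmb[0] has already been latched from aenmailbox0 by the
+ * interrupt handler; the remaining AEN mailboxes are read here
+ * as needed, and data_size tells qlafx00_post_aenfx_work() how
+ * many aenmb[] words to hand off.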
 */
+ switch (ha->aenmb[0]) {
+ case QLAFX00_MBA_SYSTEM_ERR: /* System Error */
+ ql_log(ql_log_warn, vha, 0x5079,
+ "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+
+ case QLAFX00_MBA_SHUTDOWN_RQSTD: /* Shutdown requested */
+ ql_dbg(ql_dbg_async, vha, 0x5076,
+ "Asynchronous FW shutdown requested.\n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ break;
+
+ case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
+ ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
+ ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
+ ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
+ ql_dbg(ql_dbg_async, vha, 0x5077,
+ "Asynchronous port Update received "
+ "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
+ ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
+ data_size = 4;
+ break;
+
+ case QLAFX00_MBA_TEMP_OVER: /* Over temperature event */
+ ql_log(ql_log_info, vha, 0x5085,
+ "Asynchronous over temperature event received "
+ "aenmb[0]: %x\n",
+ ha->aenmb[0]);
+ break;
+
+ case QLAFX00_MBA_TEMP_NORM: /* Normal temperature event */
+ ql_log(ql_log_info, vha, 0x5086,
+ "Asynchronous normal temperature event received "
+ "aenmb[0]: %x\n",
+ ha->aenmb[0]);
+ break;
+
+ case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
+ ql_log(ql_log_info, vha, 0x5083,
+ "Asynchronous critical temperature event received "
+ "aenmb[0]: %x\n",
+ ha->aenmb[0]);
+ break;
+
+ default:
+ ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
+ ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
+ ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
+ ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
+ ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
+ ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
+ ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
+ ql_dbg(ql_dbg_async, vha, 0x5078,
+ "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
+ ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
+ ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
+ break;
+ }
+ qlafx00_post_aenfx_work(vha, ha->aenmb[0],
+ (uint32_t *)ha->aenmb, data_size);
+}
+
+/**
+ * qlafx00_mbx_completion() - Process mailbox command completions.
+ * @ha: SCSI driver HA context
+ * @mb0: Mailbox0 register
+ */
+static void
+qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
+{
+ uint16_t cnt;
+ uint32_t __iomem *wptr;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+
+ if (!ha->mcp32)
+ ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");
+
+ /* Load return mailbox registers. */
+ ha->flags.mbox_int = 1;
+ ha->mailbox_out32[0] = mb0;
+ wptr = (uint32_t __iomem *)&reg->mailbox17;
+
+ for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+ ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
+ wptr++;
+ }
+}
+
+/**
+ * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
+ * @irq:
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
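+ *
+ * Up to 50 pending interrupt events are drained per invocation:
+ * mailbox completions (mailbox16), asynchronous events
+ * (aenmailbox0) and response-queue completions, each acknowledged
+ * via QLAFX00_CLR_INTR_REG() with only the bits actually serviced.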
+ */
+irqreturn_t
+qlafx00_intr_handler(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct device_reg_fx00 __iomem *reg;
+ int status;
+ unsigned long iter;
+ uint32_t stat;
+ uint32_t mb[8];
+ struct rsp_que *rsp;
+ unsigned long flags;
+ uint32_t clr_intr = 0;
+ uint32_t intr_stat = 0;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ ql_log(ql_log_info, NULL, 0x507d,
+ "%s: NULL response queue pointer.\n", __func__);
+ return IRQ_NONE;
+ }
+
+ ha = rsp->hw;
+ reg = &ha->iobase->ispfx00;
+ status = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
+ for (iter = 50; iter--; clr_intr = 0) {
+ stat = QLAFX00_RD_INTR_REG(ha);
+ if (qla2x00_check_reg_for_disconnect(vha, stat))
+ break;
+ intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
+ if (!intr_stat)
+ break;
+
+ if (stat & QLAFX00_INTR_MB_CMPLT) {
+ mb[0] = RD_REG_WORD(&reg->mailbox16);
+ qlafx00_mbx_completion(vha, mb[0]);
+ status |= MBX_INTERRUPT;
+ clr_intr |= QLAFX00_INTR_MB_CMPLT;
+ }
+ if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
+ ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
+ qlafx00_async_event(vha);
+ clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
+ }
+ if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
+ qlafx00_process_response_queue(vha, rsp);
+ clr_intr |= QLAFX00_INTR_RSP_CMPLT;
+ }
+
+ QLAFX00_CLR_INTR_REG(ha, clr_intr);
+ QLAFX00_RD_INTR_REG(ha);
+ }
+
+ qla2x00_handle_mbx_completion(ha, status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/** QLAFX00 specific IOCB implementation functions */
+
+static inline cont_a64_entry_t *
+qlafx00_prep_cont_type1_iocb(struct req_que *req,
+ cont_a64_entry_t *lcont_pkt)
+{
+ cont_a64_entry_t *cont_pkt;
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
+
+ /* Load packet defaults. */
+ lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00;
+
+ return cont_pkt;
+}
+
+static inline void
+qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
+ uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
+{
+ uint16_t avail_dsds;
+ __le32 *cur_dsd;
+ scsi_qla_host_t *vha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *sg;
+ int i, cont;
+ struct req_que *req;
+ cont_a64_entry_t lcont_pkt;
+ cont_a64_entry_t *cont_pkt;
+
+ vha = sp->fcport->vha;
+ req = vha->req;
+
+ cmd = GET_CMD_SP(sp);
+ cont = 0;
+ cont_pkt = NULL;
+
+ /* Update entry type to indicate Command Type 3 IOCB */
+ lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7;
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return;
+ }
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ lcmd_pkt->cntrl_flags = TMF_READ_DATA;
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ }
+
+ /* One DSD is available in the Command Type 3 IOCB */
+ avail_dsds = 1;
+ cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;
+
+ /* Load data segments */
+ scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
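+ * With one DSD in the command IOCB and five in each
+ * continuation, a command with N segments needs
+ * 1 + (N - 1 + 4) / 5 request-queue entries; e.g. a
+ * 12-segment transfer uses the command IOCB plus three
+ * continuation IOCBs.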
+ */ + memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE); + cont_pkt = + qlafx00_prep_cont_type1_iocb(req, &lcont_pkt); + cur_dsd = (__le32 *)lcont_pkt.dseg_0_address; + avail_dsds = 5; + cont = 1; + } + + sle_dma = sg_dma_address(sg); + *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); + avail_dsds--; + if (avail_dsds == 0 && cont == 1) { + cont = 0; + memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, + REQUEST_ENTRY_SIZE); + } + + } + if (avail_dsds != 0 && cont == 1) { + memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, + REQUEST_ENTRY_SIZE); + } +} + +/** + * qlafx00_start_scsi() - Send a SCSI command to the ISP + * @sp: command to send to the ISP + * + * Returns non-zero if a failure occurred, else zero. + */ +int +qlafx00_start_scsi(srb_t *sp) +{ + int ret, nseg; + unsigned long flags; + uint32_t index; + uint32_t handle; + uint16_t cnt; + uint16_t req_cnt; + uint16_t tot_dsds; + struct req_que *req = NULL; + struct rsp_que *rsp = NULL; + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct scsi_qla_host *vha = sp->fcport->vha; + struct qla_hw_data *ha = vha->hw; + struct cmd_type_7_fx00 *cmd_pkt; + struct cmd_type_7_fx00 lcmd_pkt; + struct scsi_lun llun; + char tag[2]; + + /* Setup device pointers. */ + ret = 0; + + rsp = ha->rsp_q_map[0]; + req = vha->req; + + /* So we know we haven't pci_map'ed anything yet */ + tot_dsds = 0; + + /* Acquire ring specific lock */ + spin_lock_irqsave(&ha->hardware_lock, flags); + + /* Check for room in outstanding command list. */ + handle = req->current_outstanding_cmd; + for (index = 1; index < req->num_outstanding_cmds; index++) { + handle++; + if (handle == req->num_outstanding_cmds) + handle = 1; + if (!req->outstanding_cmds[handle]) + break; + } + if (index == req->num_outstanding_cmds) + goto queuing_error; + + /* Map the sg table so we have an accurate count of sg entries needed */ + if (scsi_sg_count(cmd)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), + scsi_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + } else + nseg = 0; + + tot_dsds = nseg; + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + if (req->cnt < (req_cnt + 2)) { + cnt = RD_REG_DWORD_RELAXED(req->req_q_out); + + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; + } + + /* Build command packet. */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + cmd->host_scribble = (unsigned char *)(unsigned long)handle; + req->cnt -= req_cnt; + + cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr; + + memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE); + + lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle); + lcmd_pkt.reserved_0 = 0; + lcmd_pkt.port_path_ctrl = 0; + lcmd_pkt.reserved_1 = 0; + lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds); + lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id); + + int_to_scsilun(cmd->device->lun, &llun); + host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun, + sizeof(lcmd_pkt.lun)); + + /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */ + if (scsi_populate_tag_msg(cmd, tag)) { + switch (tag[0]) { + case HEAD_OF_QUEUE_TAG: + lcmd_pkt.task = TSK_HEAD_OF_QUEUE; + break; + case ORDERED_QUEUE_TAG: + lcmd_pkt.task = TSK_ORDERED; + break; + } + } + + /* Load SCSI command packet. 
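+ * The whole IOCB is staged in the on-stack lcmd_pkt and only
+ * copied to the memory-mapped request ring with memcpy_toio()
+ * once complete, so the adapter never sees a half-built entry.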
*/ + host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb)); + lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); + + /* Build IOCB segments */ + qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt); + + /* Set total data segment count. */ + lcmd_pkt.entry_count = (uint8_t)req_cnt; + + /* Specify response queue number where completion should happen */ + lcmd_pkt.entry_status = (uint8_t) rsp->id; + + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e, + (uint8_t *)cmd->cmnd, cmd->cmd_len); + ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032, + (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE); + + memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE); + wmb(); + + /* Adjust ring index. */ + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else + req->ring_ptr++; + + sp->flags |= SRB_DMA_VALID; + + /* Set chip new ring index. */ + WRT_REG_DWORD(req->req_q_in, req->ring_index); + QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return QLA_SUCCESS; + +queuing_error: + if (tot_dsds) + scsi_dma_unmap(cmd); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_FUNCTION_FAILED; +} + +void +qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb) +{ + struct srb_iocb *fxio = &sp->u.iocb_cmd; + scsi_qla_host_t *vha = sp->fcport->vha; + struct req_que *req = vha->req; + struct tsk_mgmt_entry_fx00 tm_iocb; + struct scsi_lun llun; + + memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00)); + tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00; + tm_iocb.entry_count = 1; + tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); + tm_iocb.reserved_0 = 0; + tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id); + tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags); + if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) { + int_to_scsilun(fxio->u.tmf.lun, &llun); + host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun, + sizeof(struct scsi_lun)); + } + + memcpy((void *)ptm_iocb, &tm_iocb, + sizeof(struct tsk_mgmt_entry_fx00)); + wmb(); +} + +void +qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb) +{ + struct srb_iocb *fxio = &sp->u.iocb_cmd; + scsi_qla_host_t *vha = sp->fcport->vha; + struct req_que *req = vha->req; + struct abort_iocb_entry_fx00 abt_iocb; + + memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00)); + abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00; + abt_iocb.entry_count = 1; + abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); + abt_iocb.abort_handle = + cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl)); + abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id); + abt_iocb.req_que_no = cpu_to_le16(req->id); + + memcpy((void *)pabt_iocb, &abt_iocb, + sizeof(struct abort_iocb_entry_fx00)); + wmb(); +} + +void +qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) +{ + struct srb_iocb *fxio = &sp->u.iocb_cmd; + struct qla_mt_iocb_rqst_fx00 *piocb_rqst; + struct fc_bsg_job *bsg_job; + struct fxdisc_entry_fx00 fx_iocb; + uint8_t entry_cnt = 1; + + memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00)); + fx_iocb.entry_type = FX00_IOCB_TYPE; + fx_iocb.handle = cpu_to_le32(sp->handle); + fx_iocb.entry_count = entry_cnt; + + if (sp->type == SRB_FXIOCB_DCMD) { + fx_iocb.func_num = + sp->u.iocb_cmd.u.fxiocb.req_func_type; + fx_iocb.adapid = fxio->u.fxiocb.adapter_id; + fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi; + fx_iocb.reserved_0 = 
fxio->u.fxiocb.reserved_0; + fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1; + fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra; + + if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) { + fx_iocb.req_dsdcnt = cpu_to_le16(1); + fx_iocb.req_xfrcnt = + cpu_to_le16(fxio->u.fxiocb.req_len); + fx_iocb.dseg_rq_address[0] = + cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle)); + fx_iocb.dseg_rq_address[1] = + cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle)); + fx_iocb.dseg_rq_len = + cpu_to_le32(fxio->u.fxiocb.req_len); + } + + if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) { + fx_iocb.rsp_dsdcnt = cpu_to_le16(1); + fx_iocb.rsp_xfrcnt = + cpu_to_le16(fxio->u.fxiocb.rsp_len); + fx_iocb.dseg_rsp_address[0] = + cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle)); + fx_iocb.dseg_rsp_address[1] = + cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle)); + fx_iocb.dseg_rsp_len = + cpu_to_le32(fxio->u.fxiocb.rsp_len); + } + + if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) { + fx_iocb.dataword = fxio->u.fxiocb.req_data; + } + fx_iocb.flags = fxio->u.fxiocb.flags; + } else { + struct scatterlist *sg; + bsg_job = sp->u.bsg_job; + piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) + &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; + + fx_iocb.func_num = piocb_rqst->func_type; + fx_iocb.adapid = piocb_rqst->adapid; + fx_iocb.adapid_hi = piocb_rqst->adapid_hi; + fx_iocb.reserved_0 = piocb_rqst->reserved_0; + fx_iocb.reserved_1 = piocb_rqst->reserved_1; + fx_iocb.dataword_extra = piocb_rqst->dataword_extra; + fx_iocb.dataword = piocb_rqst->dataword; + fx_iocb.req_xfrcnt = piocb_rqst->req_len; + fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len; + + if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) { + int avail_dsds, tot_dsds; + cont_a64_entry_t lcont_pkt; + cont_a64_entry_t *cont_pkt = NULL; + __le32 *cur_dsd; + int index = 0, cont = 0; + + fx_iocb.req_dsdcnt = + cpu_to_le16(bsg_job->request_payload.sg_cnt); + tot_dsds = + bsg_job->request_payload.sg_cnt; + cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0]; + avail_dsds = 1; + for_each_sg(bsg_job->request_payload.sg_list, sg, + tot_dsds, index) { + dma_addr_t sle_dma; + + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Cont. + * Type 1 IOCB. 
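+ * Each continuation allocated here also increments entry_cnt,
+ * which is written back to fx_iocb.entry_count so the firmware
+ * knows how many ring entries the request spans.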
+ */ + memset(&lcont_pkt, 0, + REQUEST_ENTRY_SIZE); + cont_pkt = + qlafx00_prep_cont_type1_iocb( + sp->fcport->vha->req, + &lcont_pkt); + cur_dsd = (__le32 *) + lcont_pkt.dseg_0_address; + avail_dsds = 5; + cont = 1; + entry_cnt++; + } + + sle_dma = sg_dma_address(sg); + *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); + avail_dsds--; + + if (avail_dsds == 0 && cont == 1) { + cont = 0; + memcpy_toio( + (void __iomem *)cont_pkt, + &lcont_pkt, REQUEST_ENTRY_SIZE); + ql_dump_buffer( + ql_dbg_user + ql_dbg_verbose, + sp->fcport->vha, 0x3042, + (uint8_t *)&lcont_pkt, + REQUEST_ENTRY_SIZE); + } + } + if (avail_dsds != 0 && cont == 1) { + memcpy_toio((void __iomem *)cont_pkt, + &lcont_pkt, REQUEST_ENTRY_SIZE); + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, + sp->fcport->vha, 0x3043, + (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); + } + } + + if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) { + int avail_dsds, tot_dsds; + cont_a64_entry_t lcont_pkt; + cont_a64_entry_t *cont_pkt = NULL; + __le32 *cur_dsd; + int index = 0, cont = 0; + + fx_iocb.rsp_dsdcnt = + cpu_to_le16(bsg_job->reply_payload.sg_cnt); + tot_dsds = bsg_job->reply_payload.sg_cnt; + cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0]; + avail_dsds = 1; + + for_each_sg(bsg_job->reply_payload.sg_list, sg, + tot_dsds, index) { + dma_addr_t sle_dma; + + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Cont. + * Type 1 IOCB. + */ + memset(&lcont_pkt, 0, + REQUEST_ENTRY_SIZE); + cont_pkt = + qlafx00_prep_cont_type1_iocb( + sp->fcport->vha->req, + &lcont_pkt); + cur_dsd = (__le32 *) + lcont_pkt.dseg_0_address; + avail_dsds = 5; + cont = 1; + entry_cnt++; + } + + sle_dma = sg_dma_address(sg); + *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); + avail_dsds--; + + if (avail_dsds == 0 && cont == 1) { + cont = 0; + memcpy_toio((void __iomem *)cont_pkt, + &lcont_pkt, + REQUEST_ENTRY_SIZE); + ql_dump_buffer( + ql_dbg_user + ql_dbg_verbose, + sp->fcport->vha, 0x3045, + (uint8_t *)&lcont_pkt, + REQUEST_ENTRY_SIZE); + } + } + if (avail_dsds != 0 && cont == 1) { + memcpy_toio((void __iomem *)cont_pkt, + &lcont_pkt, REQUEST_ENTRY_SIZE); + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, + sp->fcport->vha, 0x3046, + (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE); + } + } + + if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID) + fx_iocb.dataword = piocb_rqst->dataword; + fx_iocb.flags = piocb_rqst->flags; + fx_iocb.entry_count = entry_cnt; + } + + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, + sp->fcport->vha, 0x3047, + (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00)); + + memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, + sizeof(struct fxdisc_entry_fx00)); + wmb(); +} diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h new file mode 100644 index 00000000000..aeaa1b40b1f --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_mr.h @@ -0,0 +1,527 @@ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. + */ +#ifndef __QLA_MR_H +#define __QLA_MR_H + +/* + * The PCI VendorID and DeviceID for our board. + */ +#define PCI_DEVICE_ID_QLOGIC_ISPF001 0xF001 + +/* FX00 specific definitions */ + +#define FX00_COMMAND_TYPE_7 0x07 /* Command Type 7 entry for 7XXX */ +struct cmd_type_7_fx00 { + uint8_t entry_type; /* Entry type. 
*/ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + uint8_t reserved_0; + uint8_t port_path_ctrl; + uint16_t reserved_1; + + __le16 tgt_idx; /* Target Idx. */ + uint16_t timeout; /* Command timeout. */ + + __le16 dseg_count; /* Data segment count. */ + uint8_t scsi_rsp_dsd_len; + uint8_t reserved_2; + + struct scsi_lun lun; /* LUN (LE). */ + + uint8_t cntrl_flags; + + uint8_t task_mgmt_flags; /* Task management flags. */ + + uint8_t task; + + uint8_t crn; + + uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */ + __le32 byte_count; /* Total byte count. */ + + uint32_t dseg_0_address[2]; /* Data segment 0 address. */ + uint32_t dseg_0_len; /* Data segment 0 length. */ +}; + +#define STATUS_TYPE_FX00 0x01 /* Status entry. */ +struct sts_entry_fx00 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + uint32_t reserved_3; /* System handle. */ + + __le16 comp_status; /* Completion status. */ + uint16_t reserved_0; /* OX_ID used by the firmware. */ + + __le32 residual_len; /* FW calc residual transfer length. */ + + uint16_t reserved_1; + uint16_t state_flags; /* State flags. */ + + uint16_t reserved_2; + __le16 scsi_status; /* SCSI status. */ + + uint32_t sense_len; /* FCP SENSE length. */ + uint8_t data[32]; /* FCP response/sense information. */ +}; + + +#define MAX_HANDLE_COUNT 15 +#define MULTI_STATUS_TYPE_FX00 0x0D + +struct multi_sts_entry_fx00 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t handle_count; + uint8_t entry_status; + + __le32 handles[MAX_HANDLE_COUNT]; +}; + +#define TSK_MGMT_IOCB_TYPE_FX00 0x05 +struct tsk_mgmt_entry_fx00 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; + uint8_t entry_status; /* Entry Status. */ + + __le32 handle; /* System handle. */ + + uint32_t reserved_0; + + __le16 tgt_id; /* Target Idx. */ + + uint16_t reserved_1; + uint16_t reserved_3; + uint16_t reserved_4; + + struct scsi_lun lun; /* LUN (LE). */ + + __le32 control_flags; /* Control Flags. */ + + uint8_t reserved_2[32]; +}; + + +#define ABORT_IOCB_TYPE_FX00 0x08 /* Abort IOCB status. */ +struct abort_iocb_entry_fx00 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + __le32 handle; /* System handle. */ + __le32 reserved_0; + + __le16 tgt_id_sts; /* Completion status. */ + __le16 options; + + __le32 abort_handle; /* System handle. */ + __le32 reserved_2; + + __le16 req_que_no; + uint8_t reserved_1[38]; +}; + +#define IOCTL_IOSB_TYPE_FX00 0x0C +struct ioctl_iocb_entry_fx00 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + uint32_t reserved_0; /* System handle. 
*/ + + uint16_t comp_func_num; + __le16 fw_iotcl_flags; + + __le32 dataword_r; /* Data word returned */ + uint32_t adapid; /* Adapter ID */ + uint32_t dataword_r_extra; + + __le32 seq_no; + uint8_t reserved_2[20]; + uint32_t residuallen; + __le32 status; +}; + +#define STATUS_CONT_TYPE_FX00 0x04 + +#define FX00_IOCB_TYPE 0x0B +struct fxdisc_entry_fx00 { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System Defined. */ + uint8_t entry_status; /* Entry Status. */ + + __le32 handle; /* System handle. */ + __le32 reserved_0; /* System handle. */ + + __le16 func_num; + __le16 req_xfrcnt; + __le16 req_dsdcnt; + __le16 rsp_xfrcnt; + __le16 rsp_dsdcnt; + uint8_t flags; + uint8_t reserved_1; + + __le32 dseg_rq_address[2]; /* Data segment 0 address. */ + __le32 dseg_rq_len; /* Data segment 0 length. */ + __le32 dseg_rsp_address[2]; /* Data segment 1 address. */ + __le32 dseg_rsp_len; /* Data segment 1 length. */ + + __le32 dataword; + __le32 adapid; + __le32 adapid_hi; + __le32 dataword_extra; +}; + +struct qlafx00_tgt_node_info { + uint8_t tgt_node_wwpn[WWN_SIZE]; + uint8_t tgt_node_wwnn[WWN_SIZE]; + uint32_t tgt_node_state; + uint8_t reserved[128]; + uint32_t reserved_1[8]; + uint64_t reserved_2[4]; +} __packed; + +#define QLAFX00_TGT_NODE_INFO sizeof(struct qlafx00_tgt_node_info) + +#define QLAFX00_LINK_STATUS_DOWN 0x10 +#define QLAFX00_LINK_STATUS_UP 0x11 + +#define QLAFX00_PORT_SPEED_2G 0x2 +#define QLAFX00_PORT_SPEED_4G 0x4 +#define QLAFX00_PORT_SPEED_8G 0x8 +#define QLAFX00_PORT_SPEED_10G 0xa +struct port_info_data { + uint8_t port_state; + uint8_t port_type; + uint16_t port_identifier; + uint32_t up_port_state; + uint8_t fw_ver_num[32]; + uint8_t portal_attrib; + uint16_t host_option; + uint8_t reset_delay; + uint8_t pdwn_retry_cnt; + uint16_t max_luns2tgt; + uint8_t risc_ver; + uint8_t pconn_option; + uint16_t risc_option; + uint16_t max_frame_len; + uint16_t max_iocb_alloc; + uint16_t exec_throttle; + uint8_t retry_cnt; + uint8_t retry_delay; + uint8_t port_name[8]; + uint8_t port_id[3]; + uint8_t link_status; + uint8_t plink_rate; + uint32_t link_config; + uint16_t adap_haddr; + uint8_t tgt_disc; + uint8_t log_tout; + uint8_t node_name[8]; + uint16_t erisc_opt1; + uint8_t resp_acc_tmr; + uint8_t intr_del_tmr; + uint8_t erisc_opt2; + uint8_t alt_port_name[8]; + uint8_t alt_node_name[8]; + uint8_t link_down_tout; + uint8_t conn_type; + uint8_t fc_fw_mode; + uint32_t uiReserved[48]; +} __packed; + +/* OS Type Designations */ +#define OS_TYPE_UNKNOWN 0 +#define OS_TYPE_LINUX 2 + +/* Linux Info */ +#define SYSNAME_LENGTH 128 +#define NODENAME_LENGTH 64 +#define RELEASE_LENGTH 64 +#define VERSION_LENGTH 64 +#define MACHINE_LENGTH 64 +#define DOMNAME_LENGTH 64 + +struct host_system_info { + uint32_t os_type; + char sysname[SYSNAME_LENGTH]; + char nodename[NODENAME_LENGTH]; + char release[RELEASE_LENGTH]; + char version[VERSION_LENGTH]; + char machine[MACHINE_LENGTH]; + char domainname[DOMNAME_LENGTH]; + char hostdriver[VERSION_LENGTH]; + uint32_t reserved[64]; +} __packed; + +struct register_host_info { + struct host_system_info hsi; /* host system info */ + uint64_t utc; /* UTC (system time) */ + uint32_t reserved[64]; /* future additions */ +} __packed; + + +#define QLAFX00_PORT_DATA_INFO (sizeof(struct port_info_data)) +#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32) + +struct config_info_data { + uint8_t model_num[16]; + uint8_t model_description[80]; + uint8_t reserved0[160]; + uint8_t symbolic_name[64]; + uint8_t 
serial_num[32]; + uint8_t hw_version[16]; + uint8_t fw_version[16]; + uint8_t uboot_version[16]; + uint8_t fru_serial_num[32]; + + uint8_t fc_port_count; + uint8_t iscsi_port_count; + uint8_t reserved1[2]; + + uint8_t mode; + uint8_t log_level; + uint8_t reserved2[2]; + + uint32_t log_size; + + uint8_t tgt_pres_mode; + uint8_t iqn_flags; + uint8_t lun_mapping; + + uint64_t adapter_id; + + uint32_t cluster_key_len; + uint8_t cluster_key[16]; + + uint64_t cluster_master_id; + uint64_t cluster_slave_id; + uint8_t cluster_flags; + uint32_t enabled_capabilities; + uint32_t nominal_temp_value; +} __packed; + +#define FXDISC_GET_CONFIG_INFO 0x01 +#define FXDISC_GET_PORT_INFO 0x02 +#define FXDISC_GET_TGT_NODE_INFO 0x80 +#define FXDISC_GET_TGT_NODE_LIST 0x81 +#define FXDISC_REG_HOST_INFO 0x99 +#define FXDISC_ABORT_IOCTL 0xff + +#define QLAFX00_HBA_ICNTRL_REG 0x20B08 +#define QLAFX00_ICR_ENB_MASK 0x80000000 +#define QLAFX00_ICR_DIS_MASK 0x7fffffff +#define QLAFX00_HST_RST_REG 0x18264 +#define QLAFX00_SOC_TEMP_REG 0x184C4 +#define QLAFX00_HST_TO_HBA_REG 0x20A04 +#define QLAFX00_HBA_TO_HOST_REG 0x21B70 +#define QLAFX00_HST_INT_STS_BITS 0x7 +#define QLAFX00_BAR1_BASE_ADDR_REG 0x40018 +#define QLAFX00_PEX0_WIN0_BASE_ADDR_REG 0x41824 + +#define QLAFX00_INTR_MB_CMPLT 0x1 +#define QLAFX00_INTR_RSP_CMPLT 0x2 +#define QLAFX00_INTR_ASYNC_CMPLT 0x4 + +#define QLAFX00_MBA_SYSTEM_ERR 0x8002 +#define QLAFX00_MBA_TEMP_OVER 0x8005 +#define QLAFX00_MBA_TEMP_NORM 0x8006 +#define QLAFX00_MBA_TEMP_CRIT 0x8007 +#define QLAFX00_MBA_LINK_UP 0x8011 +#define QLAFX00_MBA_LINK_DOWN 0x8012 +#define QLAFX00_MBA_PORT_UPDATE 0x8014 +#define QLAFX00_MBA_SHUTDOWN_RQSTD 0x8062 + +#define SOC_SW_RST_CONTROL_REG_CORE0 0x0020800 +#define SOC_FABRIC_RST_CONTROL_REG 0x0020840 +#define SOC_FABRIC_CONTROL_REG 0x0020200 +#define SOC_FABRIC_CONFIG_REG 0x0020204 +#define SOC_PWR_MANAGEMENT_PWR_DOWN_REG 0x001820C + +#define SOC_INTERRUPT_SOURCE_I_CONTROL_REG 0x0020B00 +#define SOC_CORE_TIMER_REG 0x0021850 +#define SOC_IRQ_ACK_REG 0x00218b4 + +#define CONTINUE_A64_TYPE_FX00 0x03 /* Continuation entry. 
*/ + +#define QLAFX00_SET_HST_INTR(ha, value) \ + WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \ + value) + +#define QLAFX00_CLR_HST_INTR(ha, value) \ + WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ + ~value) + +#define QLAFX00_RD_INTR_REG(ha) \ + RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG) + +#define QLAFX00_CLR_INTR_REG(ha, value) \ + WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ + ~value) + +#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\ + WRT_REG_DWORD((ha)->cregbase + off, val) + +#define QLAFX00_GET_HBA_SOC_REG(ha, off)\ + RD_REG_DWORD((ha)->cregbase + off) + +#define QLAFX00_HBA_RST_REG(ha, val)\ + WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val) + +#define QLAFX00_RD_ICNTRL_REG(ha) \ + RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG) + +#define QLAFX00_ENABLE_ICNTRL_REG(ha) \ + WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ + (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \ + QLAFX00_ICR_ENB_MASK)) + +#define QLAFX00_DISABLE_ICNTRL_REG(ha) \ + WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ + (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \ + QLAFX00_ICR_DIS_MASK)) + +#define QLAFX00_RD_REG(ha, off) \ + RD_REG_DWORD((ha)->cregbase + off) + +#define QLAFX00_WR_REG(ha, off, val) \ + WRT_REG_DWORD((ha)->cregbase + off, val) + +struct qla_mt_iocb_rqst_fx00 { + __le32 reserved_0; + + __le16 func_type; + uint8_t flags; + uint8_t reserved_1; + + __le32 dataword; + + __le32 adapid; + __le32 adapid_hi; + + __le32 dataword_extra; + + __le16 req_len; + __le16 reserved_2; + + __le16 rsp_len; + __le16 reserved_3; +}; + +struct qla_mt_iocb_rsp_fx00 { + uint32_t reserved_1; + + uint16_t func_type; + __le16 ioctl_flags; + + __le32 ioctl_data; + + uint32_t adapid; + uint32_t adapid_hi; + + uint32_t reserved_2; + __le32 seq_number; + + uint8_t reserved_3[20]; + + int32_t res_count; + + __le32 status; +}; + + +#define MAILBOX_REGISTER_COUNT_FX00 16 +#define AEN_MAILBOX_REGISTER_COUNT_FX00 8 +#define MAX_FIBRE_DEVICES_FX00 512 +#define MAX_LUNS_FX00 0x1024 +#define MAX_TARGETS_FX00 MAX_ISA_DEVICES +#define REQUEST_ENTRY_CNT_FX00 512 /* Number of request entries. */ +#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ + +/* + * Firmware state codes for QLAFX00 adapters + */ +#define FSTATE_FX00_CONFIG_WAIT 0x0000 /* Waiting for driver to issue + * Initialize FW Mbox cmd + */ +#define FSTATE_FX00_INITIALIZED 0x1000 /* FW has been initialized by + * the driver + */ + +#define FX00_DEF_RATOV 10 + +struct mr_data_fx00 { + uint8_t symbolic_name[64]; + uint8_t serial_num[32]; + uint8_t hw_version[16]; + uint8_t fw_version[16]; + uint8_t uboot_version[16]; + uint8_t fru_serial_num[32]; + fc_port_t fcport; /* fcport used for requests + * that are not linked + * to a particular target + */ + uint8_t fw_hbt_en; + uint8_t fw_hbt_cnt; + uint8_t fw_hbt_miss_cnt; + uint32_t old_fw_hbt_cnt; + uint16_t fw_reset_timer_tick; + uint8_t fw_reset_timer_exp; + uint16_t fw_critemp_timer_tick; + uint32_t old_aenmbx0_state; + uint32_t critical_temperature; + bool extended_io_enabled; + bool host_info_resend; + uint8_t hinfo_resend_timer_tick; +}; + +#define QLAFX00_EXTENDED_IO_EN_MASK 0x20 + +/* + * SoC Junction Temperature is stored in + * bits 9:1 of SoC Junction Temperature Register + * in a firmware specific format format. 
+ * To get the temperature in Celsius degrees
+ * the value from this bitfield should be converted
+ * using this formula:
+ * Temperature (degrees C) = ((3,153,000 - (10,000 * X)) / 13,825)
+ * where X is the bit field value
+ * this macro reads the register, extracts the bitfield value,
+ * performs the calculations and returns temperature in Celsius
+ */
+#define QLAFX00_GET_TEMPERATURE(ha) ((3153000 - (10000 * \
+ ((QLAFX00_RD_REG(ha, QLAFX00_SOC_TEMP_REG) & 0x3FE) >> 1))) / 13825)
+
+
+#define QLAFX00_LOOP_DOWN_TIME 615 /* 600 */
+#define QLAFX00_HEARTBEAT_INTERVAL 6 /* number of seconds */
+#define QLAFX00_HEARTBEAT_MISS_CNT 3 /* number of miss */
+#define QLAFX00_RESET_INTERVAL 120 /* number of seconds */
+#define QLAFX00_MAX_RESET_INTERVAL 600 /* number of seconds */
+#define QLAFX00_CRITEMP_INTERVAL 60 /* number of seconds */
+#define QLAFX00_HINFO_RESEND_INTERVAL 60 /* number of seconds */
+
+#define QLAFX00_CRITEMP_THRSHLD 80 /* Celsius degrees */
+
+/* Max concurrent IOs that can be queued */
+#define QLAFX00_MAX_CANQUEUE 1024
+
+/* IOCTL IOCB abort success */
+#define QLAFX00_IOCTL_ICOB_ABORT_SUCCESS 0x68
+
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 10754f51830..58f3c912d96 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2013 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -857,20 +857,21 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
 break;
 if (timeout >= qla82xx_rom_lock_timeout) {
 lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
- ql_dbg(ql_dbg_p3p, vha, 0xb085,
- "Failed to acquire rom lock, acquired by %d.\n",
- lock_owner);
+ ql_log(ql_log_warn, vha, 0xb157,
+ "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
+ __func__, ha->portnum, lock_owner);
 return -1;
 }
 timeout++;
 }
- qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
+ qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum);
 return 0;
 }
 
 static void
 qla82xx_rom_unlock(struct qla_hw_data *ha)
 {
+ qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff);
 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
 }
 
@@ -954,6 +955,7 @@ static int
 qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
 {
 int ret, loops = 0;
+ uint32_t lock_owner = 0;
 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
@@ -962,8 +964,10 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
 loops++;
 }
 if (loops >= 50000) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
 ql_log(ql_log_fatal, vha, 0x00b9,
- "Failed to acquire SEM2 lock.\n");
+ "Failed to acquire SEM2 lock, Lock Owner %u.\n",
+ lock_owner);
 return -1;
 }
 ret = qla82xx_do_rom_fast_read(ha, addr, valp);
@@ -1061,6 +1065,7 @@ static int
 ql82xx_rom_lock_d(struct qla_hw_data *ha)
 {
 int loops = 0;
+ uint32_t lock_owner = 0;
 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
 while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
@@ -1069,8 +1074,9 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha)
 loops++;
 }
 if (loops >= 50000) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
 ql_log(ql_log_warn, vha, 0xb010,
- "ROM lock failed.\n");
+ "ROM lock failed, Lock Owner %u.\n", lock_owner);
 return -1;
 }
 return 0;
@@ -1666,8 +1672,14 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
 }
 
 /* Mapping of IO base pointer */
- ha->iobase =
- (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
- 0xbc000 + (ha->pdev->devfn << 11));
+ if (IS_QLA8044(ha)) {
+ ha->iobase =
+ (device_reg_t *)((uint8_t *)ha->nx_pcibase);
+ } else if (IS_QLA82XX(ha)) {
+ ha->iobase =
+ (device_reg_t *)((uint8_t *)ha->nx_pcibase +
+ 0xbc000 + (ha->pdev->devfn << 11));
+ }
 
 if (!ql2xdbwr) {
 ha->nxdb_wr_ptr =
@@ -1967,7 +1979,7 @@ static struct qla82xx_legacy_intr_set legacy_intr[] = \
  * @ha: SCSI driver HA context
  * @mb0: Mailbox0 register
  */
-static void
+void
 qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
 {
 uint16_t cnt;
@@ -2074,22 +2086,13 @@ qla82xx_intr_handler(int irq, void *dev_id)
 }
 WRT_REG_DWORD(&reg->host_int, 0);
 }
+
+ qla2x00_handle_mbx_completion(ha, status);
 spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 if (!ha->flags.msi_enabled)
 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
-#ifdef QL_DEBUG_LEVEL_17
- if (!irq && ha->flags.eeh_busy)
- ql_log(ql_log_warn, vha, 0x503d,
- "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
- status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
-#endif
-
- if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
- (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
- set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- complete(&ha->mbx_intr_comp);
- }
 return IRQ_HANDLED;
 }
 
@@ -2103,6 +2106,7 @@ qla82xx_msix_default(int irq, void *dev_id)
 int status = 0;
 unsigned long flags;
 uint32_t stat = 0;
+ uint32_t host_int = 0;
 uint16_t mb[4];
 
 rsp = (struct rsp_que *) dev_id;
@@ -2118,7 +2122,10 @@ qla82xx_msix_default(int irq, void *dev_id)
 spin_lock_irqsave(&ha->hardware_lock, flags);
 vha = pci_get_drvdata(ha->pdev);
 do {
- if (RD_REG_DWORD(&reg->host_int)) {
+ host_int = RD_REG_DWORD(&reg->host_int);
+ if (qla2x00_check_reg_for_disconnect(vha, host_int))
+ break;
+ if (host_int) {
 stat = RD_REG_DWORD(&reg->host_status);
 
 switch (stat & 0xff) {
@@ -2149,20 +2156,9 @@ qla82xx_msix_default(int irq, void *dev_id)
 WRT_REG_DWORD(&reg->host_int, 0);
 } while (0);
 
+ qla2x00_handle_mbx_completion(ha, status);
 spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-#ifdef QL_DEBUG_LEVEL_17
- if (!irq && ha->flags.eeh_busy)
- ql_log(ql_log_warn, vha, 0x5044,
- "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
- status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
-#endif
-
- if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
- (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
- set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- complete(&ha->mbx_intr_comp);
- }
 return IRQ_HANDLED;
 }
 
@@ -2174,6 +2170,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
 struct rsp_que *rsp;
 struct device_reg_82xx __iomem *reg;
 unsigned long flags;
+ uint32_t host_int = 0;
 
 rsp = (struct rsp_que *) dev_id;
 if (!rsp) {
@@ -2186,8 +2183,12 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
 reg = &ha->iobase->isp82;
 spin_lock_irqsave(&ha->hardware_lock, flags);
 vha = pci_get_drvdata(ha->pdev);
+ host_int = RD_REG_DWORD(&reg->host_int);
+ if (qla2x00_check_reg_for_disconnect(vha, host_int))
+ goto out;
 qla24xx_process_response_queue(vha, rsp);
 WRT_REG_DWORD(&reg->host_int, 0);
+out:
 spin_unlock_irqrestore(&ha->hardware_lock, flags);
 return IRQ_HANDLED;
 }
@@ -2201,6 +2202,7 @@ qla82xx_poll(int irq, void *dev_id)
 struct device_reg_82xx __iomem *reg;
 int status = 0;
 uint32_t stat;
+ uint32_t host_int = 0;
 uint16_t mb[4];
 unsigned long flags;
 
@@ -2216,7 +2218,10 @@ qla82xx_poll(int irq, void *dev_id)
 spin_lock_irqsave(&ha->hardware_lock, flags);
 vha = pci_get_drvdata(ha->pdev);
- if (RD_REG_DWORD(&reg->host_int)) {
+ host_int = RD_REG_DWORD(&reg->host_int);
+ if
 (qla2x00_check_reg_for_disconnect(vha, host_int))
+ goto out;
+ if (host_int) {
 stat = RD_REG_DWORD(&reg->host_status);
 
 switch (stat & 0xff) {
 case 0x1:
@@ -2242,8 +2247,9 @@ qla82xx_poll(int irq, void *dev_id)
 stat * 0xff);
 break;
 }
+ WRT_REG_DWORD(&reg->host_int, 0);
 }
- WRT_REG_DWORD(&reg->host_int, 0);
+out:
 spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
@@ -2253,7 +2259,10 @@ qla82xx_enable_intrs(struct qla_hw_data *ha)
 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 qla82xx_mbx_intr_enable(vha);
 spin_lock_irq(&ha->hardware_lock);
- qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+ if (IS_QLA8044(ha))
+ qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 0);
+ else
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
 spin_unlock_irq(&ha->hardware_lock);
 ha->interrupts_on = 1;
 }
@@ -2264,7 +2273,10 @@ qla82xx_disable_intrs(struct qla_hw_data *ha)
 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 qla82xx_mbx_intr_disable(vha);
 spin_lock_irq(&ha->hardware_lock);
- qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+ if (IS_QLA8044(ha))
+ qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1);
+ else
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
 spin_unlock_irq(&ha->hardware_lock);
 ha->interrupts_on = 0;
 }
@@ -2809,12 +2821,14 @@ static void
 qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
 {
 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ uint32_t lock_owner = 0;
 
- if (qla82xx_rom_lock(ha))
+ if (qla82xx_rom_lock(ha)) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
 /* Someone else is holding the lock. */
 ql_log(ql_log_info, vha, 0xb022,
- "Resetting rom_lock.\n");
-
+ "Resetting rom_lock, Lock Owner %u.\n", lock_owner);
+ }
 /*
  * Either we got the lock, or someone
  * else died while holding it.
@@ -2838,47 +2852,30 @@ static int
 qla82xx_device_bootstrap(scsi_qla_host_t *vha)
 {
 int rval = QLA_SUCCESS;
- int i, timeout;
+ int i;
 uint32_t old_count, count;
 struct qla_hw_data *ha = vha->hw;
- int need_reset = 0, peg_stuck = 1;
+ int need_reset = 0;
 
 need_reset = qla82xx_need_reset(ha);
- old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
-
- for (i = 0; i < 10; i++) {
- timeout = msleep_interruptible(200);
- if (timeout) {
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA8XXX_DEV_FAILED);
- return QLA_FUNCTION_FAILED;
- }
-
- count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
- if (count != old_count)
- peg_stuck = 0;
- }
-
 if (need_reset) {
 /* We are trying to perform a recovery here. */
- if (peg_stuck)
+ if (ha->flags.isp82xx_fw_hung)
 qla82xx_rom_lock_recovery(ha);
- goto dev_initialize;
 } else {
- /* Start of day for this ha context. */
- if (peg_stuck) {
- /* Either we are the first or recovery in progress. */
- qla82xx_rom_lock_recovery(ha);
- goto dev_initialize;
- } else
- /* Firmware already running.
*/ - goto dev_ready; + old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); + for (i = 0; i < 10; i++) { + msleep(200); + count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); + if (count != old_count) { + rval = QLA_SUCCESS; + goto dev_ready; + } + } + qla82xx_rom_lock_recovery(ha); } - return rval; - -dev_initialize: /* set to DEV_INITIALIZING */ ql_log(ql_log_info, vha, 0x009e, "HW State: INITIALIZING.\n"); @@ -3014,6 +3011,9 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha) if (IS_QLA82XX(ha)) { qla82xx_clear_drv_active(ha); qla82xx_idc_unlock(ha); + } else if (IS_QLA8044(ha)) { + qla8044_clear_drv_active(ha); + qla8044_idc_unlock(ha); } /* Set DEV_FAILED flag to disable timer */ @@ -3137,18 +3137,18 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha) if (ql2xmdenable) { if (!ha->fw_dumped) { - if (fw_major_version != ha->fw_major_version || + if ((fw_major_version != ha->fw_major_version || fw_minor_version != ha->fw_minor_version || - fw_subminor_version != ha->fw_subminor_version) { - ql_log(ql_log_info, vha, 0xb02d, - "Firmware version differs " - "Previous version: %d:%d:%d - " - "New version: %d:%d:%d\n", + fw_subminor_version != ha->fw_subminor_version) || + (ha->prev_minidump_failed)) { + ql_dbg(ql_dbg_p3p, vha, 0xb02d, + "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n", fw_major_version, fw_minor_version, fw_subminor_version, ha->fw_major_version, ha->fw_minor_version, - ha->fw_subminor_version); + ha->fw_subminor_version, + ha->prev_minidump_failed); /* Release MiniDump resources */ qla82xx_md_free(vha); /* ALlocate MiniDump resources */ @@ -3336,6 +3336,14 @@ static int qla82xx_check_temp(scsi_qla_host_t *vha) return 0; } +int qla82xx_read_temperature(scsi_qla_host_t *vha) +{ + uint32_t temp; + + temp = qla82xx_rd_32(vha->hw, CRB_TEMP_STATE); + return qla82xx_get_temp_val(temp); +} + void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; @@ -3345,7 +3353,7 @@ void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) ha->flags.mbox_busy = 0; ql_log(ql_log_warn, vha, 0x6010, "Doing premature completion of mbx command.\n"); - if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) + if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) complete(&ha->mbx_intr_comp); } } @@ -3429,8 +3437,18 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { - int rval; - rval = qla82xx_device_state_handler(vha); + int rval = -1; + struct qla_hw_data *ha = vha->hw; + + if (IS_QLA82XX(ha)) + rval = qla82xx_device_state_handler(vha); + else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + /* Decide the reset ownership */ + qla83xx_reset_ownership(vha); + qla8044_idc_unlock(ha); + rval = qla8044_device_state_handler(vha); + } return rval; } @@ -3438,17 +3456,25 @@ void qla82xx_set_reset_owner(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; - uint32_t dev_state; + uint32_t dev_state = 0; + + if (IS_QLA82XX(ha)) + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + else if (IS_QLA8044(ha)) + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); - dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); if (dev_state == QLA8XXX_DEV_READY) { ql_log(ql_log_info, vha, 0xb02f, "HW State: NEED RESET\n"); - qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, - QLA8XXX_DEV_NEED_RESET); - ha->flags.nic_core_reset_owner = 1; - ql_dbg(ql_dbg_p3p, vha, 0xb030, - "reset_owner is 0x%x\n", ha->portnum); + if (IS_QLA82XX(ha)) { + qla82xx_wr_32(ha, 
QLA82XX_CRB_DEV_STATE, + QLA8XXX_DEV_NEED_RESET); + ha->flags.nic_core_reset_owner = 1; + ql_dbg(ql_dbg_p3p, vha, 0xb030, + "reset_owner is 0x%x\n", ha->portnum); + } else if (IS_QLA8044(ha)) + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_NEED_RESET); } else ql_log(ql_log_info, vha, 0xb031, "Device state is 0x%x = %s.\n", @@ -3469,7 +3495,7 @@ qla82xx_set_reset_owner(scsi_qla_host_t *vha) int qla82xx_abort_isp(scsi_qla_host_t *vha) { - int rval; + int rval = -1; struct qla_hw_data *ha = vha->hw; if (vha->device_flags & DFLG_DEV_FAILED) { @@ -3483,7 +3509,15 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); - rval = qla82xx_device_state_handler(vha); + if (IS_QLA82XX(ha)) + rval = qla82xx_device_state_handler(vha); + else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + /* Decide the reset ownership */ + qla83xx_reset_ownership(vha); + qla8044_idc_unlock(ha); + rval = qla8044_device_state_handler(vha); + } qla82xx_idc_lock(ha); qla82xx_clear_rst_ready(ha); @@ -3603,7 +3637,7 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha) void qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) { - int i; + int i, fw_state = 0; unsigned long flags; struct qla_hw_data *ha = vha->hw; @@ -3614,7 +3648,11 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) if (!ha->flags.isp82xx_fw_hung) { for (i = 0; i < 2; i++) { msleep(1000); - if (qla82xx_check_fw_alive(vha)) { + if (IS_QLA82XX(ha)) + fw_state = qla82xx_check_fw_alive(vha); + else if (IS_QLA8044(ha)) + fw_state = qla8044_check_fw_alive(vha); + if (fw_state) { ha->flags.isp82xx_fw_hung = 1; qla82xx_clear_pending_mbx(vha); break; @@ -3639,8 +3677,10 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { - if (!sp->u.scmd.ctx || - (sp->flags & SRB_FCP_CMND_DMA_VALID)) { + if ((!sp->u.scmd.ctx || + (sp->flags & + SRB_FCP_CMND_DMA_VALID)) && + !ha->flags.isp82xx_fw_hung) { spin_unlock_irqrestore( &ha->hardware_lock, flags); if (ha->isp_ops->abort_command(sp)) { @@ -4078,7 +4118,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha, return QLA_SUCCESS; } -static int +int qla82xx_validate_template_chksum(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; @@ -4390,7 +4430,11 @@ qla82xx_md_prep(scsi_qla_host_t *vha) ha->md_template_size / 1024); /* Get Minidump template */ - rval = qla82xx_md_get_template(vha); + if (IS_QLA8044(ha)) + rval = qla8044_md_get_template(vha); + else + rval = qla82xx_md_get_template(vha); + if (rval == QLA_SUCCESS) { ql_dbg(ql_dbg_p3p, vha, 0xb04b, "MiniDump Template obtained\n"); @@ -4455,3 +4499,20 @@ exit: qla82xx_idc_unlock(ha); return rval; } + +void +qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) +{ + struct qla_hw_data *ha = vha->hw; + + if (!ha->allow_cna_fw_dump) + return; + + scsi_block_requests(vha->host); + ha->flags.isp82xx_no_md_cap = 1; + qla82xx_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla82xx_idc_unlock(ha); + qla2x00_wait_for_chip_reset(vha); + scsi_unblock_requests(vha->host); +} diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index d268e8406fd..59c477883a7 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -333,9 +333,6 @@ #define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) #define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) -/* Lock IDs for ROM lock */ -#define ROM_LOCK_DRIVER 0x0d417340 - #define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */ #define QLA82XX_PCI_CRB_WINDOW(A) \ (QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE) @@ -589,6 +586,7 @@ * The PCI VendorID and DeviceID for our board. */ #define PCI_DEVICE_ID_QLOGIC_ISP8021 0x8021 +#define PCI_DEVICE_ID_QLOGIC_ISP8044 0x8044 #define QLA82XX_MSIX_TBL_SPACE 8192 #define QLA82XX_PCI_REG_MSIX_TBL 0x44 @@ -954,6 +952,11 @@ struct ct6_dsd { #define QLA82XX_CNTRL 98 #define QLA82XX_TLHDR 99 #define QLA82XX_RDEND 255 +#define QLA8044_POLLRD 35 +#define QLA8044_RDMUX2 36 +#define QLA8044_L1DTG 8 +#define QLA8044_L1ITG 9 +#define QLA8044_POLLRDMWR 37 /* * Opcodes for Control Entries. @@ -1180,6 +1183,7 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, #define CRB_NIU_XG_PAUSE_CTL_P1 0x8 #define qla82xx_get_temp_val(x) ((x) >> 16) +#define qla82xx_get_temp_val1(x) ((x) && 0x0000FFFF) #define qla82xx_get_temp_state(x) ((x) & 0xffff) #define qla82xx_encode_temp(val, state) (((val) << 16) | (state)) @@ -1191,4 +1195,8 @@ enum { QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */ QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */ }; + +#define LEG_INTR_PTR_OFFSET 0x38C0 +#define LEG_INTR_TRIG_OFFSET 0x38C4 +#define LEG_INTR_MASK_OFFSET 0x38C8 #endif diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c new file mode 100644 index 00000000000..da9e3902f21 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -0,0 +1,4079 @@ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. 
+ */ + +#include <linux/vmalloc.h> +#include <linux/delay.h> + +#include "qla_def.h" +#include "qla_gbl.h" + +#include <linux/delay.h> + +#define TIMEOUT_100_MS 100 + +/* 8044 Flash Read/Write functions */ +uint32_t +qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) +{ + return readl((void __iomem *) (ha->nx_pcibase + addr)); +} + +void +qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val) +{ + writel(val, (void __iomem *)((ha)->nx_pcibase + addr)); +} + +int +qla8044_rd_direct(struct scsi_qla_host *vha, + const uint32_t crb_reg) +{ + struct qla_hw_data *ha = vha->hw; + + if (crb_reg < CRB_REG_INDEX_MAX) + return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]); + else + return QLA_FUNCTION_FAILED; +} + +void +qla8044_wr_direct(struct scsi_qla_host *vha, + const uint32_t crb_reg, + const uint32_t value) +{ + struct qla_hw_data *ha = vha->hw; + + if (crb_reg < CRB_REG_INDEX_MAX) + qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value); +} + +static int +qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr) +{ + uint32_t val; + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr); + val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum)); + + if (val != addr) { + ql_log(ql_log_warn, vha, 0xb087, + "%s: Failed to set register window : " + "addr written 0x%x, read 0x%x!\n", + __func__, addr, val); + ret_val = QLA_FUNCTION_FAILED; + } + return ret_val; +} + +static int +qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) +{ + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + ret_val = qla8044_set_win_base(vha, addr); + if (!ret_val) + *data = qla8044_rd_reg(ha, QLA8044_WILDCARD); + else + ql_log(ql_log_warn, vha, 0xb088, + "%s: failed read of addr 0x%x!\n", __func__, addr); + return ret_val; +} + +static int +qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) +{ + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + ret_val = qla8044_set_win_base(vha, addr); + if (!ret_val) + qla8044_wr_reg(ha, QLA8044_WILDCARD, data); + else + ql_log(ql_log_warn, vha, 0xb089, + "%s: failed wrt to addr 0x%x, data 0x%x\n", + __func__, addr, data); + return ret_val; +} + +/* + * qla8044_read_write_crb_reg - Read from raddr and write value to waddr. 
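+ *
+ * Both accesses go through the indirect CRB window: the target
+ * address is first programmed into QLA8044_CRB_WIN_FUNC(portnum)
+ * by qla8044_set_win_base() and the data is then transferred
+ * through the QLA8044_WILDCARD register.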
+ * + * @ha : Pointer to adapter structure + * @raddr : CRB address to read from + * @waddr : CRB address to write to + * + */ +static void +qla8044_read_write_crb_reg(struct scsi_qla_host *vha, + uint32_t raddr, uint32_t waddr) +{ + uint32_t value; + + qla8044_rd_reg_indirect(vha, raddr, &value); + qla8044_wr_reg_indirect(vha, waddr, value); +} + +static int +qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1, + uint32_t mask) +{ + unsigned long timeout; + uint32_t temp; + + /* jiffies after 100ms */ + timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); + do { + qla8044_rd_reg_indirect(vha, addr1, &temp); + if ((temp & mask) != 0) + break; + if (time_after_eq(jiffies, timeout)) { + ql_log(ql_log_warn, vha, 0xb151, + "Error in processing rdmdio entry\n"); + return -1; + } + } while (1); + + return 0; +} + +static uint32_t +qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha, + uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr) +{ + uint32_t temp; + int ret = 0; + + ret = qla8044_poll_wait_for_ready(vha, addr1, mask); + if (ret == -1) + return -1; + + temp = (0x40000000 | addr); + qla8044_wr_reg_indirect(vha, addr1, temp); + + ret = qla8044_poll_wait_for_ready(vha, addr1, mask); + if (ret == -1) + return 0; + + qla8044_rd_reg_indirect(vha, addr3, &ret); + + return ret; +} + + +static int +qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha, + uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask) +{ + unsigned long timeout; + uint32_t temp; + + /* jiffies after 100 msecs */ + timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); + do { + temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2); + if ((temp & 0x1) != 1) + break; + if (time_after_eq(jiffies, timeout)) { + ql_log(ql_log_warn, vha, 0xb152, + "Error in processing mdiobus idle\n"); + return -1; + } + } while (1); + + return 0; +} + +static int +qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1, + uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value) +{ + int ret = 0; + + ret = qla8044_poll_wait_for_ready(vha, addr1, mask); + if (ret == -1) + return -1; + + qla8044_wr_reg_indirect(vha, addr3, value); + qla8044_wr_reg_indirect(vha, addr1, addr); + + ret = qla8044_poll_wait_for_ready(vha, addr1, mask); + if (ret == -1) + return -1; + + return 0; +} +/* + * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask, + * Shift Left,Right/OR/XOR with values RMW header and write value to waddr. + * + * @vha : Pointer to adapter structure + * @raddr : CRB address to read from + * @waddr : CRB address to write to + * @p_rmw_hdr : header with shift/or/xor values. 
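+ *
+ * The value written is derived as
+ * ((((v & test_mask) << shl) >> shr) | or_value) ^ xor_value
+ * where v is either the CRB word read from raddr or, when
+ * index_a is set, the saved reset-template array entry it
+ * selects.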
+ * + */ +static void +qla8044_rmw_crb_reg(struct scsi_qla_host *vha, + uint32_t raddr, uint32_t waddr, struct qla8044_rmw *p_rmw_hdr) +{ + uint32_t value; + + if (p_rmw_hdr->index_a) + value = vha->reset_tmplt.array[p_rmw_hdr->index_a]; + else + qla8044_rd_reg_indirect(vha, raddr, &value); + value &= p_rmw_hdr->test_mask; + value <<= p_rmw_hdr->shl; + value >>= p_rmw_hdr->shr; + value |= p_rmw_hdr->or_value; + value ^= p_rmw_hdr->xor_value; + qla8044_wr_reg_indirect(vha, waddr, value); + return; +} + +inline void +qla8044_set_qsnt_ready(struct scsi_qla_host *vha) +{ + uint32_t qsnt_state; + struct qla_hw_data *ha = vha->hw; + + qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + qsnt_state |= (1 << ha->portnum); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state); + ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n", + __func__, vha->host_no, qsnt_state); +} + +void +qla8044_clear_qsnt_ready(struct scsi_qla_host *vha) +{ + uint32_t qsnt_state; + struct qla_hw_data *ha = vha->hw; + + qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + qsnt_state &= ~(1 << ha->portnum); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state); + ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n", + __func__, vha->host_no, qsnt_state); +} + +/** + * + * qla8044_lock_recovery - Recovers the idc_lock. + * @ha : Pointer to adapter structure + * + * Lock Recovery Register + * 5-2 Lock recovery owner: Function ID of driver doing lock recovery, + * valid if bits 1..0 are set by driver doing lock recovery. + * 1-0 1 - Driver intends to force unlock the IDC lock. + * 2 - Driver is moving forward to unlock the IDC lock. Driver clears + * this field after force unlocking the IDC lock. + * + * Lock Recovery process + * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is + * greater than 0, then wait for the other driver to unlock otherwise + * move to the next step. + * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY + * register bits 1..0 and also set the function# in bits 5..2. + * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms. + * Wait for the other driver to perform lock recovery if the function + * number in bits 5..2 has changed, otherwise move to the next step. + * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0 + * leaving your function# in bits 5..2. + * e. Force unlock using the DRIVER_UNLOCK register and immediately clear + * the IDC_LOCK_RECOVERY bits 5..0 by writing 0. 
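+ *
+ * Example encoding (assuming INTENT_TO_RECOVER is 1 and
+ * PROCEED_TO_RECOVER is 2, matching steps b and d above): function 3
+ * writes (3 << 2) | 1 = 0x0D to declare intent, then
+ * (3 << 2) | 2 = 0x0E before force-unlocking, and finally 0 to clear.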
+ **/ +static int +qla8044_lock_recovery(struct scsi_qla_host *vha) +{ + uint32_t lock = 0, lockid; + struct qla_hw_data *ha = vha->hw; + + lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY); + + /* Check for other Recovery in progress, go wait */ + if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0) + return QLA_FUNCTION_FAILED; + + /* Intent to Recover */ + qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, + (ha->portnum << + IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER); + msleep(200); + + /* Check Intent to Recover is advertised */ + lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY); + if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum << + IDC_LOCK_RECOVERY_STATE_SHIFT_BITS)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_p3p, vha, 0xb08B, "%s:%d: IDC Lock recovery initiated\n" + , __func__, ha->portnum); + + /* Proceed to Recover */ + qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, + (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | + PROCEED_TO_RECOVER); + + /* Force Unlock() */ + qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF); + qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK); + + /* Clear bits 0-5 in IDC_RECOVERY register*/ + qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0); + + /* Get lock() */ + lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK); + if (lock) { + lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); + lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum; + qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid); + return QLA_SUCCESS; + } else + return QLA_FUNCTION_FAILED; +} + +int +qla8044_idc_lock(struct qla_hw_data *ha) +{ + uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0; + uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + + while (status == 0) { + /* acquire semaphore5 from PCI HW block */ + status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK); + + if (status) { + /* Increment Counter (8-31) and update func_num (0-7) on + * getting a successful lock */ + lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); + lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum; + qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id); + break; + } + + if (timeout == 0) + first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); + + if (++timeout >= + (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) { + tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID); + func_num = tmo_owner & 0xFF; + lock_cnt = tmo_owner >> 8; + ql_log(ql_log_warn, vha, 0xb114, + "%s: Lock by func %d failed after 2s, lock held " + "by func %d, lock count %d, first_owner %d\n", + __func__, ha->portnum, func_num, lock_cnt, + (first_owner & 0xFF)); + if (first_owner != tmo_owner) { + /* Some other driver got lock, + * OR same driver got lock again (counter + * value changed), when we were waiting for + * lock. Retry for another 2 sec */ + ql_dbg(ql_dbg_p3p, vha, 0xb115, + "%s: %d: IDC lock failed\n", + __func__, ha->portnum); + timeout = 0; + } else { + /* Same driver holding lock > 2sec. 
+				 * Force Recovery */
+				if (qla8044_lock_recovery(vha) == QLA_SUCCESS) {
+					/* Recovered and got lock */
+					ret_val = QLA_SUCCESS;
+					ql_dbg(ql_dbg_p3p, vha, 0xb116,
+					    "%s: IDC lock Recovery by %d "
+					    "successful...\n", __func__,
+					    ha->portnum);
+					break;
+				}
+				/* Recovery failed, some other function
+				 * has the lock, wait for 2secs
+				 * and retry
+				 */
+				ql_dbg(ql_dbg_p3p, vha, 0xb08a,
+				    "%s: IDC lock Recovery by %d "
+				    "failed, Retrying timeout\n", __func__,
+				    ha->portnum);
+				timeout = 0;
+			}
+		}
+		msleep(QLA8044_DRV_LOCK_MSLEEP);
+	}
+	return ret_val;
+}
+
+void
+qla8044_idc_unlock(struct qla_hw_data *ha)
+{
+	int id;
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+	id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+
+	if ((id & 0xFF) != ha->portnum) {
+		ql_log(ql_log_warn, vha, 0xb118,
+		    "%s: IDC Unlock by %d failed, lock owner is %d!\n",
+		    __func__, ha->portnum, (id & 0xFF));
+		return;
+	}
+
+	/* Keep lock counter value, update the ha->func_num to 0xFF */
+	qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF));
+	qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
+}
+
+/* 8044 Flash Lock/Unlock functions */
+static int
+qla8044_flash_lock(scsi_qla_host_t *vha)
+{
+	int lock_owner;
+	int timeout = 0;
+	uint32_t lock_status = 0;
+	int ret_val = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	while (lock_status == 0) {
+		lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK);
+		if (lock_status)
+			break;
+
+		if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) {
+			lock_owner = qla8044_rd_reg(ha,
+			    QLA8044_FLASH_LOCK_ID);
+			ql_log(ql_log_warn, vha, 0xb113,
+			    "%s: Flash lock timeout: func %d gave up waiting, lock held by func %d\n",
+			    __func__, ha->portnum, lock_owner);
+			ret_val = QLA_FUNCTION_FAILED;
+			break;
+		}
+		msleep(20);
+	}
+	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum);
+	return ret_val;
+}
+
+static void
+qla8044_flash_unlock(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Reading the FLASH_UNLOCK register unlocks the Flash */
+	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
+	qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
+}
+
+static void
+qla8044_flash_lock_recovery(struct scsi_qla_host *vha)
+{
+	if (qla8044_flash_lock(vha)) {
+		/* Someone else is holding the lock. */
+		ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n");
+	}
+
+	/*
+	 * Either we got the lock, or someone
+	 * else died while holding it.
+	 * In either case, unlock.
+	 */
+	qla8044_flash_unlock(vha);
+}
+
+/*
+ * Address and length are byte address
+ */
+static int
+qla8044_read_flash_data(scsi_qla_host_t *vha, uint8_t *p_data,
+	uint32_t flash_addr, int u32_word_count)
+{
+	int i, ret_val = QLA_SUCCESS;
+	uint32_t u32_word;
+
+	if (qla8044_flash_lock(vha) != QLA_SUCCESS) {
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_lock_error;
+	}
+
+	if (flash_addr & 0x03) {
+		ql_log(ql_log_warn, vha, 0xb117,
+		    "%s: Illegal addr = 0x%x\n", __func__, flash_addr);
+		ret_val = QLA_FUNCTION_FAILED;
+		goto exit_flash_read;
+	}
+
+	for (i = 0; i < u32_word_count; i++) {
+		if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW,
+		    (flash_addr & 0xFFFF0000))) {
+			ql_log(ql_log_warn, vha, 0xb119,
+			    "%s: failed to write addr 0x%x to "
+			    "FLASH_DIRECT_WINDOW!\n
", + __func__, flash_addr); + ret_val = QLA_FUNCTION_FAILED; + goto exit_flash_read; + } + + ret_val = qla8044_rd_reg_indirect(vha, + QLA8044_FLASH_DIRECT_DATA(flash_addr), + &u32_word); + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xb08c, + "%s: failed to read addr 0x%x!\n", + __func__, flash_addr); + goto exit_flash_read; + } + + *(uint32_t *)p_data = u32_word; + p_data = p_data + 4; + flash_addr = flash_addr + 4; + } + +exit_flash_read: + qla8044_flash_unlock(vha); + +exit_lock_error: + return ret_val; +} + +/* + * Address and length are byte address + */ +uint8_t * +qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, + uint32_t offset, uint32_t length) +{ + scsi_block_requests(vha->host); + if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4) + != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xb08d, + "%s: Failed to read from flash\n", + __func__); + } + scsi_unblock_requests(vha->host); + return buf; +} + +inline int +qla8044_need_reset(struct scsi_qla_host *vha) +{ + uint32_t drv_state, drv_active; + int rval; + struct qla_hw_data *ha = vha->hw; + + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + + rval = drv_state & (1 << ha->portnum); + + if (ha->flags.eeh_busy && drv_active) + rval = 1; + return rval; +} + +/* + * qla8044_write_list - Write the value (p_entry->arg2) to address specified + * by p_entry->arg1 for all entries in header with delay of p_hdr->delay between + * entries. + * + * @vha : Pointer to adapter structure + * @p_hdr : reset_entry header for WRITE_LIST opcode. + * + */ +static void +qla8044_write_list(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + struct qla8044_entry *p_entry; + uint32_t i; + + p_entry = (struct qla8044_entry *)((char *)p_hdr + + sizeof(struct qla8044_reset_entry_hdr)); + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2); + if (p_hdr->delay) + udelay((uint32_t)(p_hdr->delay)); + } +} + +/* + * qla8044_read_write_list - Read from address specified by p_entry->arg1, + * write value read to address specified by p_entry->arg2, for all entries in + * header with delay of p_hdr->delay between entries. + * + * @vha : Pointer to adapter structure + * @p_hdr : reset_entry header for READ_WRITE_LIST opcode. + * + */ +static void +qla8044_read_write_list(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + struct qla8044_entry *p_entry; + uint32_t i; + + p_entry = (struct qla8044_entry *)((char *)p_hdr + + sizeof(struct qla8044_reset_entry_hdr)); + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla8044_read_write_crb_reg(vha, p_entry->arg1, + p_entry->arg2); + if (p_hdr->delay) + udelay((uint32_t)(p_hdr->delay)); + } +} + +/* + * qla8044_poll_reg - Poll the given CRB addr for duration msecs till + * value read ANDed with test_mask is equal to test_result. + * + * @ha : Pointer to adapter structure + * @addr : CRB register address + * @duration : Poll for total of "duration" msecs + * @test_mask : Mask value read with "test_mask" + * @test_result : Compare (value&test_mask) with test_result. 
+ * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + */ +static int +qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr, + int duration, uint32_t test_mask, uint32_t test_result) +{ + uint32_t value; + int timeout_error; + uint8_t retries; + int ret_val = QLA_SUCCESS; + + ret_val = qla8044_rd_reg_indirect(vha, addr, &value); + if (ret_val == QLA_FUNCTION_FAILED) { + timeout_error = 1; + goto exit_poll_reg; + } + + /* poll every 1/10 of the total duration */ + retries = duration/10; + + do { + if ((value & test_mask) != test_result) { + timeout_error = 1; + msleep(duration/10); + ret_val = qla8044_rd_reg_indirect(vha, addr, &value); + if (ret_val == QLA_FUNCTION_FAILED) { + timeout_error = 1; + goto exit_poll_reg; + } + } else { + timeout_error = 0; + break; + } + } while (retries--); + +exit_poll_reg: + if (timeout_error) { + vha->reset_tmplt.seq_error++; + ql_log(ql_log_fatal, vha, 0xb090, + "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n", + __func__, value, test_mask, test_result); + } + + return timeout_error; +} + +/* + * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB + * register specified by p_entry->arg1 and compare (value AND test_mask) with + * test_result to validate it. Wait for p_hdr->delay between processing entries. + * + * @ha : Pointer to adapter structure + * @p_hdr : reset_entry header for POLL_LIST opcode. + * + */ +static void +qla8044_poll_list(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + long delay; + struct qla8044_entry *p_entry; + struct qla8044_poll *p_poll; + uint32_t i; + uint32_t value; + + p_poll = (struct qla8044_poll *) + ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr)); + + /* Entries start after 8 byte qla8044_poll, poll header contains + * the test_mask, test_value. + */ + p_entry = (struct qla8044_entry *)((char *)p_poll + + sizeof(struct qla8044_poll)); + + delay = (long)p_hdr->delay; + + if (!delay) { + for (i = 0; i < p_hdr->count; i++, p_entry++) + qla8044_poll_reg(vha, p_entry->arg1, + delay, p_poll->test_mask, p_poll->test_value); + } else { + for (i = 0; i < p_hdr->count; i++, p_entry++) { + if (delay) { + if (qla8044_poll_reg(vha, + p_entry->arg1, delay, + p_poll->test_mask, + p_poll->test_value)) { + /*If + * (data_read&test_mask != test_value) + * read TIMEOUT_ADDR (arg1) and + * ADDR (arg2) registers + */ + qla8044_rd_reg_indirect(vha, + p_entry->arg1, &value); + qla8044_rd_reg_indirect(vha, + p_entry->arg2, &value); + } + } + } + } +} + +/* + * qla8044_poll_write_list - Write dr_value, ar_value to dr_addr/ar_addr, + * read ar_addr, if (value& test_mask != test_mask) re-read till timeout + * expires. + * + * @vha : Pointer to adapter structure + * @p_hdr : reset entry header for POLL_WRITE_LIST opcode. 
+ * + */ +static void +qla8044_poll_write_list(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + long delay; + struct qla8044_quad_entry *p_entry; + struct qla8044_poll *p_poll; + uint32_t i; + + p_poll = (struct qla8044_poll *)((char *)p_hdr + + sizeof(struct qla8044_reset_entry_hdr)); + + p_entry = (struct qla8044_quad_entry *)((char *)p_poll + + sizeof(struct qla8044_poll)); + + delay = (long)p_hdr->delay; + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla8044_wr_reg_indirect(vha, + p_entry->dr_addr, p_entry->dr_value); + qla8044_wr_reg_indirect(vha, + p_entry->ar_addr, p_entry->ar_value); + if (delay) { + if (qla8044_poll_reg(vha, + p_entry->ar_addr, delay, + p_poll->test_mask, + p_poll->test_value)) { + ql_dbg(ql_dbg_p3p, vha, 0xb091, + "%s: Timeout Error: poll list, ", + __func__); + ql_dbg(ql_dbg_p3p, vha, 0xb092, + "item_num %d, entry_num %d\n", i, + vha->reset_tmplt.seq_index); + } + } + } +} + +/* + * qla8044_read_modify_write - Read value from p_entry->arg1, modify the + * value, write value to p_entry->arg2. Process entries with p_hdr->delay + * between entries. + * + * @vha : Pointer to adapter structure + * @p_hdr : header with shift/or/xor values. + * + */ +static void +qla8044_read_modify_write(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + struct qla8044_entry *p_entry; + struct qla8044_rmw *p_rmw_hdr; + uint32_t i; + + p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr + + sizeof(struct qla8044_reset_entry_hdr)); + + p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr + + sizeof(struct qla8044_rmw)); + + for (i = 0; i < p_hdr->count; i++, p_entry++) { + qla8044_rmw_crb_reg(vha, p_entry->arg1, + p_entry->arg2, p_rmw_hdr); + if (p_hdr->delay) + udelay((uint32_t)(p_hdr->delay)); + } +} + +/* + * qla8044_pause - Wait for p_hdr->delay msecs, called between processing + * two entries of a sequence. + * + * @vha : Pointer to adapter structure + * @p_hdr : Common reset entry header. + * + */ +static +void qla8044_pause(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + if (p_hdr->delay) + mdelay((uint32_t)((long)p_hdr->delay)); +} + +/* + * qla8044_template_end - Indicates end of reset sequence processing. + * + * @vha : Pointer to adapter structure + * @p_hdr : Common reset entry header. + * + */ +static void +qla8044_template_end(struct scsi_qla_host *vha, + struct qla8044_reset_entry_hdr *p_hdr) +{ + vha->reset_tmplt.template_end = 1; + + if (vha->reset_tmplt.seq_error == 0) { + ql_dbg(ql_dbg_p3p, vha, 0xb093, + "%s: Reset sequence completed SUCCESSFULLY.\n", __func__); + } else { + ql_log(ql_log_fatal, vha, 0xb094, + "%s: Reset sequence completed with some timeout " + "errors.\n", __func__); + } +} + +/* + * qla8044_poll_read_list - Write ar_value to ar_addr register, read ar_addr, + * if (value & test_mask != test_value) re-read till timeout value expires, + * read dr_addr register and assign to reset_tmplt.array. + * + * @vha : Pointer to adapter structure + * @p_hdr : Common reset entry header. 
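+ *
+ * Each captured dr_addr value is appended to reset_tmplt.array[],
+ * where a later READ_MODIFY_WRITE entry can reference it through its
+ * index_a field (see qla8044_rmw_crb_reg above).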
+ *
+ */
+static void
+qla8044_poll_read_list(struct scsi_qla_host *vha,
+	struct qla8044_reset_entry_hdr *p_hdr)
+{
+	long delay;
+	int index;
+	struct qla8044_quad_entry *p_entry;
+	struct qla8044_poll *p_poll;
+	uint32_t i;
+	uint32_t value;
+
+	p_poll = (struct qla8044_poll *)
+	    ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
+
+	p_entry = (struct qla8044_quad_entry *)
+	    ((char *)p_poll + sizeof(struct qla8044_poll));
+
+	delay = (long)p_hdr->delay;
+
+	for (i = 0; i < p_hdr->count; i++, p_entry++) {
+		qla8044_wr_reg_indirect(vha, p_entry->ar_addr,
+		    p_entry->ar_value);
+		if (delay) {
+			if (qla8044_poll_reg(vha, p_entry->ar_addr, delay,
+			    p_poll->test_mask, p_poll->test_value)) {
+				ql_dbg(ql_dbg_p3p, vha, 0xb095,
+				    "%s: Timeout Error: poll "
+				    "list, ", __func__);
+				ql_dbg(ql_dbg_p3p, vha, 0xb096,
+				    "Item_num %d, "
+				    "entry_num %d\n", i,
+				    vha->reset_tmplt.seq_index);
+			} else {
+				index = vha->reset_tmplt.array_index;
+				qla8044_rd_reg_indirect(vha,
+				    p_entry->dr_addr, &value);
+				vha->reset_tmplt.array[index++] = value;
+				if (index == QLA8044_MAX_RESET_SEQ_ENTRIES)
+					vha->reset_tmplt.array_index = 1;
+			}
+		}
+	}
+}
+
+/*
+ * qla8044_process_reset_template - Process all entries in the reset
+ * template until an entry with the SEQ_END opcode, which indicates the
+ * end of reset template processing. Each entry has a reset entry header
+ * with the entry opcode/command, the size of the entry, the number of
+ * entries in the sub-sequence, and a delay in microseconds or a timeout
+ * in milliseconds.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_buff : Pointer to the first reset entry of the sequence.
+ *
+ */
+static void
+qla8044_process_reset_template(struct scsi_qla_host *vha,
+	char *p_buff)
+{
+	int index, entries;
+	struct qla8044_reset_entry_hdr *p_hdr;
+	char *p_entry = p_buff;
+
+	vha->reset_tmplt.seq_end = 0;
+	vha->reset_tmplt.template_end = 0;
+	entries = vha->reset_tmplt.hdr->entries;
+	index = vha->reset_tmplt.seq_index;
+
+	for (; (!vha->reset_tmplt.seq_end) && (index < entries); index++) {
+		p_hdr = (struct qla8044_reset_entry_hdr *)p_entry;
+		switch (p_hdr->cmd) {
+		case OPCODE_NOP:
+			break;
+		case OPCODE_WRITE_LIST:
+			qla8044_write_list(vha, p_hdr);
+			break;
+		case OPCODE_READ_WRITE_LIST:
+			qla8044_read_write_list(vha, p_hdr);
+			break;
+		case OPCODE_POLL_LIST:
+			qla8044_poll_list(vha, p_hdr);
+			break;
+		case OPCODE_POLL_WRITE_LIST:
+			qla8044_poll_write_list(vha, p_hdr);
+			break;
+		case OPCODE_READ_MODIFY_WRITE:
+			qla8044_read_modify_write(vha, p_hdr);
+			break;
+		case OPCODE_SEQ_PAUSE:
+			qla8044_pause(vha, p_hdr);
+			break;
+		case OPCODE_SEQ_END:
+			vha->reset_tmplt.seq_end = 1;
+			break;
+		case OPCODE_TMPL_END:
+			qla8044_template_end(vha, p_hdr);
+			break;
+		case OPCODE_POLL_READ_LIST:
+			qla8044_poll_read_list(vha, p_hdr);
+			break;
+		default:
+			ql_log(ql_log_fatal, vha, 0xb097,
+			    "%s: Unknown command ==> 0x%04x on "
+			    "entry = %d\n", __func__, p_hdr->cmd, index);
+			break;
+		}
+		/*
+		 * Set pointer to next entry in the sequence.
+ */ + p_entry += p_hdr->size; + } + vha->reset_tmplt.seq_index = index; +} + +static void +qla8044_process_init_seq(struct scsi_qla_host *vha) +{ + qla8044_process_reset_template(vha, + vha->reset_tmplt.init_offset); + if (vha->reset_tmplt.seq_end != 1) + ql_log(ql_log_fatal, vha, 0xb098, + "%s: Abrupt INIT Sub-Sequence end.\n", + __func__); +} + +static void +qla8044_process_stop_seq(struct scsi_qla_host *vha) +{ + vha->reset_tmplt.seq_index = 0; + qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset); + if (vha->reset_tmplt.seq_end != 1) + ql_log(ql_log_fatal, vha, 0xb099, + "%s: Abrupt STOP Sub-Sequence end.\n", __func__); +} + +static void +qla8044_process_start_seq(struct scsi_qla_host *vha) +{ + qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset); + if (vha->reset_tmplt.template_end != 1) + ql_log(ql_log_fatal, vha, 0xb09a, + "%s: Abrupt START Sub-Sequence end.\n", + __func__); +} + +static int +qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha, + uint32_t flash_addr, uint8_t *p_data, int u32_word_count) +{ + uint32_t i; + uint32_t u32_word; + uint32_t flash_offset; + uint32_t addr = flash_addr; + int ret_val = QLA_SUCCESS; + + flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1); + + if (addr & 0x3) { + ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n", + __func__, addr); + ret_val = QLA_FUNCTION_FAILED; + goto exit_lockless_read; + } + + ret_val = qla8044_wr_reg_indirect(vha, + QLA8044_FLASH_DIRECT_WINDOW, (addr)); + + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb09c, + "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", + __func__, addr); + goto exit_lockless_read; + } + + /* Check if data is spread across multiple sectors */ + if ((flash_offset + (u32_word_count * sizeof(uint32_t))) > + (QLA8044_FLASH_SECTOR_SIZE - 1)) { + /* Multi sector read */ + for (i = 0; i < u32_word_count; i++) { + ret_val = qla8044_rd_reg_indirect(vha, + QLA8044_FLASH_DIRECT_DATA(addr), &u32_word); + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb09d, + "%s: failed to read addr 0x%x!\n", + __func__, addr); + goto exit_lockless_read; + } + *(uint32_t *)p_data = u32_word; + p_data = p_data + 4; + addr = addr + 4; + flash_offset = flash_offset + 4; + if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) { + /* This write is needed once for each sector */ + ret_val = qla8044_wr_reg_indirect(vha, + QLA8044_FLASH_DIRECT_WINDOW, (addr)); + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb09f, + "%s: failed to write addr " + "0x%x to FLASH_DIRECT_WINDOW!\n", + __func__, addr); + goto exit_lockless_read; + } + flash_offset = 0; + } + } + } else { + /* Single sector read */ + for (i = 0; i < u32_word_count; i++) { + ret_val = qla8044_rd_reg_indirect(vha, + QLA8044_FLASH_DIRECT_DATA(addr), &u32_word); + if (ret_val != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb0a0, + "%s: failed to read addr 0x%x!\n", + __func__, addr); + goto exit_lockless_read; + } + *(uint32_t *)p_data = u32_word; + p_data = p_data + 4; + addr = addr + 4; + } + } + +exit_lockless_read: + return ret_val; +} + +/* + * qla8044_ms_mem_write_128b - Writes data to MS/off-chip memory + * + * @vha : Pointer to adapter structure + * addr : Flash address to write to + * data : Data to be written + * count : word_count to be written + * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + */ +static int +qla8044_ms_mem_write_128b(struct scsi_qla_host *vha, + uint64_t addr, uint32_t *data, uint32_t count) +{ + int i, j, ret_val = QLA_SUCCESS; + uint32_t 
agt_ctrl; + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + + /* Only 128-bit aligned access */ + if (addr & 0xF) { + ret_val = QLA_FUNCTION_FAILED; + goto exit_ms_mem_write; + } + write_lock_irqsave(&ha->hw_lock, flags); + + /* Write address */ + ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a1, + "%s: write to AGT_ADDR_HI failed!\n", __func__); + goto exit_ms_mem_write_unlock; + } + + for (i = 0; i < count; i++, addr += 16) { + if (!((QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_QDR_NET, + QLA8044_ADDR_QDR_NET_MAX)) || + (QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_DDR_NET, + QLA8044_ADDR_DDR_NET_MAX)))) { + ret_val = QLA_FUNCTION_FAILED; + goto exit_ms_mem_write_unlock; + } + + ret_val = qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_ADDR_LO, addr); + + /* Write data */ + ret_val += qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_WRDATA_LO, *data++); + ret_val += qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_WRDATA_HI, *data++); + ret_val += qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_WRDATA_ULO, *data++); + ret_val += qla8044_wr_reg_indirect(vha, + MD_MIU_TEST_AGT_WRDATA_UHI, *data++); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a2, + "%s: write to AGT_WRDATA failed!\n", + __func__); + goto exit_ms_mem_write_unlock; + } + + /* Check write status */ + ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, + MIU_TA_CTL_WRITE_ENABLE); + ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, + MIU_TA_CTL_WRITE_START); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a3, + "%s: write to AGT_CTRL failed!\n", __func__); + goto exit_ms_mem_write_unlock; + } + + for (j = 0; j < MAX_CTL_CHECK; j++) { + ret_val = qla8044_rd_reg_indirect(vha, + MD_MIU_TEST_AGT_CTRL, &agt_ctrl); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a4, + "%s: failed to read " + "MD_MIU_TEST_AGT_CTRL!\n", __func__); + goto exit_ms_mem_write_unlock; + } + if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) + break; + } + + /* Status check failed */ + if (j >= MAX_CTL_CHECK) { + ql_log(ql_log_fatal, vha, 0xb0a5, + "%s: MS memory write failed!\n", + __func__); + ret_val = QLA_FUNCTION_FAILED; + goto exit_ms_mem_write_unlock; + } + } + +exit_ms_mem_write_unlock: + write_unlock_irqrestore(&ha->hw_lock, flags); + +exit_ms_mem_write: + return ret_val; +} + +static int +qla8044_copy_bootloader(struct scsi_qla_host *vha) +{ + uint8_t *p_cache; + uint32_t src, count, size; + uint64_t dest; + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + src = QLA8044_BOOTLOADER_FLASH_ADDR; + dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR); + size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE); + + /* 128 bit alignment check */ + if (size & 0xF) + size = (size + 16) & ~0xF; + + /* 16 byte count */ + count = size/16; + + p_cache = vmalloc(size); + if (p_cache == NULL) { + ql_log(ql_log_fatal, vha, 0xb0a6, + "%s: Failed to allocate memory for " + "boot loader cache\n", __func__); + ret_val = QLA_FUNCTION_FAILED; + goto exit_copy_bootloader; + } + + ret_val = qla8044_lockless_flash_read_u32(vha, src, + p_cache, size/sizeof(uint32_t)); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a7, + "%s: Error reading F/W from flash!!!\n", __func__); + goto exit_copy_error; + } + ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n", + __func__); + + /* 128 bit/16 byte write to MS memory */ + ret_val = qla8044_ms_mem_write_128b(vha, dest, 
+ (uint32_t *)p_cache, count); + if (ret_val == QLA_FUNCTION_FAILED) { + ql_log(ql_log_fatal, vha, 0xb0a9, + "%s: Error writing F/W to MS !!!\n", __func__); + goto exit_copy_error; + } + ql_dbg(ql_dbg_p3p, vha, 0xb0aa, + "%s: Wrote F/W (size %d) to MS !!!\n", + __func__, size); + +exit_copy_error: + vfree(p_cache); + +exit_copy_bootloader: + return ret_val; +} + +static int +qla8044_restart(struct scsi_qla_host *vha) +{ + int ret_val = QLA_SUCCESS; + struct qla_hw_data *ha = vha->hw; + + qla8044_process_stop_seq(vha); + + /* Collect minidump */ + if (ql2xmdenable) + qla8044_get_minidump(vha); + else + ql_log(ql_log_fatal, vha, 0xb14c, + "Minidump disabled.\n"); + + qla8044_process_init_seq(vha); + + if (qla8044_copy_bootloader(vha)) { + ql_log(ql_log_fatal, vha, 0xb0ab, + "%s: Copy bootloader, firmware restart failed!\n", + __func__); + ret_val = QLA_FUNCTION_FAILED; + goto exit_restart; + } + + /* + * Loads F/W from flash + */ + qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH); + + qla8044_process_start_seq(vha); + +exit_restart: + return ret_val; +} + +/* + * qla8044_check_cmd_peg_status - Check peg status to see if Peg is + * initialized. + * + * @ha : Pointer to adapter structure + * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + */ +static int +qla8044_check_cmd_peg_status(struct scsi_qla_host *vha) +{ + uint32_t val, ret_val = QLA_FUNCTION_FAILED; + int retries = CRB_CMDPEG_CHECK_RETRY_COUNT; + struct qla_hw_data *ha = vha->hw; + + do { + val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE); + if (val == PHAN_INITIALIZE_COMPLETE) { + ql_dbg(ql_dbg_p3p, vha, 0xb0ac, + "%s: Command Peg initialization " + "complete! state=0x%x\n", __func__, val); + ret_val = QLA_SUCCESS; + break; + } + msleep(CRB_CMDPEG_CHECK_DELAY); + } while (--retries); + + return ret_val; +} + +static int +qla8044_start_firmware(struct scsi_qla_host *vha) +{ + int ret_val = QLA_SUCCESS; + + if (qla8044_restart(vha)) { + ql_log(ql_log_fatal, vha, 0xb0ad, + "%s: Restart Error!!!, Need Reset!!!\n", + __func__); + ret_val = QLA_FUNCTION_FAILED; + goto exit_start_fw; + } else + ql_dbg(ql_dbg_p3p, vha, 0xb0af, + "%s: Restart done!\n", __func__); + + ret_val = qla8044_check_cmd_peg_status(vha); + if (ret_val) { + ql_log(ql_log_fatal, vha, 0xb0b0, + "%s: Peg not initialized!\n", __func__); + ret_val = QLA_FUNCTION_FAILED; + } + +exit_start_fw: + return ret_val; +} + +void +qla8044_clear_drv_active(struct qla_hw_data *ha) +{ + uint32_t drv_active; + struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); + + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + drv_active &= ~(1 << (ha->portnum)); + + ql_log(ql_log_info, vha, 0xb0b1, + "%s(%ld): drv_active: 0x%08x\n", + __func__, vha->host_no, drv_active); + + qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active); +} + +/* + * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw + * @ha: pointer to adapter structure + * + * Note: IDC lock must be held upon entry + **/ +static int +qla8044_device_bootstrap(struct scsi_qla_host *vha) +{ + int rval = QLA_FUNCTION_FAILED; + int i; + uint32_t old_count = 0, count = 0; + int need_reset = 0; + uint32_t idc_ctrl; + struct qla_hw_data *ha = vha->hw; + + need_reset = qla8044_need_reset(vha); + + if (!need_reset) { + old_count = qla8044_rd_direct(vha, + QLA8044_PEG_ALIVE_COUNTER_INDEX); + + for (i = 0; i < 10; i++) { + msleep(200); + + count = qla8044_rd_direct(vha, + QLA8044_PEG_ALIVE_COUNTER_INDEX); + if (count != old_count) { + rval = QLA_SUCCESS; + goto dev_ready; + } + } 
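+		/*
+		 * Ten samples of the Peg alive counter, 200 ms apart,
+		 * showed no movement: firmware is not running, so any
+		 * flash lock it held may be orphaned.  Reclaim it
+		 * before reinitializing.
+		 */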
+		qla8044_flash_lock_recovery(vha);
+	} else {
+		/* We are trying to perform a recovery here. */
+		if (ha->flags.isp82xx_fw_hung)
+			qla8044_flash_lock_recovery(vha);
+	}
+
+	/* set to DEV_INITIALIZING */
+	ql_log(ql_log_info, vha, 0xb0b2,
+	    "%s: HW State: INITIALIZING\n", __func__);
+	qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+	    QLA8XXX_DEV_INITIALIZING);
+
+	qla8044_idc_unlock(ha);
+	rval = qla8044_start_firmware(vha);
+	qla8044_idc_lock(ha);
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_info, vha, 0xb0b3,
+		    "%s: HW State: FAILED\n", __func__);
+		qla8044_clear_drv_active(ha);
+		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+		    QLA8XXX_DEV_FAILED);
+		return rval;
+	}
+
+	/* For ISP8044, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, reset it after
+	 * device goes to INIT state. */
+	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+	if (idc_ctrl & GRACEFUL_RESET_BIT1) {
+		qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+		    (idc_ctrl & ~GRACEFUL_RESET_BIT1));
+		ha->fw_dumped = 0;
+	}
+
+dev_ready:
+	ql_log(ql_log_info, vha, 0xb0b4,
+	    "%s: HW State: READY\n", __func__);
+	qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY);
+
+	return rval;
+}
+
+/*-------------------------Reset Sequence Functions-----------------------*/
+static void
+qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha)
+{
+	u8 *phdr;
+
+	if (!vha->reset_tmplt.buff) {
+		ql_log(ql_log_fatal, vha, 0xb0b5,
+		    "%s: Error Invalid reset_seq_template\n", __func__);
+		return;
+	}
+
+	phdr = vha->reset_tmplt.buff;
+	ql_dbg(ql_dbg_p3p, vha, 0xb0b6,
+	    "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X "
+	    "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n"
+	    "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n",
+	    *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
+	    *(phdr+5), *(phdr+6), *(phdr+7), *(phdr+8),
+	    *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
+	    *(phdr+13), *(phdr+14), *(phdr+15));
+}
+
+/*
+ * qla8044_reset_seq_checksum_test - Validate Reset Sequence template.
+ *
+ * @vha : Pointer to adapter structure
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha)
+{
+	uint32_t sum = 0;
+	uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff;
+	int u16_count = vha->reset_tmplt.hdr->size / sizeof(uint16_t);
+
+	while (u16_count-- > 0)
+		sum += *buff++;
+
+	while (sum >> 16)
+		sum = (sum & 0xFFFF) + (sum >> 16);
+
+	/* checksum of 0 indicates a valid template */
+	if (~sum) {
+		return QLA_SUCCESS;
+	} else {
+		ql_log(ql_log_fatal, vha, 0xb0b7,
+		    "%s: Reset seq checksum failed\n", __func__);
+		return QLA_FUNCTION_FAILED;
+	}
+}
+
+/*
+ * qla8044_read_reset_template - Read Reset Template from Flash, validate
+ * the template and store the stop/start/init sequence offsets in
+ * vha->reset_tmplt.
+ *
+ * @vha : Pointer to adapter structure
+ */
+void
+qla8044_read_reset_template(struct scsi_qla_host *vha)
+{
+	uint8_t *p_buff;
+	uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
+
+	vha->reset_tmplt.seq_error = 0;
+	vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE);
+	if (vha->reset_tmplt.buff == NULL) {
+		ql_log(ql_log_fatal, vha, 0xb0b8,
+		    "%s: Failed to allocate reset template resources\n",
+		    __func__);
+		goto exit_read_reset_template;
+	}
+
+	p_buff = vha->reset_tmplt.buff;
+	addr = QLA8044_RESET_TEMPLATE_ADDR;
+
+	tmplt_hdr_def_size =
+	    sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0b9,
+	    "%s: Read template hdr size %d from Flash\n",
+	    __func__, tmplt_hdr_def_size);
+
+	/* Copy template header from flash */
+	if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+		ql_log(ql_log_fatal, vha, 0xb0ba,
+		    "%s: Failed to read reset template\n", __func__);
+		goto exit_read_template_error;
+	}
+
+	vha->reset_tmplt.hdr =
+	    (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff;
+
+	/* Validate the template header size and signature */
+	tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
+	if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
+	    (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
+		ql_log(ql_log_fatal, vha, 0xb0bb,
+		    "%s: Template Header size invalid %d "
+		    "tmplt_hdr_def_size %d!!!\n", __func__,
+		    tmplt_hdr_size, tmplt_hdr_def_size);
+		goto exit_read_template_error;
+	}
+
+	addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size;
+	p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size;
+	tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size -
+	    vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t);
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0bc,
+	    "%s: Read rest of the template size %d\n",
+	    __func__, vha->reset_tmplt.hdr->size);
+
+	/* Copy rest of the template */
+	if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+		ql_log(ql_log_fatal, vha, 0xb0bd,
+		    "%s: Failed to read reset template\n", __func__);
+		goto exit_read_template_error;
+	}
+
+	/* Integrity check */
+	if (qla8044_reset_seq_checksum_test(vha)) {
+		ql_log(ql_log_fatal, vha, 0xb0be,
+		    "%s: Reset Seq checksum failed!\n", __func__);
+		goto exit_read_template_error;
+	}
+
+	ql_dbg(ql_dbg_p3p, vha, 0xb0bf,
+	    "%s: Reset Seq checksum passed! 
Get stop, " + "start and init seq offsets\n", __func__); + + /* Get STOP, START, INIT sequence offsets */ + vha->reset_tmplt.init_offset = vha->reset_tmplt.buff + + vha->reset_tmplt.hdr->init_seq_offset; + + vha->reset_tmplt.start_offset = vha->reset_tmplt.buff + + vha->reset_tmplt.hdr->start_seq_offset; + + vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff + + vha->reset_tmplt.hdr->hdr_size; + + qla8044_dump_reset_seq_hdr(vha); + + goto exit_read_reset_template; + +exit_read_template_error: + vfree(vha->reset_tmplt.buff); + +exit_read_reset_template: + return; +} + +void +qla8044_set_idc_dontreset(struct scsi_qla_host *vha) +{ + uint32_t idc_ctrl; + struct qla_hw_data *ha = vha->hw; + + idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); + idc_ctrl |= DONTRESET_BIT0; + ql_dbg(ql_dbg_p3p, vha, 0xb0c0, + "%s: idc_ctrl = %d\n", __func__, idc_ctrl); + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl); +} + +inline void +qla8044_set_rst_ready(struct scsi_qla_host *vha) +{ + uint32_t drv_state; + struct qla_hw_data *ha = vha->hw; + + drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + + /* For ISP8044, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function.*/ + drv_state |= (1 << ha->portnum); + + ql_log(ql_log_info, vha, 0xb0c1, + "%s(%ld): drv_state: 0x%08x\n", + __func__, vha->host_no, drv_state); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state); +} + +/** + * qla8044_need_reset_handler - Code to start reset sequence + * @ha: pointer to adapter structure + * + * Note: IDC lock must be held upon entry + **/ +static void +qla8044_need_reset_handler(struct scsi_qla_host *vha) +{ + uint32_t dev_state = 0, drv_state, drv_active; + unsigned long reset_timeout; + struct qla_hw_data *ha = vha->hw; + + ql_log(ql_log_fatal, vha, 0xb0c2, + "%s: Performing ISP error recovery\n", __func__); + + if (vha->flags.online) { + qla8044_idc_unlock(ha); + qla2x00_abort_isp_cleanup(vha); + ha->isp_ops->get_flash_version(vha, vha->req->ring); + ha->isp_ops->nvram_config(vha); + qla8044_idc_lock(ha); + } + + dev_state = qla8044_rd_direct(vha, + QLA8044_CRB_DEV_STATE_INDEX); + drv_state = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_STATE_INDEX); + drv_active = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + + ql_log(ql_log_info, vha, 0xb0c5, + "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n", + __func__, vha->host_no, drv_state, drv_active, dev_state); + + qla8044_set_rst_ready(vha); + + /* wait for 10 seconds for reset ack from all functions */ + reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); + + do { + if (time_after_eq(jiffies, reset_timeout)) { + ql_log(ql_log_info, vha, 0xb0c4, + "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n", + __func__, ha->portnum, drv_state, drv_active); + break; + } + + qla8044_idc_unlock(ha); + msleep(1000); + qla8044_idc_lock(ha); + + dev_state = qla8044_rd_direct(vha, + QLA8044_CRB_DEV_STATE_INDEX); + drv_state = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_STATE_INDEX); + drv_active = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + } while (((drv_state & drv_active) != drv_active) && + (dev_state == QLA8XXX_DEV_NEED_RESET)); + + /* Remove IDC participation of functions not acknowledging */ + if (drv_state != drv_active) { + ql_log(ql_log_info, vha, 0xb0c7, + "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n", + __func__, vha->host_no, ha->portnum, + (drv_active ^ drv_state)); + drv_active = drv_active & drv_state; 
+		qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
+		    drv_active);
+	} else {
+		/*
+		 * Reset owner should execute reset recovery,
+		 * if all functions acknowledged
+		 */
+		if ((ha->flags.nic_core_reset_owner) &&
+		    (dev_state == QLA8XXX_DEV_NEED_RESET)) {
+			ha->flags.nic_core_reset_owner = 0;
+			qla8044_device_bootstrap(vha);
+			return;
+		}
+	}
+
+	/* Exit if non active function */
+	if (!(drv_active & (1 << ha->portnum))) {
+		ha->flags.nic_core_reset_owner = 0;
+		return;
+	}
+
+	/*
+	 * Execute Reset Recovery if Reset Owner or Function 7
+	 * is the only active function
+	 */
+	if (ha->flags.nic_core_reset_owner ||
+	    ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) {
+		ha->flags.nic_core_reset_owner = 0;
+		qla8044_device_bootstrap(vha);
+	}
+}
+
+static void
+qla8044_set_drv_active(struct scsi_qla_host *vha)
+{
+	uint32_t drv_active;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+	/* For ISP8044, drv_active register has 1 bit per function,
+	 * shift 1 by func_num to set a bit for the function. */
+	drv_active |= (1 << ha->portnum);
+
+	ql_log(ql_log_info, vha, 0xb0c8,
+	    "%s(%ld): drv_active: 0x%08x\n",
+	    __func__, vha->host_no, drv_active);
+	qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
+}
+
+static int
+qla8044_check_drv_active(struct scsi_qla_host *vha)
+{
+	uint32_t drv_active;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+	if (drv_active & (1 << ha->portnum))
+		return QLA_SUCCESS;
+	else
+		return QLA_TEST_FAILED;
+}
+
+static void
+qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
+{
+	uint32_t idc_ctrl;
+	struct qla_hw_data *ha = vha->hw;
+
+	idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+	idc_ctrl &= ~DONTRESET_BIT0;
+	ql_log(ql_log_info, vha, 0xb0c9,
+	    "%s: idc_ctrl = %d\n", __func__,
+	    idc_ctrl);
+	qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
+}
+
+static int
+qla8044_set_idc_ver(struct scsi_qla_host *vha)
+{
+	int idc_ver;
+	uint32_t drv_active;
+	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+	if (drv_active == (1 << ha->portnum)) {
+		idc_ver = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_IDC_VERSION_INDEX);
+		idc_ver &= (~0xFF);
+		idc_ver |= QLA8044_IDC_VER_MAJ_VALUE;
+		qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX,
+		    idc_ver);
+		ql_log(ql_log_info, vha, 0xb0ca,
+		    "%s: IDC version updated to %d\n",
+		    __func__, idc_ver);
+	} else {
+		idc_ver = qla8044_rd_direct(vha,
+		    QLA8044_CRB_DRV_IDC_VERSION_INDEX);
+		idc_ver &= 0xFF;
+		if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) {
+			ql_log(ql_log_info, vha, 0xb0cb,
+			    "%s: qla2xxx driver IDC version %d "
+			    "is not compatible with IDC version %d "
+			    "of other drivers!\n",
+			    __func__, QLA8044_IDC_VER_MAJ_VALUE,
+			    idc_ver);
+			rval = QLA_FUNCTION_FAILED;
+			goto exit_set_idc_ver;
+		}
+	}
+
+	/* Update IDC_MINOR_VERSION */
+	idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR);
+	idc_ver &= ~(0x03 << (ha->portnum * 2));
+	idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2));
+	qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver);
+
+exit_set_idc_ver:
+	return rval;
+}
+
+static int
+qla8044_update_idc_reg(struct scsi_qla_host *vha)
+{
+	uint32_t drv_active;
+	int rval = QLA_SUCCESS;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->flags.init_done)
+		goto exit_update_idc_reg;
+
+	qla8044_idc_lock(ha);
+	qla8044_set_drv_active(vha);
+
+	drv_active = qla8044_rd_direct(vha,
QLA8044_CRB_DRV_ACTIVE_INDEX); + + /* If we are the first driver to load and + * ql2xdontresethba is not set, clear IDC_CTRL BIT0. */ + if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba) + qla8044_clear_idc_dontreset(vha); + + rval = qla8044_set_idc_ver(vha); + if (rval == QLA_FUNCTION_FAILED) + qla8044_clear_drv_active(ha); + qla8044_idc_unlock(ha); + +exit_update_idc_reg: + return rval; +} + +/** + * qla8044_need_qsnt_handler - Code to start qsnt + * @ha: pointer to adapter structure + **/ +static void +qla8044_need_qsnt_handler(struct scsi_qla_host *vha) +{ + unsigned long qsnt_timeout; + uint32_t drv_state, drv_active, dev_state; + struct qla_hw_data *ha = vha->hw; + + if (vha->flags.online) + qla2x00_quiesce_io(vha); + else + return; + + qla8044_set_qsnt_ready(vha); + + /* Wait for 30 secs for all functions to ack qsnt mode */ + qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ); + drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + + /* Shift drv_active by 1 to match drv_state. As quiescent ready bit + position is at bit 1 and drv active is at bit 0 */ + drv_active = drv_active << 1; + + while (drv_state != drv_active) { + if (time_after_eq(jiffies, qsnt_timeout)) { + /* Other functions did not ack, changing state to + * DEV_READY + */ + clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_READY); + qla8044_clear_qsnt_ready(vha); + ql_log(ql_log_info, vha, 0xb0cc, + "Timeout waiting for quiescent ack!!!\n"); + return; + } + qla8044_idc_unlock(ha); + msleep(1000); + qla8044_idc_lock(ha); + + drv_state = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_STATE_INDEX); + drv_active = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + drv_active = drv_active << 1; + } + + /* All functions have Acked. Set quiescent state */ + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + + if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) { + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_QUIESCENT); + ql_log(ql_log_info, vha, 0xb0cd, + "%s: HW State: QUIESCENT\n", __func__); + } +} + +/* + * qla8044_device_state_handler - Adapter state machine + * @ha: pointer to host adapter structure. + * + * Note: IDC lock must be UNLOCKED upon entry + **/ +int +qla8044_device_state_handler(struct scsi_qla_host *vha) +{ + uint32_t dev_state; + int rval = QLA_SUCCESS; + unsigned long dev_init_timeout; + struct qla_hw_data *ha = vha->hw; + + rval = qla8044_update_idc_reg(vha); + if (rval == QLA_FUNCTION_FAILED) + goto exit_error; + + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + ql_dbg(ql_dbg_p3p, vha, 0xb0ce, + "Device state is 0x%x = %s\n", + dev_state, dev_state < MAX_STATES ? + qdev_state(dev_state) : "Unknown"); + + /* wait for 30 seconds for device to go ready */ + dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); + + qla8044_idc_lock(ha); + + while (1) { + if (time_after_eq(jiffies, dev_init_timeout)) { + if (qla8044_check_drv_active(vha) == QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xb0cf, + "%s: Device Init Failed 0x%x = %s\n", + QLA2XXX_DRIVER_NAME, dev_state, + dev_state < MAX_STATES ? + qdev_state(dev_state) : "Unknown"); + qla8044_wr_direct(vha, + QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_FAILED); + } + } + + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + ql_log(ql_log_info, vha, 0xb0d0, + "Device state is 0x%x = %s\n", + dev_state, dev_state < MAX_STATES ? 
+		    qdev_state(dev_state) : "Unknown");
+
+		/* NOTE: Make sure idc unlocked upon exit of switch statement */
+		switch (dev_state) {
+		case QLA8XXX_DEV_READY:
+			ha->flags.nic_core_reset_owner = 0;
+			goto exit;
+		case QLA8XXX_DEV_COLD:
+			rval = qla8044_device_bootstrap(vha);
+			break;
+		case QLA8XXX_DEV_INITIALIZING:
+			qla8044_idc_unlock(ha);
+			msleep(1000);
+			qla8044_idc_lock(ha);
+			break;
+		case QLA8XXX_DEV_NEED_RESET:
+			/* For ISP8044, if NEED_RESET is set by any driver,
+			 * it should be honored, irrespective of IDC_CTRL
+			 * DONTRESET_BIT0 */
+			qla8044_need_reset_handler(vha);
+			break;
+		case QLA8XXX_DEV_NEED_QUIESCENT:
+			/* idc locked/unlocked in handler */
+			qla8044_need_qsnt_handler(vha);
+
+			/* Reset the init timeout after qsnt handler */
+			dev_init_timeout = jiffies +
+			    (ha->fcoe_reset_timeout * HZ);
+			break;
+		case QLA8XXX_DEV_QUIESCENT:
+			ql_log(ql_log_info, vha, 0xb0d1,
+			    "HW State: QUIESCENT\n");
+
+			qla8044_idc_unlock(ha);
+			msleep(1000);
+			qla8044_idc_lock(ha);
+
+			/* Reset the init timeout after qsnt handler */
+			dev_init_timeout = jiffies +
+			    (ha->fcoe_reset_timeout * HZ);
+			break;
+		case QLA8XXX_DEV_FAILED:
+			ha->flags.nic_core_reset_owner = 0;
+			qla8044_idc_unlock(ha);
+			qla8xxx_dev_failed_handler(vha);
+			rval = QLA_FUNCTION_FAILED;
+			qla8044_idc_lock(ha);
+			goto exit;
+		default:
+			qla8044_idc_unlock(ha);
+			qla8xxx_dev_failed_handler(vha);
+			rval = QLA_FUNCTION_FAILED;
+			qla8044_idc_lock(ha);
+			goto exit;
+		}
+	}
+exit:
+	qla8044_idc_unlock(ha);
+
+exit_error:
+	return rval;
+}
+
+/**
+ * qla8044_check_temp - Check the ISP8044 temperature.
+ * @vha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int
+qla8044_check_temp(struct scsi_qla_host *vha)
+{
+	uint32_t temp, temp_state, temp_val;
+	int status = QLA_SUCCESS;
+
+	temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+	temp_state = qla82xx_get_temp_state(temp);
+	temp_val = qla82xx_get_temp_val(temp);
+
+	if (temp_state == QLA82XX_TEMP_PANIC) {
+		ql_log(ql_log_warn, vha, 0xb0d2,
+		    "Device temperature %d degrees C"
+		    " exceeds maximum allowed. Hardware has been shut"
+		    " down\n", temp_val);
+		status = QLA_FUNCTION_FAILED;
+		return status;
+	} else if (temp_state == QLA82XX_TEMP_WARN) {
+		ql_log(ql_log_warn, vha, 0xb0d3,
+		    "Device temperature %d"
+		    " degrees C exceeds operating range."
+		    " Immediate action needed.\n", temp_val);
+	}
+	return 0;
+}
+
+int qla8044_read_temperature(scsi_qla_host_t *vha)
+{
+	uint32_t temp;
+
+	temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+	return qla82xx_get_temp_val(temp);
+}
+
+/**
+ * qla8044_check_fw_alive - Check firmware health
+ * @vha: Pointer to host adapter structure.
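+ *
+ * The Peg alive counter increments while firmware runs; if it is
+ * unchanged across two consecutive watchdog polls the firmware is
+ * declared hung and the PEG_HALT_STATUS registers are logged.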
+ * + * Context: Interrupt + **/ +int +qla8044_check_fw_alive(struct scsi_qla_host *vha) +{ + uint32_t fw_heartbeat_counter; + uint32_t halt_status1, halt_status2; + int status = QLA_SUCCESS; + + fw_heartbeat_counter = qla8044_rd_direct(vha, + QLA8044_PEG_ALIVE_COUNTER_INDEX); + + /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */ + if (fw_heartbeat_counter == 0xffffffff) { + ql_dbg(ql_dbg_p3p, vha, 0xb0d4, + "scsi%ld: %s: Device in frozen " + "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", + vha->host_no, __func__); + return status; + } + + if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { + vha->seconds_since_last_heartbeat++; + /* FW not alive after 2 seconds */ + if (vha->seconds_since_last_heartbeat == 2) { + vha->seconds_since_last_heartbeat = 0; + halt_status1 = qla8044_rd_direct(vha, + QLA8044_PEG_HALT_STATUS1_INDEX); + halt_status2 = qla8044_rd_direct(vha, + QLA8044_PEG_HALT_STATUS2_INDEX); + + ql_log(ql_log_info, vha, 0xb0d5, + "scsi(%ld): %s, ISP8044 " + "Dumping hw/fw registers:\n" + " PEG_HALT_STATUS1: 0x%x, " + "PEG_HALT_STATUS2: 0x%x,\n", + vha->host_no, __func__, halt_status1, + halt_status2); + status = QLA_FUNCTION_FAILED; + } + } else + vha->seconds_since_last_heartbeat = 0; + + vha->fw_heartbeat_counter = fw_heartbeat_counter; + return status; +} + +void +qla8044_watchdog(struct scsi_qla_host *vha) +{ + uint32_t dev_state, halt_status; + int halt_status_unrecoverable = 0; + struct qla_hw_data *ha = vha->hw; + + /* don't poll if reset is going on or FW hang in quiescent state */ + if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || + test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) { + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + + if (qla8044_check_fw_alive(vha)) { + ha->flags.isp82xx_fw_hung = 1; + ql_log(ql_log_warn, vha, 0xb10a, + "Firmware hung.\n"); + qla82xx_clear_pending_mbx(vha); + } + + if (qla8044_check_temp(vha)) { + set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); + ha->flags.isp82xx_fw_hung = 1; + qla2xxx_wake_dpc(vha); + } else if (dev_state == QLA8XXX_DEV_NEED_RESET && + !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { + ql_log(ql_log_info, vha, 0xb0d6, + "%s: HW State: NEED RESET!\n", + __func__); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT && + !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { + ql_log(ql_log_info, vha, 0xb0d7, + "%s: HW State: NEED QUIES detected!\n", + __func__); + set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else { + /* Check firmware health */ + if (ha->flags.isp82xx_fw_hung) { + halt_status = qla8044_rd_direct(vha, + QLA8044_PEG_HALT_STATUS1_INDEX); + if (halt_status & + QLA8044_HALT_STATUS_FW_RESET) { + ql_log(ql_log_fatal, vha, + 0xb0d8, "%s: Firmware " + "error detected device " + "is being reset\n", + __func__); + } else if (halt_status & + QLA8044_HALT_STATUS_UNRECOVERABLE) { + halt_status_unrecoverable = 1; + } + + /* Since we cannot change dev_state in interrupt + * context, set appropriate DPC flag then wakeup + * DPC */ + if (halt_status_unrecoverable) { + set_bit(ISP_UNRECOVERABLE, + &vha->dpc_flags); + } else { + if (dev_state == + QLA8XXX_DEV_QUIESCENT) { + set_bit(FCOE_CTX_RESET_NEEDED, + &vha->dpc_flags); + ql_log(ql_log_info, vha, 0xb0d9, + "%s: FW CONTEXT Reset " + "needed!\n", __func__); + } else { + ql_log(ql_log_info, vha, + 0xb0da, "%s: " + "detect abort needed\n", + __func__); + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + } + } + 
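+				/* Wake the DPC thread to act on the
+				 * flag set above. */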
qla2xxx_wake_dpc(vha); + } + } + + } +} + +static int +qla8044_minidump_process_control(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr) +{ + struct qla8044_minidump_entry_crb *crb_entry; + uint32_t read_value, opcode, poll_time, addr, index; + uint32_t crb_addr, rval = QLA_SUCCESS; + unsigned long wtime; + struct qla8044_minidump_template_hdr *tmplt_hdr; + int i; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__); + tmplt_hdr = (struct qla8044_minidump_template_hdr *) + ha->md_tmplt_hdr; + crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr; + + crb_addr = crb_entry->addr; + for (i = 0; i < crb_entry->op_count; i++) { + opcode = crb_entry->crb_ctrl.opcode; + + if (opcode & QLA82XX_DBG_OPCODE_WR) { + qla8044_wr_reg_indirect(vha, crb_addr, + crb_entry->value_1); + opcode &= ~QLA82XX_DBG_OPCODE_WR; + } + + if (opcode & QLA82XX_DBG_OPCODE_RW) { + qla8044_rd_reg_indirect(vha, crb_addr, &read_value); + qla8044_wr_reg_indirect(vha, crb_addr, read_value); + opcode &= ~QLA82XX_DBG_OPCODE_RW; + } + + if (opcode & QLA82XX_DBG_OPCODE_AND) { + qla8044_rd_reg_indirect(vha, crb_addr, &read_value); + read_value &= crb_entry->value_2; + opcode &= ~QLA82XX_DBG_OPCODE_AND; + if (opcode & QLA82XX_DBG_OPCODE_OR) { + read_value |= crb_entry->value_3; + opcode &= ~QLA82XX_DBG_OPCODE_OR; + } + qla8044_wr_reg_indirect(vha, crb_addr, read_value); + } + if (opcode & QLA82XX_DBG_OPCODE_OR) { + qla8044_rd_reg_indirect(vha, crb_addr, &read_value); + read_value |= crb_entry->value_3; + qla8044_wr_reg_indirect(vha, crb_addr, read_value); + opcode &= ~QLA82XX_DBG_OPCODE_OR; + } + if (opcode & QLA82XX_DBG_OPCODE_POLL) { + poll_time = crb_entry->crb_strd.poll_timeout; + wtime = jiffies + poll_time; + qla8044_rd_reg_indirect(vha, crb_addr, &read_value); + + do { + if ((read_value & crb_entry->value_2) == + crb_entry->value_1) { + break; + } else if (time_after_eq(jiffies, wtime)) { + /* capturing dump failed */ + rval = QLA_FUNCTION_FAILED; + break; + } else { + qla8044_rd_reg_indirect(vha, + crb_addr, &read_value); + } + } while (1); + opcode &= ~QLA82XX_DBG_OPCODE_POLL; + } + + if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else { + addr = crb_addr; + } + + qla8044_rd_reg_indirect(vha, addr, &read_value); + index = crb_entry->crb_ctrl.state_index_v; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; + } + + if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else { + addr = crb_addr; + } + + if (crb_entry->crb_ctrl.state_index_v) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = + tmplt_hdr->saved_state_array[index]; + } else { + read_value = crb_entry->value_1; + } + + qla8044_wr_reg_indirect(vha, addr, read_value); + opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE; + } + + if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = tmplt_hdr->saved_state_array[index]; + read_value <<= crb_entry->crb_ctrl.shl; + read_value >>= crb_entry->crb_ctrl.shr; + if (crb_entry->value_2) + read_value &= crb_entry->value_2; + read_value |= crb_entry->value_3; + read_value += crb_entry->value_1; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE; + } + crb_addr += 
crb_entry->crb_strd.addr_stride; + } + return rval; +} + +static void +qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla8044_minidump_entry_crb *crb_hdr; + uint32_t *data_ptr = *d_ptr; + + ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__); + crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr; + r_addr = crb_hdr->addr; + r_stride = crb_hdr->crb_strd.addr_stride; + loop_cnt = crb_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla8044_rd_reg_indirect(vha, r_addr, &r_value); + *data_ptr++ = r_addr; + *data_ptr++ = r_value; + r_addr += r_stride; + } + *d_ptr = data_ptr; +} + +static int +qla8044_minidump_process_rdmem(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t r_addr, r_value, r_data; + uint32_t i, j, loop_cnt; + struct qla8044_minidump_entry_rdmem *m_hdr; + unsigned long flags; + uint32_t *data_ptr = *d_ptr; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__); + m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr; + r_addr = m_hdr->read_addr; + loop_cnt = m_hdr->read_data_size/16; + + ql_dbg(ql_dbg_p3p, vha, 0xb0f0, + "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n", + __func__, r_addr, m_hdr->read_data_size); + + if (r_addr & 0xf) { + ql_dbg(ql_dbg_p3p, vha, 0xb0f1, + "[%s]: Read addr 0x%x not 16 bytes aligned\n", + __func__, r_addr); + return QLA_FUNCTION_FAILED; + } + + if (m_hdr->read_data_size % 16) { + ql_dbg(ql_dbg_p3p, vha, 0xb0f2, + "[%s]: Read data[0x%x] not multiple of 16 bytes\n", + __func__, m_hdr->read_data_size); + return QLA_FUNCTION_FAILED; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb0f3, + "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", + __func__, r_addr, m_hdr->read_data_size, loop_cnt); + + write_lock_irqsave(&ha->hw_lock, flags); + for (i = 0; i < loop_cnt; i++) { + qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr); + r_value = 0; + qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value); + r_value = MIU_TA_CTL_ENABLE; + qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value); + r_value = MIU_TA_CTL_START_ENABLE; + qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, + &r_value); + if ((r_value & MIU_TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + write_unlock_irqrestore(&ha->hw_lock, flags); + return QLA_SUCCESS; + } + + for (j = 0; j < 4; j++) { + qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j], + &r_data); + *data_ptr++ = r_data; + } + + r_addr += 16; + } + write_unlock_irqrestore(&ha->hw_lock, flags); + + ql_dbg(ql_dbg_p3p, vha, 0xb0f4, + "Leaving fn: %s datacount: 0x%x\n", + __func__, (loop_cnt * 16)); + + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +/* ISP83xx flash read for _RDROM _BOARD */ +static uint32_t +qla8044_minidump_process_rdrom(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t fl_addr, u32_count, rval; + struct qla8044_minidump_entry_rdrom *rom_hdr; + uint32_t *data_ptr = *d_ptr; + + rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr; + fl_addr = rom_hdr->read_addr; + u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t); + + ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n", + __func__, fl_addr, u32_count); + + rval = 
qla8044_lockless_flash_read_u32(vha, fl_addr, + (u8 *)(data_ptr), u32_count); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_fatal, vha, 0xb0f6, + "%s: Flash Read Error,Count=%d\n", __func__, u32_count); + return QLA_FUNCTION_FAILED; + } else { + data_ptr += u32_count; + *d_ptr = data_ptr; + return QLA_SUCCESS; + } +} + +static void +qla8044_mark_entry_skipped(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, int index) +{ + entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; + + ql_log(ql_log_info, vha, 0xb0f7, + "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", + vha->host_no, index, entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask); +} + +static int +qla8044_minidump_process_l2tag(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + unsigned long p_wait, w_time, p_mask; + uint32_t c_value_w, c_value_r; + struct qla8044_minidump_entry_cache *cache_hdr; + int rval = QLA_FUNCTION_FAILED; + uint32_t *data_ptr = *d_ptr; + + ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__); + cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; + + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + p_wait = cache_hdr->cache_ctrl.poll_wait; + p_mask = cache_hdr->cache_ctrl.poll_mask; + + for (i = 0; i < loop_count; i++) { + qla8044_wr_reg_indirect(vha, t_r_addr, t_value); + if (c_value_w) + qla8044_wr_reg_indirect(vha, c_addr, c_value_w); + + if (p_mask) { + w_time = jiffies + p_wait; + do { + qla8044_rd_reg_indirect(vha, c_addr, + &c_value_r); + if ((c_value_r & p_mask) == 0) { + break; + } else if (time_after_eq(jiffies, w_time)) { + /* capturing dump failed */ + return rval; + } + } while (1); + } + + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + qla8044_rd_reg_indirect(vha, addr, &r_value); + *data_ptr++ = r_value; + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +static void +qla8044_minidump_process_l1cache(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + uint32_t c_value_w; + struct qla8044_minidump_entry_cache *cache_hdr; + uint32_t *data_ptr = *d_ptr; + + cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr; + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + + for (i = 0; i < loop_count; i++) { + qla8044_wr_reg_indirect(vha, t_r_addr, t_value); + qla8044_wr_reg_indirect(vha, c_addr, c_value_w); + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + qla8044_rd_reg_indirect(vha, addr, &r_value); + *data_ptr++ = r_value; + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; +} + +static void +qla8044_minidump_process_rdocm(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr 
*entry_hdr, uint32_t **d_ptr) +{ + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla8044_minidump_entry_rdocm *ocm_hdr; + uint32_t *data_ptr = *d_ptr; + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__); + + ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr; + r_addr = ocm_hdr->read_addr; + r_stride = ocm_hdr->read_addr_stride; + loop_cnt = ocm_hdr->op_count; + + ql_dbg(ql_dbg_p3p, vha, 0xb0fa, + "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n", + __func__, r_addr, r_stride, loop_cnt); + + for (i = 0; i < loop_cnt; i++) { + r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase)); + *data_ptr++ = r_value; + r_addr += r_stride; + } + ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n", + __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t))); + + *d_ptr = data_ptr; +} + +static void +qla8044_minidump_process_rdmux(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; + struct qla8044_minidump_entry_mux *mux_hdr; + uint32_t *data_ptr = *d_ptr; + + ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__); + + mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr; + r_addr = mux_hdr->read_addr; + s_addr = mux_hdr->select_addr; + s_stride = mux_hdr->select_value_stride; + s_value = mux_hdr->select_value; + loop_cnt = mux_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla8044_wr_reg_indirect(vha, s_addr, s_value); + qla8044_rd_reg_indirect(vha, r_addr, &r_value); + *data_ptr++ = s_value; + *data_ptr++ = r_value; + s_value += s_stride; + } + *d_ptr = data_ptr; +} + +static void +qla8044_minidump_process_queue(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t s_addr, r_addr; + uint32_t r_stride, r_value, r_cnt, qid = 0; + uint32_t i, k, loop_cnt; + struct qla8044_minidump_entry_queue *q_hdr; + uint32_t *data_ptr = *d_ptr; + + ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__); + q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr; + s_addr = q_hdr->select_addr; + r_cnt = q_hdr->rd_strd.read_addr_cnt; + r_stride = q_hdr->rd_strd.read_addr_stride; + loop_cnt = q_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla8044_wr_reg_indirect(vha, s_addr, qid); + r_addr = q_hdr->read_addr; + for (k = 0; k < r_cnt; k++) { + qla8044_rd_reg_indirect(vha, r_addr, &r_value); + *data_ptr++ = r_value; + r_addr += r_stride; + } + qid += q_hdr->q_strd.queue_id_stride; + } + *d_ptr = data_ptr; +} + +/* ISP83xx functions to process new minidump entries... 
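*/

The RDCRB, RDMUX and QUEUE handlers above all share one shape: program a select or address register, read back one or more words at a fixed stride, and append the results through a write cursor that is handed back to the caller via *d_ptr. A compact sketch of the (select, data) pair variant used for RDMUX; reg_write() and reg_read() are hypothetical stand-ins for qla8044_wr_reg_indirect() and qla8044_rd_reg_indirect():

    #include <stdint.h>

    extern void reg_write(uint32_t addr, uint32_t val);
    extern uint32_t reg_read(uint32_t addr);

    /*
     * Capture 'count' (select, data) pairs: route each source onto the
     * read register, then store the select value next to the datum so
     * the dump decoder knows where each word came from.
     */
    static uint32_t *mux_capture(uint32_t sel_addr, uint32_t rd_addr,
                                 uint32_t sel, uint32_t sel_stride,
                                 uint32_t count, uint32_t *dst)
    {
        uint32_t i;

        for (i = 0; i < count; i++) {
            reg_write(sel_addr, sel);
            *dst++ = sel;
            *dst++ = reg_read(rd_addr);
            sel += sel_stride;
        }
        return dst;     /* caller stores this back, like *d_ptr */
    }

/*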
*/ +static uint32_t +qla8044_minidump_process_pollrd(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask; + uint16_t s_stride, i; + struct qla8044_minidump_entry_pollrd *pollrd_hdr; + uint32_t *data_ptr = *d_ptr; + + pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr; + s_addr = pollrd_hdr->select_addr; + r_addr = pollrd_hdr->read_addr; + s_value = pollrd_hdr->select_value; + s_stride = pollrd_hdr->select_value_stride; + + poll_wait = pollrd_hdr->poll_wait; + poll_mask = pollrd_hdr->poll_mask; + + for (i = 0; i < pollrd_hdr->op_count; i++) { + qla8044_wr_reg_indirect(vha, s_addr, s_value); + poll_wait = pollrd_hdr->poll_wait; + while (1) { + qla8044_rd_reg_indirect(vha, s_addr, &r_value); + if ((r_value & poll_mask) != 0) { + break; + } else { + usleep_range(1000, 1100); + if (--poll_wait == 0) { + ql_log(ql_log_fatal, vha, 0xb0fe, + "%s: TIMEOUT\n", __func__); + goto error; + } + } + } + qla8044_rd_reg_indirect(vha, r_addr, &r_value); + *data_ptr++ = s_value; + *data_ptr++ = r_value; + + s_value += s_stride; + } + *d_ptr = data_ptr; + return QLA_SUCCESS; + +error: + return QLA_FUNCTION_FAILED; +} + +static void +qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t sel_val1, sel_val2, t_sel_val, data, i; + uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr; + struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr; + uint32_t *data_ptr = *d_ptr; + + rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr; + sel_val1 = rdmux2_hdr->select_value_1; + sel_val2 = rdmux2_hdr->select_value_2; + sel_addr1 = rdmux2_hdr->select_addr_1; + sel_addr2 = rdmux2_hdr->select_addr_2; + sel_val_mask = rdmux2_hdr->select_value_mask; + read_addr = rdmux2_hdr->read_addr; + + for (i = 0; i < rdmux2_hdr->op_count; i++) { + qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1); + t_sel_val = sel_val1 & sel_val_mask; + *data_ptr++ = t_sel_val; + + qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); + qla8044_rd_reg_indirect(vha, read_addr, &data); + + *data_ptr++ = data; + + qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2); + t_sel_val = sel_val2 & sel_val_mask; + *data_ptr++ = t_sel_val; + + qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val); + qla8044_rd_reg_indirect(vha, read_addr, &data); + + *data_ptr++ = data; + + sel_val1 += rdmux2_hdr->select_value_stride; + sel_val2 += rdmux2_hdr->select_value_stride; + } + + *d_ptr = data_ptr; +} + +static uint32_t +qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t poll_wait, poll_mask, r_value, data; + uint32_t addr_1, addr_2, value_1, value_2; + struct qla8044_minidump_entry_pollrdmwr *poll_hdr; + uint32_t *data_ptr = *d_ptr; + + poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr; + addr_1 = poll_hdr->addr_1; + addr_2 = poll_hdr->addr_2; + value_1 = poll_hdr->value_1; + value_2 = poll_hdr->value_2; + poll_mask = poll_hdr->poll_mask; + + qla8044_wr_reg_indirect(vha, addr_1, value_1); + + poll_wait = poll_hdr->poll_wait; + while (1) { + qla8044_rd_reg_indirect(vha, addr_1, &r_value); + + if ((r_value & poll_mask) != 0) { + break; + } else { + usleep_range(1000, 1100); + if (--poll_wait == 0) { + ql_log(ql_log_fatal, vha, 0xb0ff, + "%s: TIMEOUT\n", __func__); + goto error; + } + } + } + + qla8044_rd_reg_indirect(vha, addr_2, &data); + data &= 
poll_hdr->modify_mask; + qla8044_wr_reg_indirect(vha, addr_2, data); + qla8044_wr_reg_indirect(vha, addr_1, value_2); + + poll_wait = poll_hdr->poll_wait; + while (1) { + qla8044_rd_reg_indirect(vha, addr_1, &r_value); + + if ((r_value & poll_mask) != 0) { + break; + } else { + usleep_range(1000, 1100); + if (--poll_wait == 0) { + ql_log(ql_log_fatal, vha, 0xb100, + "%s: TIMEOUT2\n", __func__); + goto error; + } + } + } + + *data_ptr++ = addr_2; + *data_ptr++ = data; + + *d_ptr = data_ptr; + + return QLA_SUCCESS; + +error: + return QLA_FUNCTION_FAILED; +} + +#define ISP8044_PEX_DMA_ENGINE_INDEX 8 +#define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000 +#define ISP8044_PEX_DMA_NUM_OFFSET 0x10000 +#define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0 +#define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04 +#define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08 + +#define ISP8044_PEX_DMA_READ_SIZE (16 * 1024) +#define ISP8044_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */ + +static int +qla8044_check_dma_engine_state(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + int rval = QLA_SUCCESS; + uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; + uint64_t dma_base_addr = 0; + struct qla8044_minidump_template_hdr *tmplt_hdr = NULL; + + tmplt_hdr = ha->md_tmplt_hdr; + dma_eng_num = + tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX]; + dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS + + (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET); + + /* Read the pex-dma's command-status-and-control register. */ + rval = qla8044_rd_reg_indirect(vha, + (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL), + &cmd_sts_and_cntrl); + if (rval) + return QLA_FUNCTION_FAILED; + + /* Check if requested pex-dma engine is available. */ + if (cmd_sts_and_cntrl & BIT_31) + return QLA_SUCCESS; + + return QLA_FUNCTION_FAILED; +} + +static int +qla8044_start_pex_dma(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr) +{ + struct qla_hw_data *ha = vha->hw; + int rval = QLA_SUCCESS, wait = 0; + uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; + uint64_t dma_base_addr = 0; + struct qla8044_minidump_template_hdr *tmplt_hdr = NULL; + + tmplt_hdr = ha->md_tmplt_hdr; + dma_eng_num = + tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX]; + dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS + + (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET); + + rval = qla8044_wr_reg_indirect(vha, + dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW, + m_hdr->desc_card_addr); + if (rval) + goto error_exit; + + rval = qla8044_wr_reg_indirect(vha, + dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0); + if (rval) + goto error_exit; + + rval = qla8044_wr_reg_indirect(vha, + dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL, + m_hdr->start_dma_cmd); + if (rval) + goto error_exit; + + /* Wait for dma operation to complete. 
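*/

The code above programs the engine's descriptor address and start command; the loop below then spins on the busy bit with a hard poll budget, so a wedged engine costs at most about 100 ms before the caller falls back to the slow register-based rdmem path. The whole kick-and-wait sequence in one sketch; reg_write_chk()/reg_read_chk() and the register offsets are hypothetical stand-ins for the checked indirect accessors:

    #include <stdint.h>

    extern int reg_write_chk(uint64_t addr, uint32_t val); /* 0 on ok */
    extern int reg_read_chk(uint64_t addr, uint32_t *val);

    #define DMA_CMD_ADDR_LO 0x00        /* illustrative engine offsets */
    #define DMA_CMD_ADDR_HI 0x04
    #define DMA_CMD_STS     0x08
    #define DMA_BUSY        (1u << 1)
    #define DMA_MAX_POLLS   (100 * 100) /* ~100 ms at 10 us per poll */

    static int dma_kick_and_wait(uint64_t base, uint32_t desc_addr,
                                 uint32_t start_cmd)
    {
        uint32_t sts;
        int i;

        /* Program the descriptor address, then issue the start command. */
        if (reg_write_chk(base + DMA_CMD_ADDR_LO, desc_addr) ||
            reg_write_chk(base + DMA_CMD_ADDR_HI, 0) ||
            reg_write_chk(base + DMA_CMD_STS, start_cmd))
            return -1;

        /* Bounded busy-wait on the engine's busy bit. */
        for (i = 0; i < DMA_MAX_POLLS; i++) {
            if (reg_read_chk(base + DMA_CMD_STS, &sts))
                return -1;
            if (!(sts & DMA_BUSY))
                return 0;   /* transfer complete */
            /* the driver udelay(10)s here, capping the total wait */
        }
        return -1;          /* caller reverts to the rdmem entry read */
    }

/*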
*/ + for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) { + rval = qla8044_rd_reg_indirect(vha, + (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL), + &cmd_sts_and_cntrl); + if (rval) + goto error_exit; + + if ((cmd_sts_and_cntrl & BIT_1) == 0) + break; + + udelay(10); + } + + /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */ + if (wait >= ISP8044_PEX_DMA_MAX_WAIT) { + rval = QLA_FUNCTION_FAILED; + goto error_exit; + } + +error_exit: + return rval; +} + +static int +qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + int rval = QLA_SUCCESS; + struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL; + uint32_t chunk_size, read_size; + uint8_t *data_ptr = (uint8_t *)*d_ptr; + void *rdmem_buffer = NULL; + dma_addr_t rdmem_dma; + struct qla8044_pex_dma_descriptor dma_desc; + + rval = qla8044_check_dma_engine_state(vha); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_p3p, vha, 0xb147, + "DMA engine not available. Fallback to rdmem-read.\n"); + return QLA_FUNCTION_FAILED; + } + + m_hdr = (void *)entry_hdr; + + rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, + ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL); + if (!rdmem_buffer) { + ql_dbg(ql_dbg_p3p, vha, 0xb148, + "Unable to allocate rdmem dma buffer\n"); + return QLA_FUNCTION_FAILED; + } + + /* Prepare pex-dma descriptor to be written to MS memory. */ + /* dma-desc-cmd layout: + * 0-3: dma-desc-cmd 0-3 + * 4-7: pcid function number + * 8-15: dma-desc-cmd 8-15 + * dma_bus_addr: dma buffer address + * cmd.read_data_size: amount of data-chunk to be read. + */ + dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f); + dma_desc.cmd.dma_desc_cmd |= + ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4); + + dma_desc.dma_bus_addr = rdmem_dma; + dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE; + read_size = 0; + + /* + * Perform rdmem operation using pex-dma. + * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE. + */ + while (read_size < m_hdr->read_data_size) { + if (m_hdr->read_data_size - read_size < + ISP8044_PEX_DMA_READ_SIZE) { + chunk_size = (m_hdr->read_data_size - read_size); + dma_desc.cmd.read_data_size = chunk_size; + } + + dma_desc.src_addr = m_hdr->read_addr + read_size; + + /* Prepare: Write pex-dma descriptor to MS memory. */ + rval = qla8044_ms_mem_write_128b(vha, + m_hdr->desc_card_addr, (void *)&dma_desc, + (sizeof(struct qla8044_pex_dma_descriptor)/16)); + if (rval) { + ql_log(ql_log_warn, vha, 0xb14a, + "%s: Error writing rdmem-dma-init to MS !!!\n", + __func__); + goto error_exit; + } + ql_dbg(ql_dbg_p3p, vha, 0xb14b, + "%s: Dma-descriptor: Instruct for rdmem dma " + "(chunk_size 0x%x).\n", __func__, chunk_size); + + /* Execute: Start pex-dma operation. 
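*/

Per the dma-desc-cmd layout described above, bits 4-7 of the command word carry the issuing PCI function, which is why the template value is masked with 0xff0f before the function number is OR-ed in. A tiny worked example of that bit-fold:

    #include <assert.h>
    #include <stdint.h>

    /* Fold a PCI function number (0-15) into bits 4-7 of the command. */
    static uint16_t dma_desc_cmd(uint16_t tmpl_cmd, unsigned int pci_func)
    {
        return (uint16_t)((tmpl_cmd & 0xff0f) | ((pci_func & 0xf) << 4));
    }

    int main(void)
    {
        /* hypothetical template command 0x2250, function 3 -> 0x2230 */
        assert(dma_desc_cmd(0x2250, 3) == 0x2230);
        return 0;
    }

/*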
*/ + rval = qla8044_start_pex_dma(vha, m_hdr); + if (rval) + goto error_exit; + + memcpy(data_ptr, rdmem_buffer, chunk_size); + data_ptr += chunk_size; + read_size += chunk_size; + } + + *d_ptr = (void *)data_ptr; + +error_exit: + if (rdmem_buffer) + dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE, + rdmem_buffer, rdmem_dma); + + return rval; +} + +static uint32_t +qla8044_minidump_process_rddfe(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + int loop_cnt; + uint32_t addr1, addr2, value, data, temp, wrVal; + uint8_t stride, stride2; + uint16_t count; + uint32_t poll, mask, data_size, modify_mask; + uint32_t wait_count = 0; + + uint32_t *data_ptr = *d_ptr; + + struct qla8044_minidump_entry_rddfe *rddfe; + rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr; + + addr1 = rddfe->addr_1; + value = rddfe->value; + stride = rddfe->stride; + stride2 = rddfe->stride2; + count = rddfe->count; + + poll = rddfe->poll; + mask = rddfe->mask; + modify_mask = rddfe->modify_mask; + data_size = rddfe->data_size; + + addr2 = addr1 + stride; + + for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) { + qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value)); + + wait_count = 0; + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql_log(ql_log_warn, vha, 0xb153, + "%s: TIMEOUT\n", __func__); + goto error; + } else { + qla8044_rd_reg_indirect(vha, addr2, &temp); + temp = temp & modify_mask; + temp = (temp | ((loop_cnt << 16) | loop_cnt)); + wrVal = ((temp << 16) | temp); + + qla8044_wr_reg_indirect(vha, addr2, wrVal); + qla8044_wr_reg_indirect(vha, addr1, value); + + wait_count = 0; + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + if (wait_count == poll) { + ql_log(ql_log_warn, vha, 0xb154, + "%s: TIMEOUT\n", __func__); + goto error; + } + + qla8044_wr_reg_indirect(vha, addr1, + ((0x40000000 | value) + stride2)); + wait_count = 0; + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql_log(ql_log_warn, vha, 0xb155, + "%s: TIMEOUT\n", __func__); + goto error; + } + + qla8044_rd_reg_indirect(vha, addr2, &data); + + *data_ptr++ = wrVal; + *data_ptr++ = data; + } + + } + + *d_ptr = data_ptr; + return QLA_SUCCESS; + +error: + return -1; + +} + +static uint32_t +qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + int ret = 0; + uint32_t addr1, addr2, value1, value2, data, selVal; + uint8_t stride1, stride2; + uint32_t addr3, addr4, addr5, addr6, addr7; + uint16_t count, loop_cnt; + uint32_t poll, mask; + uint32_t *data_ptr = *d_ptr; + + struct qla8044_minidump_entry_rdmdio *rdmdio; + + rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr; + + addr1 = rdmdio->addr_1; + addr2 = rdmdio->addr_2; + value1 = rdmdio->value_1; + stride1 = rdmdio->stride_1; + stride2 = rdmdio->stride_2; + count = rdmdio->count; + + poll = rdmdio->poll; + mask = rdmdio->mask; + value2 = rdmdio->value_2; + + addr3 = addr1 + stride1; + + for (loop_cnt = 0; loop_cnt < count; loop_cnt++) { + ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, + addr3, mask); + if (ret == -1) + goto error; + + addr4 = addr2 - stride1; + ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4, + value2); + 
if (ret == -1) + goto error; + + addr5 = addr2 - (2 * stride1); + ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5, + value1); + if (ret == -1) + goto error; + + addr6 = addr2 - (3 * stride1); + ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, + addr6, 0x2); + if (ret == -1) + goto error; + + ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, + addr3, mask); + if (ret == -1) + goto error; + + addr7 = addr2 - (4 * stride1); + data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, + mask, addr7); + if (data == -1) + goto error; + + selVal = (value2 << 18) | (value1 << 2) | 2; + + stride2 = rdmdio->stride_2; + *data_ptr++ = selVal; + *data_ptr++ = data; + + value1 = value1 + stride2; + *d_ptr = data_ptr; + } + + return 0; + +error: + return -1; +} + +static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t addr1, addr2, value1, value2, poll, mask, r_value; + uint32_t wait_count = 0; + struct qla8044_minidump_entry_pollwr *pollwr_hdr; + + pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; + addr1 = pollwr_hdr->addr_1; + addr2 = pollwr_hdr->addr_2; + value1 = pollwr_hdr->value_1; + value2 = pollwr_hdr->value_2; + + poll = pollwr_hdr->poll; + mask = pollwr_hdr->mask; + + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &r_value); + + if ((r_value & poll) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__); + goto error; + } + + qla8044_wr_reg_indirect(vha, addr2, value2); + qla8044_wr_reg_indirect(vha, addr1, value1); + + wait_count = 0; + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &r_value); + + if ((r_value & poll) != 0) + break; + wait_count++; + } + + return QLA_SUCCESS; + +error: + return -1; +} + +/* + * + * qla8044_collect_md_data - Retrieve firmware minidump data. 
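*/

The collection loop below treats the template as a flat stream of variable-size entries, advancing by each header's entry_size and skipping entries whose capture mask is not enabled. A self-contained sketch of that traversal; md_entry_hdr mirrors struct qla8044_minidump_entry_hdr from qla_nx2.h and count_enabled() is illustrative only:

    #include <stdint.h>

    struct md_entry_hdr {
        uint32_t entry_type;
        uint32_t entry_size;         /* header + payload, in bytes */
        uint32_t entry_capture_size;
        uint8_t  entry_capture_mask;
        uint8_t  entry_code;
        uint8_t  driver_code;
        uint8_t  driver_flags;
    };

    /* Count the entries that a given driver capture mask enables. */
    static int count_enabled(const uint8_t *tmpl, uint32_t first_off,
                             uint32_t nentries, uint8_t cap_mask)
    {
        const struct md_entry_hdr *e =
            (const struct md_entry_hdr *)(tmpl + first_off);
        uint32_t i;
        int n = 0;

        for (i = 0; i < nentries; i++) {
            if (e->entry_capture_mask & cap_mask)
                n++;    /* the real loop dispatches on entry_type here */
            e = (const struct md_entry_hdr *)
                ((const uint8_t *)e + e->entry_size);
        }
        return n;
    }

/*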
+ * @ha: pointer to adapter structure + **/ +int +qla8044_collect_md_data(struct scsi_qla_host *vha) +{ + int num_entry_hdr = 0; + struct qla8044_minidump_entry_hdr *entry_hdr; + struct qla8044_minidump_template_hdr *tmplt_hdr; + uint32_t *data_ptr; + uint32_t data_collected = 0, f_capture_mask; + int i, rval = QLA_FUNCTION_FAILED; + uint64_t now; + uint32_t timestamp, idc_control; + struct qla_hw_data *ha = vha->hw; + + if (!ha->md_dump) { + ql_log(ql_log_info, vha, 0xb101, + "%s(%ld) No buffer to dump\n", + __func__, vha->host_no); + return rval; + } + + if (ha->fw_dumped) { + ql_log(ql_log_warn, vha, 0xb10d, + "Firmware has been previously dumped (%p) " + "-- ignoring request.\n", ha->fw_dump); + goto md_failed; + } + + ha->fw_dumped = 0; + + if (!ha->md_tmplt_hdr || !ha->md_dump) { + ql_log(ql_log_warn, vha, 0xb10e, + "Memory not allocated for minidump capture\n"); + goto md_failed; + } + + qla8044_idc_lock(ha); + idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); + if (idc_control & GRACEFUL_RESET_BIT1) { + ql_log(ql_log_warn, vha, 0xb112, + "Forced reset from application, " + "ignore minidump capture\n"); + qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, + (idc_control & ~GRACEFUL_RESET_BIT1)); + qla8044_idc_unlock(ha); + + goto md_failed; + } + qla8044_idc_unlock(ha); + + if (qla82xx_validate_template_chksum(vha)) { + ql_log(ql_log_info, vha, 0xb109, + "Template checksum validation error\n"); + goto md_failed; + } + + tmplt_hdr = (struct qla8044_minidump_template_hdr *) + ha->md_tmplt_hdr; + data_ptr = (uint32_t *)((uint8_t *)ha->md_dump); + num_entry_hdr = tmplt_hdr->num_of_entries; + + ql_dbg(ql_dbg_p3p, vha, 0xb11a, + "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); + + f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF; + + /* Validate whether required debug level is set */ + if ((f_capture_mask & 0x3) != 0x3) { + ql_log(ql_log_warn, vha, 0xb10f, + "Minimum required capture mask[0x%x] level not set\n", + f_capture_mask); + + } + tmplt_hdr->driver_capture_mask = ql2xmdcapmask; + ql_log(ql_log_info, vha, 0xb102, + "[%s]: starting data ptr: %p\n", + __func__, data_ptr); + ql_log(ql_log_info, vha, 0xb10b, + "[%s]: no of entry headers in Template: 0x%x\n", + __func__, num_entry_hdr); + ql_log(ql_log_info, vha, 0xb10c, + "[%s]: Total_data_size 0x%x, %d obtained\n", + __func__, ha->md_dump_size, ha->md_dump_size); + + /* Update current timestamp before taking dump */ + now = get_jiffies_64(); + timestamp = (u32)(jiffies_to_msecs(now) / 1000); + tmplt_hdr->driver_timestamp = timestamp; + + entry_hdr = (struct qla8044_minidump_entry_hdr *) + (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); + tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] = + tmplt_hdr->ocm_window_reg[ha->portnum]; + + /* Walk through the entry headers - validate/perform required action */ + for (i = 0; i < num_entry_hdr; i++) { + if (data_collected > ha->md_dump_size) { + ql_log(ql_log_info, vha, 0xb103, + "Data collected: [0x%x], " + "Total Dump size: [0x%x]\n", + data_collected, ha->md_dump_size); + return rval; + } + + if (!(entry_hdr->d_ctrl.entry_capture_mask & + ql2xmdcapmask)) { + entry_hdr->d_ctrl.driver_flags |= + QLA82XX_DBG_SKIPPED_FLAG; + goto skip_nxt_entry; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb104, + "Data collected: [0x%x], Dump size left:[0x%x]\n", + data_collected, + (ha->md_dump_size - data_collected)); + + /* Decode the entry type and take required action to capture + * debug data + */ + switch (entry_hdr->entry_type) { + case QLA82XX_RDEND: + 
qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA82XX_CNTRL: + rval = qla8044_minidump_process_control(vha, + entry_hdr); + if (rval != QLA_SUCCESS) { + qla8044_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_RDCRB: + qla8044_minidump_process_rdcrb(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDMEM: + rval = qla8044_minidump_pex_dma_read(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + rval = qla8044_minidump_process_rdmem(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla8044_mark_entry_skipped(vha, + entry_hdr, i); + goto md_failed; + } + } + break; + case QLA82XX_BOARD: + case QLA82XX_RDROM: + rval = qla8044_minidump_process_rdrom(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla8044_mark_entry_skipped(vha, + entry_hdr, i); + } + break; + case QLA82XX_L2DTG: + case QLA82XX_L2ITG: + case QLA82XX_L2DAT: + case QLA82XX_L2INS: + rval = qla8044_minidump_process_l2tag(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla8044_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA8044_L1DTG: + case QLA8044_L1ITG: + case QLA82XX_L1DAT: + case QLA82XX_L1INS: + qla8044_minidump_process_l1cache(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDOCM: + qla8044_minidump_process_rdocm(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDMUX: + qla8044_minidump_process_rdmux(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_QUEUE: + qla8044_minidump_process_queue(vha, + entry_hdr, &data_ptr); + break; + case QLA8044_POLLRD: + rval = qla8044_minidump_process_pollrd(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA8044_RDMUX2: + qla8044_minidump_process_rdmux2(vha, + entry_hdr, &data_ptr); + break; + case QLA8044_POLLRDMWR: + rval = qla8044_minidump_process_pollrdmwr(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA8044_RDDFE: + rval = qla8044_minidump_process_rddfe(vha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA8044_RDMDIO: + rval = qla8044_minidump_process_rdmdio(vha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA8044_POLLWR: + rval = qla8044_minidump_process_pollwr(vha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA82XX_RDNOP: + default: + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + } + + data_collected = (uint8_t *)data_ptr - + (uint8_t *)((uint8_t *)ha->md_dump); +skip_nxt_entry: + /* + * next entry in the template + */ + entry_hdr = (struct qla8044_minidump_entry_hdr *) + (((uint8_t *)entry_hdr) + entry_hdr->entry_size); + } + + if (data_collected != ha->md_dump_size) { + ql_log(ql_log_info, vha, 0xb105, + "Dump data mismatch: Data collected: " + "[0x%x], total_data_size:[0x%x]\n", + data_collected, ha->md_dump_size); + rval = QLA_FUNCTION_FAILED; + goto md_failed; + } + + ql_log(ql_log_info, vha, 0xb110, + "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", + vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); + ha->fw_dumped = 1; + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + + + ql_log(ql_log_info, vha, 0xb106, + "Leaving fn: %s Last entry: 0x%x\n", + __func__, i); +md_failed: + return rval; +} + +void +qla8044_get_minidump(struct 
scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (!qla8044_collect_md_data(vha)) { + ha->fw_dumped = 1; + ha->prev_minidump_failed = 0; + } else { + ql_log(ql_log_fatal, vha, 0xb0db, + "%s: Unable to collect minidump\n", + __func__); + ha->prev_minidump_failed = 1; + } +} + +static int +qla8044_poll_flash_status_reg(struct scsi_qla_host *vha) +{ + uint32_t flash_status; + int retries = QLA8044_FLASH_READ_RETRY_COUNT; + int ret_val = QLA_SUCCESS; + + while (retries--) { + ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS, + &flash_status); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb13c, + "%s: Failed to read FLASH_STATUS reg.\n", + __func__); + break; + } + if ((flash_status & QLA8044_FLASH_STATUS_READY) == + QLA8044_FLASH_STATUS_READY) + break; + msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY); + } + + if (!retries) + ret_val = QLA_FUNCTION_FAILED; + + return ret_val; +} + +static int +qla8044_write_flash_status_reg(struct scsi_qla_host *vha, + uint32_t data) +{ + int ret_val = QLA_SUCCESS; + uint32_t cmd; + + cmd = vha->hw->fdt_wrt_sts_reg_cmd; + + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb125, + "%s: Failed to write to FLASH_ADDR.\n", __func__); + goto exit_func; + } + + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb126, + "%s: Failed to write to FLASH_WRDATA.\n", __func__); + goto exit_func; + } + + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_SECOND_ERASE_MS_VAL); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb127, + "%s: Failed to write to FLASH_CONTROL.\n", __func__); + goto exit_func; + } + + ret_val = qla8044_poll_flash_status_reg(vha); + if (ret_val) + ql_log(ql_log_warn, vha, 0xb128, + "%s: Error polling flash status reg.\n", __func__); + +exit_func: + return ret_val; +} + +/* + * This function assumes that the flash lock is held. + */ +static int +qla8044_unprotect_flash(scsi_qla_host_t *vha) +{ + int ret_val; + struct qla_hw_data *ha = vha->hw; + + ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable); + if (ret_val) + ql_log(ql_log_warn, vha, 0xb139, + "%s: Write flash status failed.\n", __func__); + + return ret_val; +} + +/* + * This function assumes that the flash lock is held. 
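*/

Write access to the flash is strictly bracketed: take the hardware flash lock, drop write protection, do the erase and program work, then restore protection and release the lock in LIFO order, exactly as qla8044_write_optrom_data() does further down. A sketch of that bracket; the four wrappers are hypothetical names for the driver's lock/protect helpers:

    /* Hypothetical wrappers over flash lock/unprotect/protect/unlock. */
    extern int  flash_lock(void);
    extern void flash_unlock(void);
    extern int  flash_unprotect(void);
    extern int  flash_protect(void);

    /* Run 'work' with the flash writable; undo in reverse order. */
    static int with_flash_write_access(int (*work)(void))
    {
        int rc;

        if (flash_lock())
            return -1;
        rc = flash_unprotect();
        if (rc == 0) {
            rc = work();
            flash_protect();    /* restore protection even on failure */
        }
        flash_unlock();
        return rc;
    }

/*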
+ */ +static int +qla8044_protect_flash(scsi_qla_host_t *vha) +{ + int ret_val; + struct qla_hw_data *ha = vha->hw; + + ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable); + if (ret_val) + ql_log(ql_log_warn, vha, 0xb13b, + "%s: Write flash status failed.\n", __func__); + + return ret_val; +} + + +static int +qla8044_erase_flash_sector(struct scsi_qla_host *vha, + uint32_t sector_start_addr) +{ + uint32_t reversed_addr; + int ret_val = QLA_SUCCESS; + + ret_val = qla8044_poll_flash_status_reg(vha); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb12e, + "%s: Poll flash status after erase failed..\n", __func__); + } + + reversed_addr = (((sector_start_addr & 0xFF) << 16) | + (sector_start_addr & 0xFF00) | + ((sector_start_addr & 0xFF0000) >> 16)); + + ret_val = qla8044_wr_reg_indirect(vha, + QLA8044_FLASH_WRDATA, reversed_addr); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb12f, + "%s: Failed to write to FLASH_WRDATA.\n", __func__); + } + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb130, + "%s: Failed to write to FLASH_ADDR.\n", __func__); + } + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_LAST_ERASE_MS_VAL); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb131, + "%s: Failed write to FLASH_CONTROL.\n", __func__); + } + ret_val = qla8044_poll_flash_status_reg(vha); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb132, + "%s: Poll flash status failed.\n", __func__); + } + + + return ret_val; +} + +/* + * qla8044_flash_write_u32 - Write data to flash + * + * @ha : Pointer to adapter structure + * addr : Flash address to write to + * p_data : Data to be written + * + * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED + * + * NOTE: Lock should be held on entry + */ +static int +qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr, + uint32_t *p_data) +{ + int ret_val = QLA_SUCCESS; + + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + 0x00800000 | (addr >> 2)); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb134, + "%s: Failed write to FLASH_ADDR.\n", __func__); + goto exit_func; + } + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb135, + "%s: Failed write to FLASH_WRDATA.\n", __func__); + goto exit_func; + } + ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb136, + "%s: Failed write to FLASH_CONTROL.\n", __func__); + goto exit_func; + } + ret_val = qla8044_poll_flash_status_reg(vha); + if (ret_val) { + ql_log(ql_log_warn, vha, 0xb137, + "%s: Poll flash status failed.\n", __func__); + } + +exit_func: + return ret_val; +} + +static int +qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr, + uint32_t faddr, uint32_t dwords) +{ + int ret = QLA_FUNCTION_FAILED; + uint32_t spi_val; + + if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS || + dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) { + ql_dbg(ql_dbg_user, vha, 0xb123, + "Got unsupported dwords = 0x%x.\n", + dwords); + return QLA_FUNCTION_FAILED; + } + + qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, + spi_val | QLA8044_FLASH_SPI_CTL); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_FIRST_TEMP_VAL); + + /* First DWORD write to FLASH_WRDATA */ + ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, + *dwptr++); + 
qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_FIRST_MS_PATTERN); + + ret = qla8044_poll_flash_status_reg(vha); + if (ret) { + ql_log(ql_log_warn, vha, 0xb124, + "%s: Failed.\n", __func__); + goto exit_func; + } + + dwords--; + + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_SECOND_TEMP_VAL); + + + /* Second to N-1 DWORDS writes */ + while (dwords != 1) { + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_SECOND_MS_PATTERN); + ret = qla8044_poll_flash_status_reg(vha); + if (ret) { + ql_log(ql_log_warn, vha, 0xb129, + "%s: Failed.\n", __func__); + goto exit_func; + } + dwords--; + } + + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR, + QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2)); + + /* Last DWORD write */ + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, + QLA8044_FLASH_LAST_MS_PATTERN); + ret = qla8044_poll_flash_status_reg(vha); + if (ret) { + ql_log(ql_log_warn, vha, 0xb12a, + "%s: Failed.\n", __func__); + goto exit_func; + } + qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val); + + if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) { + ql_log(ql_log_warn, vha, 0xb12b, + "%s: Failed.\n", __func__); + spi_val = 0; + /* Operation failed, clear error bit. */ + qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, + &spi_val); + qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, + spi_val | QLA8044_FLASH_SPI_CTL); + } +exit_func: + return ret; +} + +static int +qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr, + uint32_t faddr, uint32_t dwords) +{ + int ret = QLA_FUNCTION_FAILED; + uint32_t liter; + + for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) { + ret = qla8044_flash_write_u32(vha, faddr, dwptr); + if (ret) { + ql_dbg(ql_dbg_p3p, vha, 0xb141, + "%s: flash address=%x data=%x.\n", __func__, + faddr, *dwptr); + break; + } + } + + return ret; +} + +int +qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, + uint32_t offset, uint32_t length) +{ + int rval = QLA_FUNCTION_FAILED, i, burst_iter_count; + int dword_count, erase_sec_count; + uint32_t erase_offset; + uint8_t *p_cache, *p_src; + + erase_offset = offset; + + p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL); + if (!p_cache) + return QLA_FUNCTION_FAILED; + + memcpy(p_cache, buf, length); + p_src = p_cache; + dword_count = length / sizeof(uint32_t); + /* Since the offset and legth are sector aligned, it will be always + * multiple of burst_iter_count (64) + */ + burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS; + erase_sec_count = length / QLA8044_SECTOR_SIZE; + + /* Suspend HBA. */ + scsi_block_requests(vha->host); + /* Lock and enable write for whole operation. */ + qla8044_flash_lock(vha); + qla8044_unprotect_flash(vha); + + /* Erasing the sectors */ + for (i = 0; i < erase_sec_count; i++) { + rval = qla8044_erase_flash_sector(vha, erase_offset); + ql_dbg(ql_dbg_user, vha, 0xb138, + "Done erase of sector=0x%x.\n", + erase_offset); + if (rval) { + ql_log(ql_log_warn, vha, 0xb121, + "Failed to erase the sector having address: " + "0x%x.\n", erase_offset); + goto out; + } + erase_offset += QLA8044_SECTOR_SIZE; + } + ql_dbg(ql_dbg_user, vha, 0xb13f, + "Got write for addr = 0x%x length=0x%x.\n", + offset, length); + + for (i = 0; i < burst_iter_count; i++) { + + /* Go with write. 
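*/

The loop below is optimistic: every 64-dword block is first attempted in fast buffer mode, and only when that fails does the driver degrade to the slow one-dword-at-a-time mode for the same block. The same fallback shape in isolation; write_burst() and write_dword() are hypothetical stand-ins for qla8044_write_flash_buffer_mode() and qla8044_flash_write_u32():

    #include <stdint.h>

    extern int write_burst(uint32_t faddr, const uint32_t *p,
                           uint32_t ndwords);
    extern int write_dword(uint32_t faddr, uint32_t val);

    /* Prefer the burst path; fall back to per-dword writes on error. */
    static int write_block(uint32_t faddr, const uint32_t *p,
                           uint32_t ndwords)
    {
        uint32_t i;

        if (write_burst(faddr, p, ndwords) == 0)
            return 0;

        for (i = 0; i < ndwords; i++)
            if (write_dword(faddr + 4 * i, p[i]))
                return -1;
        return 0;
    }

/*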
*/ + rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src, + offset, QLA8044_MAX_OPTROM_BURST_DWORDS); + if (rval) { + /* Buffer Mode failed skip to dword mode */ + ql_log(ql_log_warn, vha, 0xb122, + "Failed to write flash in buffer mode, " + "Reverting to slow-write.\n"); + rval = qla8044_write_flash_dword_mode(vha, + (uint32_t *)p_src, offset, + QLA8044_MAX_OPTROM_BURST_DWORDS); + } + p_src += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS; + offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS; + } + ql_dbg(ql_dbg_user, vha, 0xb133, + "Done writing.\n"); + +out: + qla8044_protect_flash(vha); + qla8044_flash_unlock(vha); + scsi_unblock_requests(vha->host); + kfree(p_cache); + + return rval; +} + +#define LEG_INT_PTR_B31 (1 << 31) +#define LEG_INT_PTR_B30 (1 << 30) +#define PF_BITS_MASK (0xF << 16) +/** + * qla8044_intr_handler() - Process interrupts for the ISP8044 + * @irq: + * @dev_id: SCSI driver HA context + * + * Called by system whenever the host adapter generates an interrupt. + * + * Returns handled flag. + */ +irqreturn_t +qla8044_intr_handler(int irq, void *dev_id) +{ + scsi_qla_host_t *vha; + struct qla_hw_data *ha; + struct rsp_que *rsp; + struct device_reg_82xx __iomem *reg; + int status = 0; + unsigned long flags; + unsigned long iter; + uint32_t stat; + uint16_t mb[4]; + uint32_t leg_int_ptr = 0, pf_bit; + + rsp = (struct rsp_que *) dev_id; + if (!rsp) { + ql_log(ql_log_info, NULL, 0xb143, + "%s(): NULL response queue pointer\n", __func__); + return IRQ_NONE; + } + ha = rsp->hw; + vha = pci_get_drvdata(ha->pdev); + + if (unlikely(pci_channel_offline(ha->pdev))) + return IRQ_HANDLED; + + leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET); + + /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */ + if (!(leg_int_ptr & (LEG_INT_PTR_B31))) { + ql_dbg(ql_dbg_p3p, vha, 0xb144, + "%s: Legacy Interrupt Bit 31 not set, " + "spurious interrupt!\n", __func__); + return IRQ_NONE; + } + + pf_bit = ha->portnum << 16; + /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */ + if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) { + ql_dbg(ql_dbg_p3p, vha, 0xb145, + "%s: Incorrect function ID 0x%x in " + "legacy interrupt register, " + "ha->pf_bit = 0x%x\n", __func__, + (leg_int_ptr & (PF_BITS_MASK)), pf_bit); + return IRQ_NONE; + } + + /* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger + * Control register and poll till Legacy Interrupt Pointer register + * bit32 is 0. 
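*/

Because the INTx line is shared between the functions on the adapter, the handler claims an interrupt only when bit 31 of the legacy interrupt pointer marks it valid and bits 19:16 carry this function's number; anything else is answered with IRQ_NONE so the rightful owner can service it. The claim test in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    #define LEG_INT_VALID (1u << 31)
    #define PF_BITS_MASK  (0xFu << 16)

    /* True only if asserted and addressed to PCI function 'portnum'. */
    static bool legacy_intr_is_ours(uint32_t leg_int_ptr,
                                    unsigned int portnum)
    {
        if (!(leg_int_ptr & LEG_INT_VALID))
            return false;   /* spurious */
        return (leg_int_ptr & PF_BITS_MASK) == (portnum << 16);
    }

/*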
+ */ + qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0); + do { + leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET); + if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) + break; + } while (leg_int_ptr & (LEG_INT_PTR_B30)); + + reg = &ha->iobase->isp82; + spin_lock_irqsave(&ha->hardware_lock, flags); + for (iter = 1; iter--; ) { + + if (RD_REG_DWORD(®->host_int)) { + stat = RD_REG_DWORD(®->host_status); + if ((stat & HSRX_RISC_INT) == 0) + break; + + switch (stat & 0xff) { + case 0x1: + case 0x2: + case 0x10: + case 0x11: + qla82xx_mbx_completion(vha, MSW(stat)); + status |= MBX_INTERRUPT; + break; + case 0x12: + mb[0] = MSW(stat); + mb[1] = RD_REG_WORD(®->mailbox_out[1]); + mb[2] = RD_REG_WORD(®->mailbox_out[2]); + mb[3] = RD_REG_WORD(®->mailbox_out[3]); + qla2x00_async_event(vha, rsp, mb); + break; + case 0x13: + qla24xx_process_response_queue(vha, rsp); + break; + default: + ql_dbg(ql_dbg_p3p, vha, 0xb146, + "Unrecognized interrupt type " + "(%d).\n", stat & 0xff); + break; + } + } + WRT_REG_DWORD(®->host_int, 0); + } + + qla2x00_handle_mbx_completion(ha, status); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return IRQ_HANDLED; +} + +static int +qla8044_idc_dontreset(struct qla_hw_data *ha) +{ + uint32_t idc_ctrl; + + idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL); + return idc_ctrl & DONTRESET_BIT0; +} + +static void +qla8044_clear_rst_ready(scsi_qla_host_t *vha) +{ + uint32_t drv_state; + + drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); + + /* + * For ISP8044, drv_active register has 1 bit per function, + * shift 1 by func_num to set a bit for the function. + * For ISP82xx, drv_active has 4 bits per function + */ + drv_state &= ~(1 << vha->hw->portnum); + + ql_dbg(ql_dbg_p3p, vha, 0xb13d, + "drv_state: 0x%08x\n", drv_state); + qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state); +} + +int +qla8044_abort_isp(scsi_qla_host_t *vha) +{ + int rval; + uint32_t dev_state; + struct qla_hw_data *ha = vha->hw; + + qla8044_idc_lock(ha); + dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + + if (ql2xdontresethba) + qla8044_set_idc_dontreset(vha); + + /* If device_state is NEED_RESET, go ahead with + * Reset,irrespective of ql2xdontresethba. This is to allow a + * non-reset-owner to force a reset. Non-reset-owner sets + * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset + * and then forces a Reset by setting device_state to + * NEED_RESET. */ + if (dev_state == QLA8XXX_DEV_READY) { + /* If IDC_CTRL DONTRESETHBA_BIT0 is set don't do reset + * recovery */ + if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) { + ql_dbg(ql_dbg_p3p, vha, 0xb13e, + "Reset recovery disabled\n"); + rval = QLA_FUNCTION_FAILED; + goto exit_isp_reset; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb140, + "HW State: NEED RESET\n"); + qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_NEED_RESET); + } + + /* For ISP8044, Reset owner is NIC, iSCSI or FCOE based on priority + * and which drivers are present. Unlike ISP82XX, the function setting + * NEED_RESET, may not be the Reset owner. 
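*/

As the comment in qla8044_clear_rst_ready() above notes, ISP8044 gives each function a single bit in drv_state/drv_active (ISP82xx used four bits per function), so the ready/ack bookkeeping reduces to one-bit masks keyed by the port number:

    #include <stdint.h>

    /* ISP8044: one presence/ack bit per PCI function. */
    static inline uint32_t rst_ready_set(uint32_t drv_state,
                                         unsigned int portnum)
    {
        return drv_state | (1u << portnum);
    }

    static inline uint32_t rst_ready_clear(uint32_t drv_state,
                                           unsigned int portnum)
    {
        return drv_state & ~(1u << portnum);
    }

/*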
*/ + qla83xx_reset_ownership(vha); + + qla8044_idc_unlock(ha); + rval = qla8044_device_state_handler(vha); + qla8044_idc_lock(ha); + qla8044_clear_rst_ready(vha); + +exit_isp_reset: + qla8044_idc_unlock(ha); + if (rval == QLA_SUCCESS) { + ha->flags.isp82xx_fw_hung = 0; + ha->flags.nic_core_reset_hdlr_active = 0; + rval = qla82xx_restart_isp(vha); + } + + return rval; +} + +void +qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked) +{ + struct qla_hw_data *ha = vha->hw; + + if (!ha->allow_cna_fw_dump) + return; + + scsi_block_requests(vha->host); + ha->flags.isp82xx_no_md_cap = 1; + qla8044_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla8044_idc_unlock(ha); + qla2x00_wait_for_chip_reset(vha); + scsi_unblock_requests(vha->host); +} diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h new file mode 100644 index 00000000000..ada36057d7c --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_nx2.h @@ -0,0 +1,599 @@ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. + */ + +#ifndef __QLA_NX2_H +#define __QLA_NX2_H + +#define QSNT_ACK_TOV 30 +#define INTENT_TO_RECOVER 0x01 +#define PROCEED_TO_RECOVER 0x02 +#define IDC_LOCK_RECOVERY_OWNER_MASK 0x3C +#define IDC_LOCK_RECOVERY_STATE_MASK 0x3 +#define IDC_LOCK_RECOVERY_STATE_SHIFT_BITS 2 + +#define QLA8044_DRV_LOCK_MSLEEP 200 +#define QLA8044_ADDR_DDR_NET (0x0000000000000000ULL) +#define QLA8044_ADDR_DDR_NET_MAX (0x000000000fffffffULL) + +#define MD_MIU_TEST_AGT_WRDATA_LO 0x410000A0 +#define MD_MIU_TEST_AGT_WRDATA_HI 0x410000A4 +#define MD_MIU_TEST_AGT_WRDATA_ULO 0x410000B0 +#define MD_MIU_TEST_AGT_WRDATA_UHI 0x410000B4 +#define MD_MIU_TEST_AGT_RDDATA_LO 0x410000A8 +#define MD_MIU_TEST_AGT_RDDATA_HI 0x410000AC +#define MD_MIU_TEST_AGT_RDDATA_ULO 0x410000B8 +#define MD_MIU_TEST_AGT_RDDATA_UHI 0x410000BC + +/* MIU_TEST_AGT_CTRL flags. work for SIU as well */ +#define MIU_TA_CTL_WRITE_ENABLE (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE) +#define MIU_TA_CTL_WRITE_START (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE | \ + MIU_TA_CTL_START) +#define MIU_TA_CTL_START_ENABLE (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE) + +/* Imbus address bit used to indicate a host address. This bit is + * eliminated by the pcie bar and bar select before presentation + * over pcie. */ +/* host memory via IMBUS */ +#define QLA8044_P2_ADDR_PCIE (0x0000000800000000ULL) +#define QLA8044_P3_ADDR_PCIE (0x0000008000000000ULL) +#define QLA8044_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL) +#define QLA8044_ADDR_OCM0 (0x0000000200000000ULL) +#define QLA8044_ADDR_OCM0_MAX (0x00000002000fffffULL) +#define QLA8044_ADDR_OCM1 (0x0000000200400000ULL) +#define QLA8044_ADDR_OCM1_MAX (0x00000002004fffffULL) +#define QLA8044_ADDR_QDR_NET (0x0000000300000000ULL) +#define QLA8044_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL) +#define QLA8044_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) +#define QLA8044_ADDR_QDR_NET_MAX (0x0000000307ffffffULL) +#define QLA8044_PCI_CRBSPACE ((unsigned long)0x06000000) +#define QLA8044_PCI_DIRECT_CRB ((unsigned long)0x04400000) +#define QLA8044_PCI_CAMQM ((unsigned long)0x04800000) +#define QLA8044_PCI_CAMQM_MAX ((unsigned long)0x04ffffff) +#define QLA8044_PCI_DDR_NET ((unsigned long)0x00000000) +#define QLA8044_PCI_QDR_NET ((unsigned long)0x04000000) +#define QLA8044_PCI_QDR_NET_MAX ((unsigned long)0x043fffff) + +/* PCI Windowing for DDR regions. 
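*/

The range macro defined just below is inclusive at both ends, so each window's *_MAX constant is the last valid byte address, not the window size. For example, against the DDR window bounds above:

    #include <assert.h>

    #define ADDR_IN_RANGE(addr, low, high) \
        (((addr) <= (high)) && ((addr) >= (low)))

    int main(void)
    {
        /* DDR net window: 0x0 .. 0x0fffffff, both ends included. */
        assert(ADDR_IN_RANGE(0x0fffffffULL, 0x0ULL, 0x0fffffffULL));
        assert(!ADDR_IN_RANGE(0x10000000ULL, 0x0ULL, 0x0fffffffULL));
        return 0;
    }

/*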
*/ +#define QLA8044_ADDR_IN_RANGE(addr, low, high) \ + (((addr) <= (high)) && ((addr) >= (low))) + +/* Indirectly Mapped Registers */ +#define QLA8044_FLASH_SPI_STATUS 0x2808E010 +#define QLA8044_FLASH_SPI_CONTROL 0x2808E014 +#define QLA8044_FLASH_STATUS 0x42100004 +#define QLA8044_FLASH_CONTROL 0x42110004 +#define QLA8044_FLASH_ADDR 0x42110008 +#define QLA8044_FLASH_WRDATA 0x4211000C +#define QLA8044_FLASH_RDDATA 0x42110018 +#define QLA8044_FLASH_DIRECT_WINDOW 0x42110030 +#define QLA8044_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA)) + +/* Flash access regs */ +#define QLA8044_FLASH_LOCK 0x3850 +#define QLA8044_FLASH_UNLOCK 0x3854 +#define QLA8044_FLASH_LOCK_ID 0x3500 + +/* Driver Lock regs */ +#define QLA8044_DRV_LOCK 0x3868 +#define QLA8044_DRV_UNLOCK 0x386C +#define QLA8044_DRV_LOCK_ID 0x3504 +#define QLA8044_DRV_LOCKRECOVERY 0x379C + +/* IDC version */ +#define QLA8044_IDC_VER_MAJ_VALUE 0x1 +#define QLA8044_IDC_VER_MIN_VALUE 0x0 + +/* IDC Registers : Driver Coexistence Defines */ +#define QLA8044_CRB_IDC_VER_MAJOR 0x3780 +#define QLA8044_CRB_IDC_VER_MINOR 0x3798 +#define QLA8044_IDC_DRV_AUDIT 0x3794 +#define QLA8044_SRE_SHIM_CONTROL 0x0D200284 +#define QLA8044_PORT0_RXB_PAUSE_THRS 0x0B2003A4 +#define QLA8044_PORT1_RXB_PAUSE_THRS 0x0B2013A4 +#define QLA8044_PORT0_RXB_TC_MAX_CELL 0x0B200388 +#define QLA8044_PORT1_RXB_TC_MAX_CELL 0x0B201388 +#define QLA8044_PORT0_RXB_TC_STATS 0x0B20039C +#define QLA8044_PORT1_RXB_TC_STATS 0x0B20139C +#define QLA8044_PORT2_IFB_PAUSE_THRS 0x0B200704 +#define QLA8044_PORT3_IFB_PAUSE_THRS 0x0B201704 + +/* set value to pause threshold value */ +#define QLA8044_SET_PAUSE_VAL 0x0 +#define QLA8044_SET_TC_MAX_CELL_VAL 0x03FF03FF +#define QLA8044_PEG_HALT_STATUS1 0x34A8 +#define QLA8044_PEG_HALT_STATUS2 0x34AC +#define QLA8044_PEG_ALIVE_COUNTER 0x34B0 /* FW_HEARTBEAT */ +#define QLA8044_FW_CAPABILITIES 0x3528 +#define QLA8044_CRB_DRV_ACTIVE 0x3788 /* IDC_DRV_PRESENCE */ +#define QLA8044_CRB_DEV_STATE 0x3784 /* IDC_DEV_STATE */ +#define QLA8044_CRB_DRV_STATE 0x378C /* IDC_DRV_ACK */ +#define QLA8044_CRB_DRV_SCRATCH 0x3548 +#define QLA8044_CRB_DEV_PART_INFO1 0x37E0 +#define QLA8044_CRB_DEV_PART_INFO2 0x37E4 +#define QLA8044_FW_VER_MAJOR 0x3550 +#define QLA8044_FW_VER_MINOR 0x3554 +#define QLA8044_FW_VER_SUB 0x3558 +#define QLA8044_NPAR_STATE 0x359C +#define QLA8044_FW_IMAGE_VALID 0x35FC +#define QLA8044_CMDPEG_STATE 0x3650 +#define QLA8044_ASIC_TEMP 0x37B4 +#define QLA8044_FW_API 0x356C +#define QLA8044_DRV_OP_MODE 0x3570 +#define QLA8044_CRB_WIN_BASE 0x3800 +#define QLA8044_CRB_WIN_FUNC(f) (QLA8044_CRB_WIN_BASE+((f)*4)) +#define QLA8044_SEM_LOCK_BASE 0x3840 +#define QLA8044_SEM_UNLOCK_BASE 0x3844 +#define QLA8044_SEM_LOCK_FUNC(f) (QLA8044_SEM_LOCK_BASE+((f)*8)) +#define QLA8044_SEM_UNLOCK_FUNC(f) (QLA8044_SEM_UNLOCK_BASE+((f)*8)) +#define QLA8044_LINK_STATE(f) (0x3698+((f) > 7 ? 
4 : 0)) +#define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4)) +#define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4)) +#define QLA8044_LINK_SPEED_FACTOR 10 +#define QLA8044_FUN7_ACTIVE_INDEX 0x80 + +/* FLASH API Defines */ +#define QLA8044_FLASH_MAX_WAIT_USEC 100 +#define QLA8044_FLASH_LOCK_TIMEOUT 10000 +#define QLA8044_FLASH_SECTOR_SIZE 65536 +#define QLA8044_DRV_LOCK_TIMEOUT 2000 +#define QLA8044_FLASH_SECTOR_ERASE_CMD 0xdeadbeef +#define QLA8044_FLASH_WRITE_CMD 0xdacdacda +#define QLA8044_FLASH_BUFFER_WRITE_CMD 0xcadcadca +#define QLA8044_FLASH_READ_RETRY_COUNT 2000 +#define QLA8044_FLASH_STATUS_READY 0x6 +#define QLA8044_FLASH_BUFFER_WRITE_MIN 2 +#define QLA8044_FLASH_BUFFER_WRITE_MAX 64 +#define QLA8044_FLASH_STATUS_REG_POLL_DELAY 1 +#define QLA8044_ERASE_MODE 1 +#define QLA8044_WRITE_MODE 2 +#define QLA8044_DWORD_WRITE_MODE 3 +#define QLA8044_GLOBAL_RESET 0x38CC +#define QLA8044_WILDCARD 0x38F0 +#define QLA8044_INFORMANT 0x38FC +#define QLA8044_HOST_MBX_CTRL 0x3038 +#define QLA8044_FW_MBX_CTRL 0x303C +#define QLA8044_BOOTLOADER_ADDR 0x355C +#define QLA8044_BOOTLOADER_SIZE 0x3560 +#define QLA8044_FW_IMAGE_ADDR 0x3564 +#define QLA8044_MBX_INTR_ENABLE 0x1000 +#define QLA8044_MBX_INTR_MASK 0x1200 + +/* IDC Control Register bit defines */ +#define DONTRESET_BIT0 0x1 +#define GRACEFUL_RESET_BIT1 0x2 + +/* ISP8044 PEG_HALT_STATUS1 bits */ +#define QLA8044_HALT_STATUS_INFORMATIONAL (0x1 << 29) +#define QLA8044_HALT_STATUS_FW_RESET (0x2 << 29) +#define QLA8044_HALT_STATUS_UNRECOVERABLE (0x4 << 29) + +/* Firmware image definitions */ +#define QLA8044_BOOTLOADER_FLASH_ADDR 0x10000 +#define QLA8044_BOOT_FROM_FLASH 0 +#define QLA8044_IDC_PARAM_ADDR 0x3e8020 + +/* FLASH related definitions */ +#define QLA8044_OPTROM_BURST_SIZE 0x100 +#define QLA8044_MAX_OPTROM_BURST_DWORDS (QLA8044_OPTROM_BURST_SIZE / 4) +#define QLA8044_MIN_OPTROM_BURST_DWORDS 2 +#define QLA8044_SECTOR_SIZE (64 * 1024) + +#define QLA8044_FLASH_SPI_CTL 0x4 +#define QLA8044_FLASH_FIRST_TEMP_VAL 0x00800000 +#define QLA8044_FLASH_SECOND_TEMP_VAL 0x00800001 +#define QLA8044_FLASH_FIRST_MS_PATTERN 0x43 +#define QLA8044_FLASH_SECOND_MS_PATTERN 0x7F +#define QLA8044_FLASH_LAST_MS_PATTERN 0x7D +#define QLA8044_FLASH_STATUS_WRITE_DEF_SIG 0xFD0100 +#define QLA8044_FLASH_SECOND_ERASE_MS_VAL 0x5 +#define QLA8044_FLASH_ERASE_SIG 0xFD0300 +#define QLA8044_FLASH_LAST_ERASE_MS_VAL 0x3D + +/* Reset template definitions */ +#define QLA8044_MAX_RESET_SEQ_ENTRIES 16 +#define QLA8044_RESTART_TEMPLATE_SIZE 0x2000 +#define QLA8044_RESET_TEMPLATE_ADDR 0x4F0000 +#define QLA8044_RESET_SEQ_VERSION 0x0101 + +/* Reset template entry opcodes */ +#define OPCODE_NOP 0x0000 +#define OPCODE_WRITE_LIST 0x0001 +#define OPCODE_READ_WRITE_LIST 0x0002 +#define OPCODE_POLL_LIST 0x0004 +#define OPCODE_POLL_WRITE_LIST 0x0008 +#define OPCODE_READ_MODIFY_WRITE 0x0010 +#define OPCODE_SEQ_PAUSE 0x0020 +#define OPCODE_SEQ_END 0x0040 +#define OPCODE_TMPL_END 0x0080 +#define OPCODE_POLL_READ_LIST 0x0100 + +/* Template Header */ +#define RESET_TMPLT_HDR_SIGNATURE 0xCAFE +#define QLA8044_IDC_DRV_CTRL 0x3790 +#define AF_8044_NO_FW_DUMP 27 /* 0x08000000 */ + +#define MINIDUMP_SIZE_36K 36864 + +struct qla8044_reset_template_hdr { + uint16_t version; + uint16_t signature; + uint16_t size; + uint16_t entries; + uint16_t hdr_size; + uint16_t checksum; + uint16_t init_seq_offset; + uint16_t start_seq_offset; +} __packed; + +/* Common Entry Header. 
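*/

Reset templates parse much like the minidump templates: a stream of {cmd, size, count, delay} headers in which cmd is matched against the OPCODE_* bits above and size advances the cursor, with OPCODE_TMPL_END terminating the sequence. A sketch of that walk, assuming size covers the header plus its payload (the entry layout is declared just below):

    #include <stdint.h>

    struct reset_entry_hdr {
        uint16_t cmd;
        uint16_t size;  /* bytes: header + payload */
        uint16_t count;
        uint16_t delay;
    } __attribute__((packed));

    #define OPCODE_TMPL_END 0x0080

    /* Count entries up to and including the template-end opcode. */
    static int count_reset_entries(const uint8_t *seq, int max_entries)
    {
        const struct reset_entry_hdr *e;
        int n;

        for (n = 0; n < max_entries; n++) {
            e = (const struct reset_entry_hdr *)seq;
            if (e->cmd & OPCODE_TMPL_END)
                return n + 1;
            seq += e->size;
        }
        return -1;      /* template exceeded the expected bound */
    }

/*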
*/ +struct qla8044_reset_entry_hdr { + uint16_t cmd; + uint16_t size; + uint16_t count; + uint16_t delay; +} __packed; + +/* Generic poll entry type. */ +struct qla8044_poll { + uint32_t test_mask; + uint32_t test_value; +} __packed; + +/* Read modify write entry type. */ +struct qla8044_rmw { + uint32_t test_mask; + uint32_t xor_value; + uint32_t or_value; + uint8_t shl; + uint8_t shr; + uint8_t index_a; + uint8_t rsvd; +} __packed; + +/* Generic Entry Item with 2 DWords. */ +struct qla8044_entry { + uint32_t arg1; + uint32_t arg2; +} __packed; + +/* Generic Entry Item with 4 DWords.*/ +struct qla8044_quad_entry { + uint32_t dr_addr; + uint32_t dr_value; + uint32_t ar_addr; + uint32_t ar_value; +} __packed; + +struct qla8044_reset_template { + int seq_index; + int seq_error; + int array_index; + uint32_t array[QLA8044_MAX_RESET_SEQ_ENTRIES]; + uint8_t *buff; + uint8_t *stop_offset; + uint8_t *start_offset; + uint8_t *init_offset; + struct qla8044_reset_template_hdr *hdr; + uint8_t seq_end; + uint8_t template_end; +}; + +/* Driver_code is for driver to write some info about the entry + * currently not used. + */ +struct qla8044_minidump_entry_hdr { + uint32_t entry_type; + uint32_t entry_size; + uint32_t entry_capture_size; + struct { + uint8_t entry_capture_mask; + uint8_t entry_code; + uint8_t driver_code; + uint8_t driver_flags; + } d_ctrl; +} __packed; + +/* Read CRB entry header */ +struct qla8044_minidump_entry_crb { + struct qla8044_minidump_entry_hdr h; + uint32_t addr; + struct { + uint8_t addr_stride; + uint8_t state_index_a; + uint16_t poll_timeout; + } crb_strd; + uint32_t data_size; + uint32_t op_count; + + struct { + uint8_t opcode; + uint8_t state_index_v; + uint8_t shl; + uint8_t shr; + } crb_ctrl; + + uint32_t value_1; + uint32_t value_2; + uint32_t value_3; +} __packed; + +struct qla8044_minidump_entry_cache { + struct qla8044_minidump_entry_hdr h; + uint32_t tag_reg_addr; + struct { + uint16_t tag_value_stride; + uint16_t init_tag_value; + } addr_ctrl; + uint32_t data_size; + uint32_t op_count; + uint32_t control_addr; + struct { + uint16_t write_value; + uint8_t poll_mask; + uint8_t poll_wait; + } cache_ctrl; + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_1; + } read_ctrl; +} __packed; + +/* Read OCM */ +struct qla8044_minidump_entry_rdocm { + struct qla8044_minidump_entry_hdr h; + uint32_t rsvd_0; + uint32_t rsvd_1; + uint32_t data_size; + uint32_t op_count; + uint32_t rsvd_2; + uint32_t rsvd_3; + uint32_t read_addr; + uint32_t read_addr_stride; +} __packed; + +/* Read Memory */ +struct qla8044_minidump_entry_rdmem { + struct qla8044_minidump_entry_hdr h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +}; + +/* Read Memory: For Pex-DMA */ +struct qla8044_minidump_entry_rdmem_pex_dma { + struct qla8044_minidump_entry_hdr h; + uint32_t desc_card_addr; + uint16_t dma_desc_cmd; + uint8_t rsvd[2]; + uint32_t start_dma_cmd; + uint8_t rsvd2[12]; + uint32_t read_addr; + uint32_t read_data_size; +} __packed; + +/* Read ROM */ +struct qla8044_minidump_entry_rdrom { + struct qla8044_minidump_entry_hdr h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +} __packed; + +/* Mux entry */ +struct qla8044_minidump_entry_mux { + struct qla8044_minidump_entry_hdr h; + uint32_t select_addr; + uint32_t rsvd_0; + uint32_t data_size; + uint32_t op_count; + uint32_t select_value; + uint32_t select_value_stride; + uint32_t read_addr; + uint32_t rsvd_1; +} __packed; + +/* Queue entry */ +struct 
qla8044_minidump_entry_queue { + struct qla8044_minidump_entry_hdr h; + uint32_t select_addr; + struct { + uint16_t queue_id_stride; + uint16_t rsvd_0; + } q_strd; + uint32_t data_size; + uint32_t op_count; + uint32_t rsvd_1; + uint32_t rsvd_2; + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_3; + } rd_strd; +} __packed; + +/* POLLRD Entry */ +struct qla8044_minidump_entry_pollrd { + struct qla8044_minidump_entry_hdr h; + uint32_t select_addr; + uint32_t read_addr; + uint32_t select_value; + uint16_t select_value_stride; + uint16_t op_count; + uint32_t poll_wait; + uint32_t poll_mask; + uint32_t data_size; + uint32_t rsvd_1; +} __packed; + +struct qla8044_minidump_entry_rddfe { + struct qla8044_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t value; + uint8_t stride; + uint8_t stride2; + uint16_t count; + uint32_t poll; + uint32_t mask; + uint32_t modify_mask; + uint32_t data_size; + uint32_t rsvd; + +} __packed; + +struct qla8044_minidump_entry_rdmdio { + struct qla8044_minidump_entry_hdr h; + + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint8_t stride_1; + uint8_t stride_2; + uint16_t count; + uint32_t poll; + uint32_t mask; + uint32_t value_2; + uint32_t data_size; + +} __packed; + +struct qla8044_minidump_entry_pollwr { + struct qla8044_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint32_t value_2; + uint32_t poll; + uint32_t mask; + uint32_t data_size; + uint32_t rsvd; + +} __packed; + +/* RDMUX2 Entry */ +struct qla8044_minidump_entry_rdmux2 { + struct qla8044_minidump_entry_hdr h; + uint32_t select_addr_1; + uint32_t select_addr_2; + uint32_t select_value_1; + uint32_t select_value_2; + uint32_t op_count; + uint32_t select_value_mask; + uint32_t read_addr; + uint8_t select_value_stride; + uint8_t data_size; + uint8_t rsvd[2]; +} __packed; + +/* POLLRDMWR Entry */ +struct qla8044_minidump_entry_pollrdmwr { + struct qla8044_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint32_t value_2; + uint32_t poll_wait; + uint32_t poll_mask; + uint32_t modify_mask; + uint32_t data_size; +} __packed; + +/* IDC additional information */ +struct qla8044_idc_information { + uint32_t request_desc; /* IDC request descriptor */ + uint32_t info1; /* IDC additional info */ + uint32_t info2; /* IDC additional info */ + uint32_t info3; /* IDC additional info */ +} __packed; + +enum qla_regs { + QLA8044_PEG_HALT_STATUS1_INDEX = 0, + QLA8044_PEG_HALT_STATUS2_INDEX, + QLA8044_PEG_ALIVE_COUNTER_INDEX, + QLA8044_CRB_DRV_ACTIVE_INDEX, + QLA8044_CRB_DEV_STATE_INDEX, + QLA8044_CRB_DRV_STATE_INDEX, + QLA8044_CRB_DRV_SCRATCH_INDEX, + QLA8044_CRB_DEV_PART_INFO_INDEX, + QLA8044_CRB_DRV_IDC_VERSION_INDEX, + QLA8044_FW_VERSION_MAJOR_INDEX, + QLA8044_FW_VERSION_MINOR_INDEX, + QLA8044_FW_VERSION_SUB_INDEX, + QLA8044_CRB_CMDPEG_STATE_INDEX, + QLA8044_CRB_TEMP_STATE_INDEX, +} __packed; + +#define CRB_REG_INDEX_MAX 14 +#define CRB_CMDPEG_CHECK_RETRY_COUNT 60 +#define CRB_CMDPEG_CHECK_DELAY 500 + +static const uint32_t qla8044_reg_tbl[] = { + QLA8044_PEG_HALT_STATUS1, + QLA8044_PEG_HALT_STATUS2, + QLA8044_PEG_ALIVE_COUNTER, + QLA8044_CRB_DRV_ACTIVE, + QLA8044_CRB_DEV_STATE, + QLA8044_CRB_DRV_STATE, + QLA8044_CRB_DRV_SCRATCH, + QLA8044_CRB_DEV_PART_INFO1, + QLA8044_CRB_IDC_VER_MAJOR, + QLA8044_FW_VER_MAJOR, + QLA8044_FW_VER_MINOR, + QLA8044_FW_VER_SUB, + QLA8044_CMDPEG_STATE, + QLA8044_ASIC_TEMP, +}; + +/* MiniDump Structures */ + +/* Driver_code is for driver to write some info about 
the entry + * currently not used. + */ +#define QLA8044_SS_OCM_WNDREG_INDEX 3 +#define QLA8044_DBG_STATE_ARRAY_LEN 16 +#define QLA8044_DBG_CAP_SIZE_ARRAY_LEN 8 +#define QLA8044_DBG_RSVD_ARRAY_LEN 8 +#define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16 +#define QLA8044_SS_PCI_INDEX 0 +#define QLA8044_RDDFE 38 +#define QLA8044_RDMDIO 39 +#define QLA8044_POLLWR 40 + +struct qla8044_minidump_template_hdr { + uint32_t entry_type; + uint32_t first_entry_offset; + uint32_t size_of_template; + uint32_t capture_debug_level; + uint32_t num_of_entries; + uint32_t version; + uint32_t driver_timestamp; + uint32_t checksum; + + uint32_t driver_capture_mask; + uint32_t driver_info_word2; + uint32_t driver_info_word3; + uint32_t driver_info_word4; + + uint32_t saved_state_array[QLA8044_DBG_STATE_ARRAY_LEN]; + uint32_t capture_size_array[QLA8044_DBG_CAP_SIZE_ARRAY_LEN]; + uint32_t ocm_window_reg[QLA8044_DBG_OCM_WNDREG_ARRAY_LEN]; +}; + +struct qla8044_pex_dma_descriptor { + struct { + uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */ + uint8_t rsvd[2]; + uint16_t dma_desc_cmd; + } cmd; + uint64_t src_addr; + uint64_t dma_bus_addr; /*0-3: desc-cmd, 4-7: pci-func, 8-15: desc-cmd*/ + uint8_t rsvd[24]; +} __packed; + +#endif diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 2c6dd3dfe0f..d96bfb55e57 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -47,6 +47,7 @@ MODULE_PARM_DESC(ql2xenableclass2, "Specify if Class 2 operations are supported from the very " "beginning. Default is 0 - class 2 not supported."); + int ql2xlogintimeout = 20; module_param(ql2xlogintimeout, int, S_IRUGO); MODULE_PARM_DESC(ql2xlogintimeout, @@ -103,15 +104,14 @@ MODULE_PARM_DESC(ql2xshiftctondsd, "Set to control shifting of command type processing " "based on total number of SG elements."); -static void qla2x00_free_device(scsi_qla_host_t *); - int ql2xfdmienable=1; module_param(ql2xfdmienable, int, S_IRUGO); MODULE_PARM_DESC(ql2xfdmienable, "Enables FDMI registrations. " "0 - no FDMI. Default is 1 - perform FDMI."); -int ql2xmaxqdepth = MAX_Q_DEPTH; +#define MAX_Q_DEPTH 32 +static int ql2xmaxqdepth = MAX_Q_DEPTH; module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xmaxqdepth, "Maximum queue depth to set for each LUN. " @@ -120,15 +120,17 @@ MODULE_PARM_DESC(ql2xmaxqdepth, int ql2xenabledif = 2; module_param(ql2xenabledif, int, S_IRUGO); MODULE_PARM_DESC(ql2xenabledif, - " Enable T10-CRC-DIF " - " Default is 0 - No DIF Support. 
1 - Enable it" - ", 2 - Enable DIF for all types, except Type 0."); + " Enable T10-CRC-DIF:\n" + " Default is 2.\n" + " 0 -- No DIF Support\n" + " 1 -- Enable DIF for all types\n" + " 2 -- Enable DIF for all types, except Type 0.\n"); int ql2xenablehba_err_chk = 2; module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xenablehba_err_chk, " Enable T10-CRC-DIF Error isolation by HBA:\n" - " Default is 1.\n" + " Default is 2.\n" " 0 -- Error isolation disabled\n" " 1 -- Error isolation enabled only for DIX Type 0\n" " 2 -- Error isolation enabled for all Types\n"); @@ -236,6 +238,7 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *); static int qla2x00_change_queue_depth(struct scsi_device *, int, int); static int qla2x00_change_queue_type(struct scsi_device *, int); +static void qla2x00_free_device(scsi_qla_host_t *); struct scsi_host_template qla2xxx_driver_template = { .module = THIS_MODULE, @@ -354,7 +357,12 @@ fail_req_map: static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) { - if (req && req->ring) + if (IS_QLAFX00(ha)) { + if (req && req->ring_fx00) + dma_free_coherent(&ha->pdev->dev, + (req->length_fx00 + 1) * sizeof(request_t), + req->ring_fx00, req->dma_fx00); + } else if (req && req->ring) dma_free_coherent(&ha->pdev->dev, (req->length + 1) * sizeof(request_t), req->ring, req->dma); @@ -368,11 +376,16 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) { - if (rsp && rsp->ring) + if (IS_QLAFX00(ha)) { + if (rsp && rsp->ring) + dma_free_coherent(&ha->pdev->dev, + (rsp->length_fx00 + 1) * sizeof(request_t), + rsp->ring_fx00, rsp->dma_fx00); + } else if (rsp && rsp->ring) { dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) * sizeof(response_t), rsp->ring, rsp->dma); - + } kfree(rsp); rsp = NULL; } @@ -484,18 +497,14 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str) static char *pci_bus_modes[] = { "33", "66", "100", "133", }; struct qla_hw_data *ha = vha->hw; uint32_t pci_bus; - int pcie_reg; - pcie_reg = pci_pcie_cap(ha->pdev); - if (pcie_reg) { + if (pci_is_pcie(ha->pdev)) { char lwstr[6]; - uint16_t pcie_lstat, lspeed, lwidth; + uint32_t lstat, lspeed, lwidth; - pcie_reg += PCI_EXP_LNKCAP; - pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat); - lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3); - lwidth = (pcie_lstat & - (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4; + pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat); + lspeed = lstat & PCI_EXP_LNKCAP_SLS; + lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4; strcpy(str, "PCIe ("); switch (lspeed) { @@ -607,7 +616,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr) if (sp->flags & SRB_CRC_CTX_DSD_VALID) { /* List assured to be having elements */ - qla2x00_clean_dsd_pool(ha, sp); + qla2x00_clean_dsd_pool(ha, sp, NULL); sp->flags &= ~SRB_CRC_CTX_DSD_VALID; } @@ -657,6 +666,9 @@ qla2x00_sp_compl(void *data, void *ptr, int res) cmd->scsi_done(cmd); } +/* If we are SP1 here, we need to still take and release the host_lock as SP1 + * does not have the changes necessary to avoid taking host->host_lock. 
+ */ static int qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) { @@ -719,10 +731,8 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) } sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); - if (!sp) { - set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags); + if (!sp) goto qc24_host_busy; - } sp->u.scmd.cmd = cmd; sp->type = SRB_SCSI_CMD; @@ -735,7 +745,6 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013, "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); - set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags); goto qc24_host_busy_free_sp; } @@ -772,7 +781,7 @@ static int qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) { #define ABORT_POLLING_PERIOD 1000 -#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) +#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD)) unsigned long wait_iter = ABORT_WAIT_ITER; scsi_qla_host_t *vha = shost_priv(cmd->device->host); struct qla_hw_data *ha = vha->hw; @@ -835,11 +844,8 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) } /* - * qla2x00_wait_for_reset_ready - * Wait till the HBA is online after going through - * <= MAX_RETRIES_OF_ISP_ABORT or - * finally HBA is disabled ie marked offline or flash - * operations are in progress. + * qla2x00_wait_for_hba_ready + * Wait till the HBA is ready before doing driver unload * * Input: * ha - pointer to host adapter structure @@ -848,35 +854,15 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) * Does context switching-Release SPIN_LOCK * (if any) before calling this routine. * - * Return: - * Success (Adapter is online/no flash ops) : 0 - * Failed (Adapter is offline/disabled/flash ops in progress) : 1 */ -static int -qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha) +static void +qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha) { - int return_status; - unsigned long wait_online; struct qla_hw_data *ha = vha->hw; - scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); - while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || - test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || - test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || - ha->optrom_state != QLA_SWAITING || - ha->dpc_active) && time_before(jiffies, wait_online)) + while ((!(vha->flags.online) || ha->dpc_active || + ha->flags.mbox_busy)) msleep(1000); - - if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING) - return_status = QLA_SUCCESS; - else - return_status = QLA_FUNCTION_FAILED; - - ql_dbg(ql_dbg_taskm, vha, 0x8019, - "%s return status=%d.\n", __func__, return_status); - - return return_status; } int @@ -936,7 +922,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) int ret; unsigned int id, lun; unsigned long flags; - int wait = 0; + int rval, wait = 0; struct qla_hw_data *ha = vha->hw; if (!CMD_SP(cmd)) @@ -965,10 +951,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) sp_get(sp); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (ha->isp_ops->abort_command(sp)) { - ret = FAILED; + rval = ha->isp_ops->abort_command(sp); + if (rval) { + if (rval == QLA_FUNCTION_PARAMETER_ERROR) { + /* + * Decrement the ref_count since we can't find the + * command + */ + atomic_dec(&sp->ref_count); + ret = SUCCESS; + } else + ret = FAILED; + ql_dbg(ql_dbg_taskm, vha, 0x8003, - "Abort command mbx failed cmd=%p.\n", cmd); + "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval); } else { ql_dbg(ql_dbg_taskm, vha, 0x8004, "Abort command mbx success 
cmd=%p.\n", cmd); @@ -976,6 +972,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) } spin_lock_irqsave(&ha->hardware_lock, flags); + /* + * Clear the slot in the oustanding_cmds array if we can't find the + * command to reclaim the resources. + */ + if (rval == QLA_FUNCTION_PARAMETER_ERROR) + vha->req->outstanding_cmds[sp->handle] = NULL; sp->done(ha, sp, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -1227,14 +1229,18 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) ql_log(ql_log_info, vha, 0x8018, "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun); - if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) + /* + * No point in issuing another reset if one is active. Also do not + * attempt a reset if we are updating flash. + */ + if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING) goto eh_host_reset_lock; if (vha != base_vha) { if (qla2x00_vp_abort_isp(vha)) goto eh_host_reset_lock; } else { - if (IS_QLA82XX(vha->hw)) { + if (IS_P3P_TYPE(vha->hw)) { if (!qla82xx_fcoe_ctx_reset(vha)) { /* Ctx reset success */ ret = SUCCESS; @@ -1290,6 +1296,10 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) struct fc_port *fcport; struct qla_hw_data *ha = vha->hw; + if (IS_QLAFX00(ha)) { + return qlafx00_loop_reset(vha); + } + if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) { list_for_each_entry(fcport, &vha->vp_fcports, list) { if (fcport->port_type != FCT_TARGET) @@ -1298,12 +1308,13 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) ret = ha->isp_ops->target_reset(fcport, 0, 0); if (ret != QLA_SUCCESS) { ql_dbg(ql_dbg_taskm, vha, 0x802c, - "Bus Reset failed: Target Reset=%d " + "Bus Reset failed: Reset=%d " "d_id=%x.\n", ret, fcport->d_id.b24); } } } + if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); @@ -1460,81 +1471,6 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type) return tag_type; } -static void -qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha) -{ - scsi_qla_host_t *vp; - struct Scsi_Host *shost; - struct scsi_device *sdev; - struct qla_hw_data *ha = vha->hw; - unsigned long flags; - - ha->host_last_rampdown_time = jiffies; - - if (ha->cfg_lun_q_depth <= vha->host->cmd_per_lun) - return; - - if ((ha->cfg_lun_q_depth / 2) < vha->host->cmd_per_lun) - ha->cfg_lun_q_depth = vha->host->cmd_per_lun; - else - ha->cfg_lun_q_depth = ha->cfg_lun_q_depth / 2; - - /* - * Geometrically ramp down the queue depth for all devices on this - * adapter - */ - spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vp, &ha->vp_list, list) { - shost = vp->host; - shost_for_each_device(sdev, shost) { - if (sdev->queue_depth > shost->cmd_per_lun) { - if (sdev->queue_depth < ha->cfg_lun_q_depth) - continue; - ql_log(ql_log_warn, vp, 0x3031, - "%ld:%d:%d: Ramping down queue depth to %d", - vp->host_no, sdev->id, sdev->lun, - ha->cfg_lun_q_depth); - qla2x00_change_queue_depth(sdev, - ha->cfg_lun_q_depth, SCSI_QDEPTH_DEFAULT); - } - } - } - spin_unlock_irqrestore(&ha->vport_slock, flags); - - return; -} - -static void -qla2x00_host_ramp_up_queuedepth(scsi_qla_host_t *vha) -{ - scsi_qla_host_t *vp; - struct Scsi_Host *shost; - struct scsi_device *sdev; - struct qla_hw_data *ha = vha->hw; - unsigned long flags; - - ha->host_last_rampup_time = jiffies; - ha->cfg_lun_q_depth++; - - /* - * Linearly ramp up the queue depth for all devices on this - * adapter - */ - spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vp, &ha->vp_list, list) { - 
shost = vp->host; - shost_for_each_device(sdev, shost) { - if (sdev->queue_depth > ha->cfg_lun_q_depth) - continue; - qla2x00_change_queue_depth(sdev, ha->cfg_lun_q_depth, - SCSI_QDEPTH_RAMP_UP); - } - } - spin_unlock_irqrestore(&ha->vport_slock, flags); - - return; -} - /** * qla2x00_config_dma_addressing() - Configure OS DMA addressing method. * @ha: HA context @@ -1858,6 +1794,7 @@ static struct isp_operations qla2100_isp_ops = { .start_scsi = qla2x00_start_scsi, .abort_isp = qla2x00_abort_isp, .iospace_config = qla2x00_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qla2300_isp_ops = { @@ -1894,7 +1831,8 @@ static struct isp_operations qla2300_isp_ops = { .get_flash_version = qla2x00_get_flash_version, .start_scsi = qla2x00_start_scsi, .abort_isp = qla2x00_abort_isp, - .iospace_config = qla2x00_iospace_config, + .iospace_config = qla2x00_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qla24xx_isp_ops = { @@ -1931,7 +1869,8 @@ static struct isp_operations qla24xx_isp_ops = { .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_start_scsi, .abort_isp = qla2x00_abort_isp, - .iospace_config = qla2x00_iospace_config, + .iospace_config = qla2x00_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qla25xx_isp_ops = { @@ -1968,7 +1907,8 @@ static struct isp_operations qla25xx_isp_ops = { .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_dif_start_scsi, .abort_isp = qla2x00_abort_isp, - .iospace_config = qla2x00_iospace_config, + .iospace_config = qla2x00_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qla81xx_isp_ops = { @@ -2005,7 +1945,8 @@ static struct isp_operations qla81xx_isp_ops = { .get_flash_version = qla24xx_get_flash_version, .start_scsi = qla24xx_dif_start_scsi, .abort_isp = qla2x00_abort_isp, - .iospace_config = qla2x00_iospace_config, + .iospace_config = qla2x00_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qla82xx_isp_ops = { @@ -2033,16 +1974,55 @@ static struct isp_operations qla82xx_isp_ops = { .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, .read_nvram = qla24xx_read_nvram_data, .write_nvram = qla24xx_write_nvram_data, - .fw_dump = qla24xx_fw_dump, + .fw_dump = qla82xx_fw_dump, .beacon_on = qla82xx_beacon_on, .beacon_off = qla82xx_beacon_off, .beacon_blink = NULL, .read_optrom = qla82xx_read_optrom_data, .write_optrom = qla82xx_write_optrom_data, - .get_flash_version = qla24xx_get_flash_version, + .get_flash_version = qla82xx_get_flash_version, .start_scsi = qla82xx_start_scsi, .abort_isp = qla82xx_abort_isp, .iospace_config = qla82xx_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, +}; + +static struct isp_operations qla8044_isp_ops = { + .pci_config = qla82xx_pci_config, + .reset_chip = qla82xx_reset_chip, + .chip_diag = qla24xx_chip_diag, + .config_rings = qla82xx_config_rings, + .reset_adapter = qla24xx_reset_adapter, + .nvram_config = qla81xx_nvram_config, + .update_fw_options = qla24xx_update_fw_options, + .load_risc = qla82xx_load_risc, + .pci_info_str = qla24xx_pci_info_str, + .fw_version_str = qla24xx_fw_version_str, + .intr_handler = qla8044_intr_handler, + .enable_intrs = qla82xx_enable_intrs, + .disable_intrs = qla82xx_disable_intrs, + .abort_command = qla24xx_abort_command, + .target_reset = qla24xx_abort_target, + .lun_reset = qla24xx_lun_reset, + 
.fabric_login = qla24xx_login_fabric, + .fabric_logout = qla24xx_fabric_logout, + .calc_req_entries = NULL, + .build_iocbs = NULL, + .prep_ms_iocb = qla24xx_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, + .read_nvram = NULL, + .write_nvram = NULL, + .fw_dump = qla8044_fw_dump, + .beacon_on = qla82xx_beacon_on, + .beacon_off = qla82xx_beacon_off, + .beacon_blink = NULL, + .read_optrom = qla8044_read_optrom_data, + .write_optrom = qla8044_write_optrom_data, + .get_flash_version = qla82xx_get_flash_version, + .start_scsi = qla82xx_start_scsi, + .abort_isp = qla8044_abort_isp, + .iospace_config = qla82xx_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, }; static struct isp_operations qla83xx_isp_ops = { @@ -2080,6 +2060,83 @@ static struct isp_operations qla83xx_isp_ops = { .start_scsi = qla24xx_dif_start_scsi, .abort_isp = qla2x00_abort_isp, .iospace_config = qla83xx_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, +}; + +static struct isp_operations qlafx00_isp_ops = { + .pci_config = qlafx00_pci_config, + .reset_chip = qlafx00_soft_reset, + .chip_diag = qlafx00_chip_diag, + .config_rings = qlafx00_config_rings, + .reset_adapter = qlafx00_soft_reset, + .nvram_config = NULL, + .update_fw_options = NULL, + .load_risc = NULL, + .pci_info_str = qlafx00_pci_info_str, + .fw_version_str = qlafx00_fw_version_str, + .intr_handler = qlafx00_intr_handler, + .enable_intrs = qlafx00_enable_intrs, + .disable_intrs = qlafx00_disable_intrs, + .abort_command = qla24xx_async_abort_command, + .target_reset = qlafx00_abort_target, + .lun_reset = qlafx00_lun_reset, + .fabric_login = NULL, + .fabric_logout = NULL, + .calc_req_entries = NULL, + .build_iocbs = NULL, + .prep_ms_iocb = qla24xx_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, + .read_nvram = qla24xx_read_nvram_data, + .write_nvram = qla24xx_write_nvram_data, + .fw_dump = NULL, + .beacon_on = qla24xx_beacon_on, + .beacon_off = qla24xx_beacon_off, + .beacon_blink = NULL, + .read_optrom = qla24xx_read_optrom_data, + .write_optrom = qla24xx_write_optrom_data, + .get_flash_version = qla24xx_get_flash_version, + .start_scsi = qlafx00_start_scsi, + .abort_isp = qlafx00_abort_isp, + .iospace_config = qlafx00_iospace_config, + .initialize_adapter = qlafx00_initialize_adapter, +}; + +static struct isp_operations qla27xx_isp_ops = { + .pci_config = qla25xx_pci_config, + .reset_chip = qla24xx_reset_chip, + .chip_diag = qla24xx_chip_diag, + .config_rings = qla24xx_config_rings, + .reset_adapter = qla24xx_reset_adapter, + .nvram_config = qla81xx_nvram_config, + .update_fw_options = qla81xx_update_fw_options, + .load_risc = qla81xx_load_risc, + .pci_info_str = qla24xx_pci_info_str, + .fw_version_str = qla24xx_fw_version_str, + .intr_handler = qla24xx_intr_handler, + .enable_intrs = qla24xx_enable_intrs, + .disable_intrs = qla24xx_disable_intrs, + .abort_command = qla24xx_abort_command, + .target_reset = qla24xx_abort_target, + .lun_reset = qla24xx_lun_reset, + .fabric_login = qla24xx_login_fabric, + .fabric_logout = qla24xx_fabric_logout, + .calc_req_entries = NULL, + .build_iocbs = NULL, + .prep_ms_iocb = qla24xx_prep_ms_iocb, + .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, + .read_nvram = NULL, + .write_nvram = NULL, + .fw_dump = qla27xx_fwdump, + .beacon_on = qla24xx_beacon_on, + .beacon_off = qla24xx_beacon_off, + .beacon_blink = qla83xx_beacon_blink, + .read_optrom = qla25xx_read_optrom_data, + .write_optrom = qla24xx_write_optrom_data, + .get_flash_version = qla24xx_get_flash_version, + 
.start_scsi = qla24xx_dif_start_scsi, + .abort_isp = qla2x00_abort_isp, + .iospace_config = qla83xx_iospace_config, + .initialize_adapter = qla2x00_initialize_adapter, }; static inline void @@ -2176,6 +2233,14 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) /* Initialize 82XX ISP flags */ qla82xx_init_flags(ha); break; + case PCI_DEVICE_ID_QLOGIC_ISP8044: + ha->device_type |= DT_ISP8044; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + /* Initialize 82XX ISP flags */ + qla82xx_init_flags(ha); + break; case PCI_DEVICE_ID_QLOGIC_ISP2031: ha->device_type |= DT_ISP2031; ha->device_type |= DT_ZIO_SUPPORTED; @@ -2192,21 +2257,39 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->device_type |= DT_T10_PI; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; + case PCI_DEVICE_ID_QLOGIC_ISPF001: + ha->device_type |= DT_ISPFX00; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2071: + ha->device_type |= DT_ISP2071; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2271: + ha->device_type |= DT_ISP2271; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; } if (IS_QLA82XX(ha)) - ha->port_no = !(ha->portnum & 1); - else + ha->port_no = ha->portnum & 1; + else { /* Get adapter physical port no from interrupt pin register. */ pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); + if (IS_QLA27XX(ha)) + ha->port_no--; + else + ha->port_no = !(ha->port_no & 1); + } - if (ha->port_no & 1) - ha->flags.port0 = 1; - else - ha->flags.port0 = 0; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", - ha->device_type, ha->flags.port0, ha->fw_srisc_address); + ha->device_type, ha->port_no, ha->fw_srisc_address); } static void @@ -2253,7 +2336,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) uint16_t req_length = 0, rsp_length = 0; struct req_que *req = NULL; struct rsp_que *rsp = NULL; - bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); sht = &qla2xxx_driver_template; if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 || @@ -2265,7 +2347,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || - pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) { + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) { bars = pci_select_bars(pdev, IORESOURCE_MEM); mem_only = 1; ql_dbg_pci(ql_dbg_init, pdev, 0x0007, @@ -2302,20 +2388,20 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&ha->hardware_lock); spin_lock_init(&ha->vport_slock); mutex_init(&ha->selflogin_lock); + mutex_init(&ha->optrom_mutex); /* Set ISP-type information. 
*/ qla2x00_set_isp_flags(ha); /* Set EEH reset type to fundamental if required by hba */ if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || - IS_QLA83XX(ha)) + IS_QLA83XX(ha) || IS_QLA27XX(ha)) pdev->needs_freset = 1; ha->prev_topology = 0; ha->init_cb_size = sizeof(init_cb_t); ha->link_data_rate = PORT_SPEED_UNKNOWN; ha->optrom_size = OPTROM_SIZE_2300; - ha->cfg_lun_q_depth = ql2xmaxqdepth; /* Assign ISP specific operations. */ if (IS_QLA2100(ha)) { @@ -2419,6 +2505,21 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->flash_data_off = FARX_ACCESS_FLASH_DATA; ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; + } else if (IS_QLA8044(ha)) { + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; + ha->mbx_count = MAILBOX_REGISTER_COUNT; + req_length = REQUEST_ENTRY_CNT_82XX; + rsp_length = RESPONSE_ENTRY_CNT_82XX; + ha->max_loop_id = SNS_LAST_LOOP_ID_2300; + ha->init_cb_size = sizeof(struct mid_init_cb_81xx); + ha->gid_list_info_size = 8; + ha->optrom_size = OPTROM_SIZE_83XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; + ha->isp_ops = &qla8044_isp_ops; + ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; + ha->flash_data_off = FARX_ACCESS_FLASH_DATA; + ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; + ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; } else if (IS_QLA83XX(ha)) { ha->portnum = PCI_FUNC(ha->pdev->devfn); ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; @@ -2436,6 +2537,36 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; ha->nvram_conf_off = ~0; ha->nvram_data_off = ~0; + } else if (IS_QLAFX00(ha)) { + ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00; + ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00; + ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00; + req_length = REQUEST_ENTRY_CNT_FX00; + rsp_length = RESPONSE_ENTRY_CNT_FX00; + ha->isp_ops = &qlafx00_isp_ops; + ha->port_down_retry_count = 30; /* default value */ + ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; + ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL; + ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL; + ha->mr.fw_hbt_en = 1; + ha->mr.host_info_resend = false; + ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; + } else if (IS_QLA27XX(ha)) { + ha->portnum = PCI_FUNC(ha->pdev->devfn); + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; + ha->mbx_count = MAILBOX_REGISTER_COUNT; + req_length = REQUEST_ENTRY_CNT_24XX; + rsp_length = RESPONSE_ENTRY_CNT_2300; + ha->max_loop_id = SNS_LAST_LOOP_ID_2300; + ha->init_cb_size = sizeof(struct mid_init_cb_81xx); + ha->gid_list_info_size = 8; + ha->optrom_size = OPTROM_SIZE_83XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; + ha->isp_ops = &qla27xx_isp_ops; + ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; + ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; + ha->nvram_conf_off = ~0; + ha->nvram_data_off = ~0; } ql_dbg_pci(ql_dbg_init, pdev, 0x001e, @@ -2475,7 +2606,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->flags.enable_64bit_addressing ? 
"enable" : "disable"); ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); - if (!ret) { + if (ret) { ql_log_pci(ql_log_fatal, pdev, 0x0031, "Failed to allocate memory for adapter, aborting.\n"); @@ -2500,13 +2631,20 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host = base_vha->host; base_vha->req = req; - host->can_queue = req->length + 128; if (IS_QLA2XXX_MIDTYPE(ha)) base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; else base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + base_vha->vp_idx; + /* Setup fcport template structure. */ + ha->mr.fcport.vha = base_vha; + ha->mr.fcport.port_type = FCT_UNKNOWN; + ha->mr.fcport.loop_id = FC_NO_LOOP_ID; + qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED); + ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED; + ha->mr.fcport.scan_state = 1; + /* Set the SG table size based on ISP type */ if (!IS_FWI2_CAPABLE(ha)) { if (IS_QLA2100(ha)) @@ -2515,11 +2653,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if (!IS_QLA82XX(ha)) host->sg_tablesize = QLA_SG_ALL; } - ql_dbg(ql_dbg_init, base_vha, 0x0032, - "can_queue=%d, req=%p, " - "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", - host->can_queue, base_vha->req, - base_vha->mgmt_svr_loop_id, host->sg_tablesize); host->max_id = ha->max_fibre_devices; host->cmd_per_lun = 3; host->unique_id = host->host_no; @@ -2562,19 +2695,33 @@ que_init: rsp->req = req; req->rsp = rsp; + if (IS_QLAFX00(ha)) { + ha->rsp_q_map[0] = rsp; + ha->req_q_map[0] = req; + set_bit(0, ha->req_qid_map); + set_bit(0, ha->rsp_qid_map); + } + /* FWI2-capable only. */ req->req_q_in = &ha->iobase->isp24.req_q_in; req->req_q_out = &ha->iobase->isp24.req_q_out; rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; - if (ha->mqenable || IS_QLA83XX(ha)) { + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out; } - if (IS_QLA82XX(ha)) { + if (IS_QLAFX00(ha)) { + req->req_q_in = &ha->iobase->ispfx00.req_q_in; + req->req_q_out = &ha->iobase->ispfx00.req_q_out; + rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in; + rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out; + } + + if (IS_P3P_TYPE(ha)) { req->req_q_out = &ha->iobase->isp82.req_q_out[0]; rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0]; rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0]; @@ -2595,7 +2742,7 @@ que_init: "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); - if (qla2x00_initialize_adapter(base_vha)) { + if (ha->isp_ops->initialize_adapter(base_vha)) { ql_log(ql_log_fatal, base_vha, 0x00d6, "Failed to initialize adapter - Adapter flags %x.\n", base_vha->device_flags); @@ -2607,12 +2754,30 @@ que_init: qla82xx_idc_unlock(ha); ql_log(ql_log_fatal, base_vha, 0x00d7, "HW State: FAILED.\n"); + } else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla8044_wr_direct(base_vha, + QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_FAILED); + qla8044_idc_unlock(ha); + ql_log(ql_log_fatal, base_vha, 0x0150, + "HW State: FAILED.\n"); } ret = -ENODEV; goto probe_failed; } + if (IS_QLAFX00(ha)) + host->can_queue = QLAFX00_MAX_CANQUEUE; + else + host->can_queue = req->num_outstanding_cmds - 10; + + ql_dbg(ql_dbg_init, base_vha, 0x0032, + "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", + 
host->can_queue, base_vha->req, + base_vha->mgmt_svr_loop_id, host->sg_tablesize); + if (ha->mqenable) { if (qla25xx_setup_mode(base_vha)) { ql_log(ql_log_warn, base_vha, 0x00ec, @@ -2646,6 +2811,8 @@ que_init: */ qla2xxx_wake_dpc(base_vha); + INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); + if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no); ha->dpc_lp_wq = create_singlethread_workqueue(wq_name); @@ -2702,12 +2869,20 @@ skip_dpc: ha->isp_ops->enable_intrs(ha); + if (IS_QLAFX00(ha)) { + ret = qlafx00_fx_disc(base_vha, + &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO); + host->sg_tablesize = (ha->mr.extended_io_enabled) ? + QLA_SG_ALL : 128; + } + ret = scsi_add_host(host, &pdev->dev); if (ret) goto probe_failed; base_vha->flags.init_done = 1; base_vha->flags.online = 1; + ha->prev_minidump_failed = 0; ql_dbg(ql_dbg_init, base_vha, 0x00f2, "Init done and hba is online.\n"); @@ -2720,13 +2895,21 @@ skip_dpc: qla2x00_alloc_sysfs_attr(base_vha); + if (IS_QLAFX00(ha)) { + ret = qlafx00_fx_disc(base_vha, + &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO); + + /* Register system information */ + ret = qlafx00_fx_disc(base_vha, + &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO); + } + qla2x00_init_host_attr(base_vha); qla2x00_dfs_setup(base_vha); ql_log(ql_log_info, base_vha, 0x00fb, - "QLogic %s - %s.\n", - ha->model_number, ha->model_desc ? ha->model_desc : ""); + "QLogic %s - %s.\n", ha->model_number, ha->model_desc); ql_log(ql_log_info, base_vha, 0x00fc, "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info), @@ -2768,15 +2951,22 @@ probe_hw_failed: qla82xx_clear_drv_active(ha); qla82xx_idc_unlock(ha); } + if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla8044_clear_drv_active(ha); + qla8044_idc_unlock(ha); + } iospace_config_failed: - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { if (!ha->nx_pcibase) - iounmap((device_reg_t __iomem *)ha->nx_pcibase); + iounmap((device_reg_t *)ha->nx_pcibase); if (!ql2xdbwr) - iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr); + iounmap((device_reg_t *)ha->nxdb_wr_ptr); } else { if (ha->iobase) iounmap(ha->iobase); + if (ha->cregbase) + iounmap(ha->cregbase); } pci_release_selected_regions(ha->pdev, ha->bars); kfree(ha); @@ -2788,22 +2978,6 @@ probe_out: } static void -qla2x00_stop_dpc_thread(scsi_qla_host_t *vha) -{ - struct qla_hw_data *ha = vha->hw; - struct task_struct *t = ha->dpc_thread; - - if (ha->dpc_thread == NULL) - return; - /* - * qla2xxx_wake_dpc checks for ->dpc_thread - * so we need to zero it out. 
- */ - ha->dpc_thread = NULL; - kthread_stop(t); -} - -static void qla2x00_shutdown(struct pci_dev *pdev) { scsi_qla_host_t *vha; @@ -2815,6 +2989,10 @@ qla2x00_shutdown(struct pci_dev *pdev) vha = pci_get_drvdata(pdev); ha = vha->hw; + /* Notify ISPFX00 firmware */ + if (IS_QLAFX00(ha)) + qlafx00_driver_shutdown(vha, 20); + /* Turn-off FCE trace */ if (ha->flags.fce_enabled) { qla2x00_disable_fce_trace(vha, NULL, NULL); @@ -2842,30 +3020,16 @@ qla2x00_shutdown(struct pci_dev *pdev) qla2x00_free_fw_dump(ha); } +/* Deletes all the virtual ports for a given ha */ static void -qla2x00_remove_one(struct pci_dev *pdev) +qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) { - scsi_qla_host_t *base_vha, *vha; - struct qla_hw_data *ha; + struct Scsi_Host *scsi_host; + scsi_qla_host_t *vha; unsigned long flags; - /* - * If the PCI device is disabled that means that probe failed and any - * resources should be have cleaned up on probe exit. - */ - if (!atomic_read(&pdev->enable_cnt)) - return; - - base_vha = pci_get_drvdata(pdev); - ha = base_vha->hw; - - ha->flags.host_shutting_down = 1; - - set_bit(UNLOADING, &base_vha->dpc_flags); mutex_lock(&ha->vport_lock); while (ha->cur_vport_count) { - struct Scsi_Host *scsi_host; - spin_lock_irqsave(&ha->vport_slock, flags); BUG_ON(base_vha->list.next == &ha->vp_list); @@ -2882,27 +3046,12 @@ qla2x00_remove_one(struct pci_dev *pdev) mutex_lock(&ha->vport_lock); } mutex_unlock(&ha->vport_lock); +} - if (IS_QLA8031(ha)) { - ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, - "Clearing fcoe driver presence.\n"); - if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) - ql_dbg(ql_dbg_p3p, base_vha, 0xb079, - "Error while clearing DRV-Presence.\n"); - } - - qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); - - qla2x00_dfs_remove(base_vha); - - qla84xx_put_chip(base_vha); - - /* Disable timer */ - if (base_vha->timer_active) - qla2x00_stop_timer(base_vha); - - base_vha->flags.online = 0; - +/* Stops all deferred work threads */ +static void +qla2x00_destroy_deferred_work(struct qla_hw_data *ha) +{ /* Flush the work queue and remove it */ if (ha->wq) { flush_workqueue(ha->wq); @@ -2936,36 +3085,109 @@ qla2x00_remove_one(struct pci_dev *pdev) ha->dpc_thread = NULL; kthread_stop(t); } - qlt_remove_target(ha, base_vha); - - qla2x00_free_sysfs_attr(base_vha); - - fc_remove_host(base_vha->host); - - scsi_remove_host(base_vha->host); - - qla2x00_free_device(base_vha); - - scsi_host_put(base_vha->host); +} +static void +qla2x00_unmap_iobases(struct qla_hw_data *ha) +{ if (IS_QLA82XX(ha)) { - qla82xx_idc_lock(ha); - qla82xx_clear_drv_active(ha); - qla82xx_idc_unlock(ha); - iounmap((device_reg_t __iomem *)ha->nx_pcibase); + iounmap((device_reg_t *)ha->nx_pcibase); if (!ql2xdbwr) - iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr); + iounmap((device_reg_t *)ha->nxdb_wr_ptr); } else { if (ha->iobase) iounmap(ha->iobase); + if (ha->cregbase) + iounmap(ha->cregbase); + if (ha->mqiobase) iounmap(ha->mqiobase); - if (IS_QLA83XX(ha) && ha->msixbase) + if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase) iounmap(ha->msixbase); } +} + +static void +qla2x00_clear_drv_active(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla8044_clear_drv_active(ha); + qla8044_idc_unlock(ha); + } else if (IS_QLA82XX(ha)) { + qla82xx_idc_lock(ha); + qla82xx_clear_drv_active(ha); + qla82xx_idc_unlock(ha); + } +} + +static void +qla2x00_remove_one(struct pci_dev *pdev) +{ + scsi_qla_host_t *base_vha; + struct 
qla_hw_data *ha; + + /* + * If the PCI device is disabled that means that probe failed and any + * resources should have been cleaned up on probe exit. + */ + if (!atomic_read(&pdev->enable_cnt)) + return; + + base_vha = pci_get_drvdata(pdev); + ha = base_vha->hw; + + qla2x00_wait_for_hba_ready(base_vha); + + set_bit(UNLOADING, &base_vha->dpc_flags); + + if (IS_QLAFX00(ha)) + qlafx00_driver_shutdown(base_vha, 20); + + qla2x00_delete_all_vps(ha, base_vha); + + if (IS_QLA8031(ha)) { + ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, + "Clearing fcoe driver presence.\n"); + if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) + ql_dbg(ql_dbg_p3p, base_vha, 0xb079, + "Error while clearing DRV-Presence.\n"); + } + + qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); + + qla2x00_dfs_remove(base_vha); + + qla84xx_put_chip(base_vha); + + /* Disable timer */ + if (base_vha->timer_active) + qla2x00_stop_timer(base_vha); + + base_vha->flags.online = 0; + + qla2x00_destroy_deferred_work(ha); + + qlt_remove_target(ha, base_vha); + + qla2x00_free_sysfs_attr(base_vha, true); + + fc_remove_host(base_vha->host); + + scsi_remove_host(base_vha->host); + + qla2x00_free_device(base_vha); + + scsi_host_put(base_vha->host); + + qla2x00_clear_drv_active(base_vha); + + qla2x00_unmap_iobases(ha); pci_release_selected_regions(ha->pdev, ha->bars); kfree(ha); @@ -2974,7 +3196,6 @@ qla2x00_remove_one(struct pci_dev *pdev) pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } static void @@ -2988,9 +3209,8 @@ qla2x00_free_device(scsi_qla_host_t *vha) if (vha->timer_active) qla2x00_stop_timer(vha); - qla2x00_stop_dpc_thread(vha); - qla25xx_delete_queues(vha); + if (ha->flags.fce_enabled) qla2x00_disable_fce_trace(vha, NULL, NULL); @@ -3068,6 +3288,12 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, int do_login, int defer) { + if (IS_QLAFX00(vha->hw)) { + qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); + qla2x00_schedule_rport_del(vha, fcport, defer); + return; + } + if (atomic_read(&fcport->state) == FCS_ONLINE && vha->vp_idx == fcport->vha->vp_idx) { qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); @@ -3088,14 +3314,8 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, set_bit(RELOGIN_NEEDED, &vha->dpc_flags); ql_dbg(ql_dbg_disc, vha, 0x2067, - "Port login retry " - "%02x%02x%02x%02x%02x%02x%02x%02x, " - "id = 0x%04x retry cnt=%d.\n", - fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7], - fcport->loop_id, fcport->login_retry); + "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n", + fcport->port_name, fcport->loop_id, fcport->login_retry); } } @@ -3168,7 +3388,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, if (!ha->srb_mempool) goto fail_free_gid_list; - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { /* Allocate cache for CT6 Ctx.
*/ if (!ctx_cachep) { ctx_cachep = kmem_cache_create("qla2xxx_ctx", @@ -3202,7 +3422,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); - if (IS_QLA82XX(ha) || ql2xenabledif) { + if (IS_P3P_TYPE(ha) || ql2xenabledif) { ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, DSD_LIST_DMA_POOL_SIZE, 8, 0); if (!ha->dl_dma_pool) { @@ -3301,7 +3521,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, ha->npiv_info = NULL; /* Get consistent memory allocated for EX-INIT-CB. */ - if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) { + if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) { ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &ha->ex_init_cb_dma); if (!ha->ex_init_cb) @@ -3332,10 +3552,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, else { qla2x00_set_reserved_loop_ids(ha); ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, - "loop_id_map=%p. \n", ha->loop_id_map); + "loop_id_map=%p.\n", ha->loop_id_map); } - return 1; + return 0; fail_async_pd: dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); @@ -3410,28 +3630,35 @@ fail: * Frees fw dump stuff. * * Input: -* ha = adapter block pointer. +* ha = adapter block pointer */ static void qla2x00_free_fw_dump(struct qla_hw_data *ha) { if (ha->fce) - dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, - ha->fce_dma); + dma_free_coherent(&ha->pdev->dev, + FCE_SIZE, ha->fce, ha->fce_dma); - if (ha->fw_dump) { - if (ha->eft) - dma_free_coherent(&ha->pdev->dev, - ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); + if (ha->eft) + dma_free_coherent(&ha->pdev->dev, + EFT_SIZE, ha->eft, ha->eft_dma); + + if (ha->fw_dump) vfree(ha->fw_dump); - } + if (ha->fw_dump_template) + vfree(ha->fw_dump_template); + ha->fce = NULL; ha->fce_dma = 0; ha->eft = NULL; ha->eft_dma = 0; - ha->fw_dump = NULL; ha->fw_dumped = 0; + ha->fw_dump_cap_flags = 0; ha->fw_dump_reading = 0; + ha->fw_dump = NULL; + ha->fw_dump_len = 0; + ha->fw_dump_template = NULL; + ha->fw_dump_template_len = 0; } /* @@ -3710,6 +3937,22 @@ qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp); } +int +qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, + uint32_t *data, int cnt) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_AENFX); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.aenfx.evtcode = evtcode; + e->u.aenfx.count = cnt; + memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt); + return qla2x00_post_work(vha, e); +} + void qla2x00_do_work(struct scsi_qla_host *vha) { @@ -3758,6 +4001,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) case QLA_EVT_UEVENT: qla2x00_uevent_emit(vha, e->u.uevent.code); break; + case QLA_EVT_AENFX: + qlafx00_process_aen(vha, e); + break; } if (e->flags & QLA_EVT_FLAG_FREE) kfree(e); @@ -4508,6 +4754,66 @@ exit: return rval; } +void +qla2x00_disable_board_on_pci_error(struct work_struct *work) +{ + struct qla_hw_data *ha = container_of(work, struct qla_hw_data, + board_disable); + struct pci_dev *pdev = ha->pdev; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + ql_log(ql_log_warn, base_vha, 0x015b, + "Disabling adapter.\n"); + + set_bit(UNLOADING, &base_vha->dpc_flags); + + qla2x00_delete_all_vps(ha, base_vha); + + qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); + + qla2x00_dfs_remove(base_vha); + + 
qla84xx_put_chip(base_vha); + + if (base_vha->timer_active) + qla2x00_stop_timer(base_vha); + + base_vha->flags.online = 0; + + qla2x00_destroy_deferred_work(ha); + + /* + * Do not try to stop beacon blink as it will issue a mailbox + * command. + */ + qla2x00_free_sysfs_attr(base_vha, false); + + fc_remove_host(base_vha->host); + + scsi_remove_host(base_vha->host); + + base_vha->flags.init_done = 0; + qla25xx_delete_queues(base_vha); + qla2x00_free_irqs(base_vha); + qla2x00_free_fcports(base_vha); + qla2x00_mem_free(ha); + qla82xx_md_free(base_vha); + qla2x00_free_queues(ha); + + scsi_host_put(base_vha->host); + + qla2x00_unmap_iobases(ha); + + pci_release_selected_regions(ha->pdev, ha->bars); + kfree(ha); + ha = NULL; + + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + +} + /************************************************************************** * qla2x00_do_dpc * This kernel thread is a task that is schedule by the interrupt handler @@ -4531,7 +4837,7 @@ qla2x00_do_dpc(void *data) ha = (struct qla_hw_data *)data; base_vha = pci_get_drvdata(ha->pdev); - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { @@ -4558,17 +4864,33 @@ qla2x00_do_dpc(void *data) qla2x00_do_work(base_vha); - if (IS_QLA82XX(ha)) { - if (test_and_clear_bit(ISP_UNRECOVERABLE, - &base_vha->dpc_flags)) { - qla82xx_idc_lock(ha); - qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, - QLA8XXX_DEV_FAILED); - qla82xx_idc_unlock(ha); - ql_log(ql_log_info, base_vha, 0x4004, - "HW State: FAILED.\n"); - qla82xx_device_state_handler(base_vha); - continue; + if (IS_P3P_TYPE(ha)) { + if (IS_QLA8044(ha)) { + if (test_and_clear_bit(ISP_UNRECOVERABLE, + &base_vha->dpc_flags)) { + qla8044_idc_lock(ha); + qla8044_wr_direct(base_vha, + QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_FAILED); + qla8044_idc_unlock(ha); + ql_log(ql_log_info, base_vha, 0x4004, + "HW State: FAILED.\n"); + qla8044_device_state_handler(base_vha); + continue; + } + + } else { + if (test_and_clear_bit(ISP_UNRECOVERABLE, + &base_vha->dpc_flags)) { + qla82xx_idc_lock(ha); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA8XXX_DEV_FAILED); + qla82xx_idc_unlock(ha); + ql_log(ql_log_info, base_vha, 0x0151, + "HW State: FAILED.\n"); + qla82xx_device_state_handler(base_vha); + continue; + } } if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED, @@ -4592,6 +4914,47 @@ qla2x00_do_dpc(void *data) ql_dbg(ql_dbg_dpc, base_vha, 0x4006, "FCoE context reset end.\n"); } + } else if (IS_QLAFX00(ha)) { + if (test_and_clear_bit(ISP_UNRECOVERABLE, + &base_vha->dpc_flags)) { + ql_dbg(ql_dbg_dpc, base_vha, 0x4020, + "Firmware Reset Recovery\n"); + if (qlafx00_reset_initialize(base_vha)) { + /* Failed. Abort isp later. 
*/ + if (!test_bit(UNLOADING, + &base_vha->dpc_flags)) { + set_bit(ISP_UNRECOVERABLE, + &base_vha->dpc_flags); + ql_dbg(ql_dbg_dpc, base_vha, + 0x4021, + "Reset Recovery Failed\n"); + } + } + } + + if (test_and_clear_bit(FX00_TARGET_SCAN, + &base_vha->dpc_flags)) { + ql_dbg(ql_dbg_dpc, base_vha, 0x4022, + "ISPFx00 Target Scan scheduled\n"); + if (qlafx00_rescan_isp(base_vha)) { + if (!test_bit(UNLOADING, + &base_vha->dpc_flags)) + set_bit(ISP_UNRECOVERABLE, + &base_vha->dpc_flags); + ql_dbg(ql_dbg_dpc, base_vha, 0x401e, + "ISPFx00 Target Scan Failed\n"); + } + ql_dbg(ql_dbg_dpc, base_vha, 0x401f, + "ISPFx00 Target Scan End\n"); + } + if (test_and_clear_bit(FX00_HOST_INFO_RESEND, + &base_vha->dpc_flags)) { + ql_dbg(ql_dbg_dpc, base_vha, 0x4023, + "ISPFx00 Host Info resend scheduled\n"); + qlafx00_fx_disc(base_vha, + &base_vha->hw->mr.fcport, + FXDISC_REG_HOST_INFO); + } } if (test_and_clear_bit(ISP_ABORT_NEEDED, @@ -4630,19 +4993,32 @@ qla2x00_do_dpc(void *data) clear_bit(SCR_PENDING, &base_vha->dpc_flags); } + if (IS_QLAFX00(ha)) + goto loop_resync_check; + if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, base_vha, 0x4009, "Quiescence mode scheduled.\n"); - if (IS_QLA82XX(ha)) { - qla82xx_device_state_handler(base_vha); + if (IS_P3P_TYPE(ha)) { + if (IS_QLA82XX(ha)) + qla82xx_device_state_handler(base_vha); + if (IS_QLA8044(ha)) + qla8044_device_state_handler(base_vha); clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); if (!ha->flags.quiesce_owner) { qla2x00_perform_loop_resync(base_vha); - - qla82xx_idc_lock(ha); - qla82xx_clear_qsnt_ready(base_vha); - qla82xx_idc_unlock(ha); + if (IS_QLA82XX(ha)) { + qla82xx_idc_lock(ha); + qla82xx_clear_qsnt_ready( + base_vha); + qla82xx_idc_unlock(ha); + } else if (IS_QLA8044(ha)) { + qla8044_idc_lock(ha); + qla8044_clear_qsnt_ready( + base_vha); + qla8044_idc_unlock(ha); + } } } else { clear_bit(ISP_QUIESCE_NEEDED, @@ -4654,7 +5030,7 @@ qla2x00_do_dpc(void *data) } if (test_and_clear_bit(RESET_MARKER_NEEDED, - &base_vha->dpc_flags) && + &base_vha->dpc_flags) && (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { ql_dbg(ql_dbg_dpc, base_vha, 0x400b, @@ -4677,9 +5053,9 @@ qla2x00_do_dpc(void *data) ql_dbg(ql_dbg_dpc, base_vha, 0x400e, "Relogin end.\n"); } - +loop_resync_check: if (test_and_clear_bit(LOOP_RESYNC_NEEDED, - &base_vha->dpc_flags)) { + &base_vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, base_vha, 0x400f, "Loop resync scheduled.\n"); @@ -4697,32 +5073,27 @@ qla2x00_do_dpc(void *data) "Loop resync end.\n"); } + if (IS_QLAFX00(ha)) + goto intr_on_check; + if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) && atomic_read(&base_vha->loop_state) == LOOP_READY) { clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags); qla2xxx_flash_npiv_conf(base_vha); } - if (test_and_clear_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, - &base_vha->dpc_flags)) { - /* Prevents simultaneous ramp up and down */ - clear_bit(HOST_RAMP_UP_QUEUE_DEPTH, - &base_vha->dpc_flags); - qla2x00_host_ramp_down_queuedepth(base_vha); - } - - if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH, - &base_vha->dpc_flags)) - qla2x00_host_ramp_up_queuedepth(base_vha); - +intr_on_check: if (!ha->interrupts_on) ha->isp_ops->enable_intrs(ha); if (test_and_clear_bit(BEACON_BLINK_NEEDED, - &base_vha->dpc_flags)) - ha->isp_ops->beacon_blink(base_vha); + &base_vha->dpc_flags)) { + if (ha->beacon_blink_led == 1) + ha->isp_ops->beacon_blink(base_vha); + } - qla2x00_do_dpc_all_vps(base_vha); + if (!IS_QLAFX00(ha)) + qla2x00_do_dpc_all_vps(base_vha); ha->dpc_active = 0; end_loop: @@ 
-4807,17 +5178,34 @@ qla2x00_timer(scsi_qla_host_t *vha) return; } - /* Hardware read to raise pending EEH errors during mailbox waits. */ - if (!pci_channel_offline(ha->pdev)) + /* + * Hardware read to raise pending EEH errors during mailbox waits. If + * the read returns -1 then disable the board. + */ + if (!pci_channel_offline(ha->pdev)) { pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); + if (w == 0xffff) + /* + * Schedule this on the default system workqueue so that + * all the adapter workqueues and the DPC thread can be + * shut down cleanly. + */ + schedule_work(&ha->board_disable); + } /* Make sure qla82xx_watchdog is run only for physical port */ - if (!vha->vp_idx && IS_QLA82XX(ha)) { + if (!vha->vp_idx && IS_P3P_TYPE(ha)) { if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) start_dpc++; - qla82xx_watchdog(vha); + if (IS_QLA82XX(ha)) + qla82xx_watchdog(vha); + else if (IS_QLA8044(ha)) + qla8044_watchdog(vha); } + if (!vha->vp_idx && IS_QLAFX00(ha)) + qlafx00_timer_routine(vha); + /* Loop down handler. */ if (atomic_read(&vha->loop_down_timer) > 0 && !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && @@ -4888,11 +5276,10 @@ qla2x00_timer(scsi_qla_host_t *vha) "Loop down - seconds remaining %d.\n", atomic_read(&vha->loop_down_timer)); } - /* Check if beacon LED needs to be blinked for physical host only */ if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { /* There is no beacon_blink function for ISP82xx */ - if (!IS_QLA82XX(ha)) { + if (!IS_P3P_TYPE(ha)) { set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); start_dpc++; } @@ -4912,9 +5299,7 @@ qla2x00_timer(scsi_qla_host_t *vha) test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || - test_bit(RELOGIN_NEEDED, &vha->dpc_flags) || - test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags) || - test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags))) { + test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) { ql_dbg(ql_dbg_timer, vha, 0x600b, "isp_abort_needed=%d loop_resync_needed=%d " "fcport_update_needed=%d start_dpc=%d " @@ -4927,15 +5312,12 @@ qla2x00_timer(scsi_qla_host_t *vha) ql_dbg(ql_dbg_timer, vha, 0x600c, "beacon_blink_needed=%d isp_unrecoverable=%d " "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " - "relogin_needed=%d, host_ramp_down_needed=%d " - "host_ramp_up_needed=%d.\n", + "relogin_needed=%d.\n", test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), test_bit(VP_DPC_NEEDED, &vha->dpc_flags), - test_bit(RELOGIN_NEEDED, &vha->dpc_flags), - test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags), - test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags)); + test_bit(RELOGIN_NEEDED, &vha->dpc_flags)); qla2xxx_wake_dpc(vha); } @@ -4944,7 +5326,7 @@ qla2x00_timer(scsi_qla_host_t *vha) /* Firmware interface routines.
*/ -#define FW_BLOBS 10 +#define FW_BLOBS 11 #define FW_ISP21XX 0 #define FW_ISP22XX 1 #define FW_ISP2300 2 @@ -4955,6 +5337,7 @@ qla2x00_timer(scsi_qla_host_t *vha) #define FW_ISP82XX 7 #define FW_ISP2031 8 #define FW_ISP8031 9 +#define FW_ISP27XX 10 #define FW_FILE_ISP21XX "ql2100_fw.bin" #define FW_FILE_ISP22XX "ql2200_fw.bin" @@ -4966,6 +5349,8 @@ qla2x00_timer(scsi_qla_host_t *vha) #define FW_FILE_ISP82XX "ql8200_fw.bin" #define FW_FILE_ISP2031 "ql2600_fw.bin" #define FW_FILE_ISP8031 "ql8300_fw.bin" +#define FW_FILE_ISP27XX "ql2700_fw.bin" + static DEFINE_MUTEX(qla_fw_lock); @@ -4980,6 +5365,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = { { .name = FW_FILE_ISP82XX, }, { .name = FW_FILE_ISP2031, }, { .name = FW_FILE_ISP8031, }, + { .name = FW_FILE_ISP27XX, }, }; struct fw_blob * @@ -5008,6 +5394,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha) blob = &qla_fw_blobs[FW_ISP2031]; } else if (IS_QLA8031(ha)) { blob = &qla_fw_blobs[FW_ISP8031]; + } else if (IS_QLA27XX(ha)) { + blob = &qla_fw_blobs[FW_ISP27XX]; } else { return NULL; } @@ -5335,6 +5723,10 @@ static struct pci_device_id qla2xxx_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, { 0 }, }; MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); @@ -5351,7 +5743,7 @@ static struct pci_driver qla2xxx_pci_driver = { .err_handler = &qla2xxx_err_handler, }; -static struct file_operations apidev_fops = { +static const struct file_operations apidev_fops = { .owner = THIS_MODULE, .llseek = noop_llseek, }; diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h index 46ef0ac48f4..2fb7ebfbbc3 100644 --- a/drivers/scsi/qla2xxx/qla_settings.h +++ b/drivers/scsi/qla2xxx/qla_settings.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 3bef6736d88..bca173e56f1 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -565,10 +565,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) *start = FA_FLASH_LAYOUT_ADDR; else if (IS_QLA81XX(ha)) *start = FA_FLASH_LAYOUT_ADDR_81; - else if (IS_QLA82XX(ha)) { + else if (IS_P3P_TYPE(ha)) { *start = FA_FLASH_LAYOUT_ADDR_82; goto end; - } else if (IS_QLA83XX(ha)) { + } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { *start = FA_FLASH_LAYOUT_ADDR_83; goto end; } @@ -682,7 +682,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) /* Assign FCP prio region since older adapters may not have FLT, or FCP prio region in it's FLT. */ - ha->flt_region_fcp_prio = ha->flags.port0 ? + ha->flt_region_fcp_prio = (ha->port_no == 0) ? 
fcp_prio_cfg0[def] : fcp_prio_cfg1[def]; ha->flt_region_flt = flt_addr; @@ -719,7 +719,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) start = le32_to_cpu(region->start) >> 2; ql_dbg(ql_dbg_init, vha, 0x0049, "FLT[%02x]: start=0x%x " - "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), + "end=0x%x size=0x%x.\n", le32_to_cpu(region->code) & 0xff, start, le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)); @@ -741,57 +741,89 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) if (IS_QLA8031(ha)) break; ha->flt_region_vpd_nvram = start; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) break; - if (ha->flags.port0) + if (ha->port_no == 0) ha->flt_region_vpd = start; break; case FLT_REG_VPD_1: - if (IS_QLA82XX(ha) || IS_QLA8031(ha)) + if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) break; - if (!ha->flags.port0) + if (ha->port_no == 1) + ha->flt_region_vpd = start; + break; + case FLT_REG_VPD_2: + if (!IS_QLA27XX(ha)) + break; + if (ha->port_no == 2) + ha->flt_region_vpd = start; + break; + case FLT_REG_VPD_3: + if (!IS_QLA27XX(ha)) + break; + if (ha->port_no == 3) ha->flt_region_vpd = start; break; case FLT_REG_NVRAM_0: if (IS_QLA8031(ha)) break; - if (ha->flags.port0) + if (ha->port_no == 0) ha->flt_region_nvram = start; break; case FLT_REG_NVRAM_1: if (IS_QLA8031(ha)) break; - if (!ha->flags.port0) + if (ha->port_no == 1) + ha->flt_region_nvram = start; + break; + case FLT_REG_NVRAM_2: + if (!IS_QLA27XX(ha)) + break; + if (ha->port_no == 2) + ha->flt_region_nvram = start; + break; + case FLT_REG_NVRAM_3: + if (!IS_QLA27XX(ha)) + break; + if (ha->port_no == 3) ha->flt_region_nvram = start; break; case FLT_REG_FDT: ha->flt_region_fdt = start; break; case FLT_REG_NPIV_CONF_0: - if (ha->flags.port0) + if (ha->port_no == 0) ha->flt_region_npiv_conf = start; break; case FLT_REG_NPIV_CONF_1: - if (!ha->flags.port0) + if (ha->port_no == 1) ha->flt_region_npiv_conf = start; break; case FLT_REG_GOLD_FW: ha->flt_region_gold_fw = start; break; case FLT_REG_FCP_PRIO_0: - if (ha->flags.port0) + if (ha->port_no == 0) ha->flt_region_fcp_prio = start; break; case FLT_REG_FCP_PRIO_1: - if (!ha->flags.port0) + if (ha->port_no == 1) ha->flt_region_fcp_prio = start; break; case FLT_REG_BOOT_CODE_82XX: ha->flt_region_boot = start; break; + case FLT_REG_BOOT_CODE_8044: + if (IS_QLA8044(ha)) + ha->flt_region_boot = start; + break; case FLT_REG_FW_82XX: ha->flt_region_fw = start; break; + case FLT_REG_CNA_FW: + if (IS_CNA_CAPABLE(ha)) + ha->flt_region_fw = start; + break; case FLT_REG_GOLD_FW_82XX: ha->flt_region_gold_fw = start; break; @@ -803,15 +835,15 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) ha->flt_region_vpd = start; break; case FLT_REG_FCOE_NVRAM_0: - if (!IS_QLA8031(ha)) + if (!(IS_QLA8031(ha) || IS_QLA8044(ha))) break; - if (ha->flags.port0) + if (ha->port_no == 0) ha->flt_region_nvram = start; break; case FLT_REG_FCOE_NVRAM_1: - if (!IS_QLA8031(ha)) + if (!(IS_QLA8031(ha) || IS_QLA8044(ha))) break; - if (!ha->flags.port0) + if (ha->port_no == 1) ha->flt_region_nvram = start; break; } @@ -824,12 +856,12 @@ no_flash_data: ha->flt_region_fw = def_fw[def]; ha->flt_region_boot = def_boot[def]; ha->flt_region_vpd_nvram = def_vpd_nvram[def]; - ha->flt_region_vpd = ha->flags.port0 ? + ha->flt_region_vpd = (ha->port_no == 0) ? def_vpd0[def] : def_vpd1[def]; - ha->flt_region_nvram = ha->flags.port0 ? + ha->flt_region_nvram = (ha->port_no == 0) ? def_nvram0[def] : def_nvram1[def]; ha->flt_region_fdt = def_fdt[def]; - ha->flt_region_npiv_conf = ha->flags.port0 ? 
+ ha->flt_region_npiv_conf = (ha->port_no == 0) ? def_npiv_conf0[def] : def_npiv_conf1[def]; done: ql_dbg(ql_dbg_init, vha, 0x004a, @@ -883,7 +915,13 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha) mid = le16_to_cpu(fdt->man_id); fid = le16_to_cpu(fdt->id); ha->fdt_wrt_disable = fdt->wrt_disable_bits; - ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0300 | fdt->erase_cmd); + ha->fdt_wrt_enable = fdt->wrt_enable_bits; + ha->fdt_wrt_sts_reg_cmd = fdt->wrt_sts_reg_cmd; + if (IS_QLA8044(ha)) + ha->fdt_erase_cmd = fdt->erase_cmd; + else + ha->fdt_erase_cmd = + flash_conf_addr(ha, 0x0300 | fdt->erase_cmd); ha->fdt_block_size = le32_to_cpu(fdt->block_size); if (fdt->unprotect_sec_cmd) { ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 | @@ -895,7 +933,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha) goto done; no_flash_data: loc = locations[0]; - if (IS_QLA82XX(ha)) { + if (IS_P3P_TYPE(ha)) { ha->fdt_block_size = FLASH_BLK_SIZE_64K; goto done; } @@ -946,7 +984,7 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; - if (!IS_QLA82XX(ha)) + if (!(IS_P3P_TYPE(ha))) return; wptr = (uint32_t *)req->ring; @@ -975,7 +1013,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && - !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) + !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha)) return QLA_SUCCESS; ret = qla2xxx_find_flt_start(vha, &flt_addr); @@ -1008,6 +1046,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) if (ha->flags.nic_core_reset_hdlr_active) return; + if (IS_QLA8044(ha)) + return; + ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); if (hdr.version == __constant_cpu_to_le16(0xffff)) @@ -1175,7 +1216,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, struct qla_hw_data *ha = vha->hw; /* Prepare burst-capable write on supported ISPs. */ - if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) && + if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || + IS_QLA27XX(ha)) && !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) { optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, &optrom_dma, GFP_KERNEL); @@ -1302,7 +1344,7 @@ qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, uint32_t *dwptr; struct qla_hw_data *ha = vha->hw; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return buf; /* Dword reads to flash. */ @@ -1360,7 +1402,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, ret = QLA_SUCCESS; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return ret; /* Enable flash write. 
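On the parts that do reach this point, enabling writes is a register toggle on ctrl_status before the dwords are streamed out; roughly, following this driver's conventions (a sketch, not new code in this patch):

    WRT_REG_DWORD(&reg->ctrl_status,
        RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
    ... stream dwords ...
    WRT_REG_DWORD(&reg->ctrl_status,
        RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);

P3P-type adapters (ISP82xx/8044) took the early return above because their NVRAM is not written through this direct register path.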
*/ @@ -1474,7 +1516,7 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha) struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return; spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1658,7 +1700,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha) if (!IS_QLA83XX(ha)) goto out; - if (ha->flags.port0) + if (ha->port_no == 0) led_select_value = QLA83XX_LED_PORT0; else led_select_value = QLA83XX_LED_PORT1; @@ -1685,11 +1727,8 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha) if (IS_QLA2031(ha)) { led_select_value = qla83xx_select_led_port(ha); - qla83xx_wr_reg(vha, led_select_value, 0x40002000); - qla83xx_wr_reg(vha, led_select_value + 4, 0x40002000); - msleep(1000); - qla83xx_wr_reg(vha, led_select_value, 0x40004000); - qla83xx_wr_reg(vha, led_select_value + 4, 0x40004000); + qla83xx_wr_reg(vha, led_select_value, 0x40000230); + qla83xx_wr_reg(vha, led_select_value + 4, 0x40000230); } else if (IS_QLA8031(ha)) { led_select_value = qla83xx_select_led_port(ha); @@ -1752,7 +1791,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha) struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return QLA_SUCCESS; if (IS_QLA8031(ha) || IS_QLA81XX(ha)) @@ -1804,7 +1843,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha) struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return QLA_SUCCESS; ha->beacon_blink_led = 0; @@ -2315,7 +2354,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, */ rest_addr = 0xffff; sec_mask = 0x10000; - break; + break; } /* * ST m29w010b part - 16kb sector size @@ -2541,7 +2580,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, uint32_t faddr, left, burst; struct qla_hw_data *ha = vha->hw; - if (IS_QLA25XX(ha) || IS_QLA81XX(ha)) + if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA27XX(ha)) goto try_fast; if (offset & 0xfff) goto slow_read; @@ -2822,6 +2861,121 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf) } int +qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) +{ + int ret = QLA_SUCCESS; + uint32_t pcihdr, pcids; + uint32_t *dcode; + uint8_t *bcode; + uint8_t code_type, last_image; + struct qla_hw_data *ha = vha->hw; + + if (!mbuf) + return QLA_FUNCTION_FAILED; + + memset(ha->bios_revision, 0, sizeof(ha->bios_revision)); + memset(ha->efi_revision, 0, sizeof(ha->efi_revision)); + memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + + dcode = mbuf; + + /* Begin with first PCI expansion ROM header. */ + pcihdr = ha->flt_region_boot << 2; + last_image = 1; + do { + /* Verify PCI expansion ROM header. */ + ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcihdr, + 0x20 * 4); + bcode = mbuf + (pcihdr % 4); + if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) { + /* No signature */ + ql_log(ql_log_fatal, vha, 0x0154, + "No matching ROM signature.\n"); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Locate PCI data structure. */ + pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); + + ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcids, + 0x20 * 4); + bcode = mbuf + (pcihdr % 4); + + /* Validate signature of PCI data structure. */ + if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || + bcode[0x2] != 'I' || bcode[0x3] != 'R') { + /* Incorrect header. 
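Each image in the expansion ROM chain is laid out per the PCI firmware spec: a 0x55 0xAA signature at offset 0, a 16-bit pointer to the PCIR data structure at offset 0x18, the code type at PCIR offset 0x14, a last-image flag in bit 7 of PCIR offset 0x15, and the image length in 512-byte units at PCIR offset 0x10 - exactly the offsets this function walks. Condensed from the surrounding code:

    pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);     locate PCIR
    last_image = bcode[0x15] & BIT_7;                        set on the final image
    pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;      advance to next image

so a missing PCIR signature here means the chain is corrupt, not merely empty.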
*/ + ql_log(ql_log_fatal, vha, 0x0155, + "PCI data struct not found pcir_adr=%x.\n", pcids); + ret = QLA_FUNCTION_FAILED; + break; + } + + /* Read version */ + code_type = bcode[0x14]; + switch (code_type) { + case ROM_CODE_TYPE_BIOS: + /* Intel x86, PC-AT compatible. */ + ha->bios_revision[0] = bcode[0x12]; + ha->bios_revision[1] = bcode[0x13]; + ql_dbg(ql_dbg_init, vha, 0x0156, + "Read BIOS %d.%d.\n", + ha->bios_revision[1], ha->bios_revision[0]); + break; + case ROM_CODE_TYPE_FCODE: + /* Open Firmware standard for PCI (FCode). */ + ha->fcode_revision[0] = bcode[0x12]; + ha->fcode_revision[1] = bcode[0x13]; + ql_dbg(ql_dbg_init, vha, 0x0157, + "Read FCODE %d.%d.\n", + ha->fcode_revision[1], ha->fcode_revision[0]); + break; + case ROM_CODE_TYPE_EFI: + /* Extensible Firmware Interface (EFI). */ + ha->efi_revision[0] = bcode[0x12]; + ha->efi_revision[1] = bcode[0x13]; + ql_dbg(ql_dbg_init, vha, 0x0158, + "Read EFI %d.%d.\n", + ha->efi_revision[1], ha->efi_revision[0]); + break; + default: + ql_log(ql_log_warn, vha, 0x0159, + "Unrecognized code type %x at pcids %x.\n", + code_type, pcids); + break; + } + + last_image = bcode[0x15] & BIT_7; + + /* Locate next PCI expansion ROM. */ + pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512; + } while (!last_image); + + /* Read firmware image information. */ + memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); + dcode = mbuf; + ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, ha->flt_region_fw << 2, + 0x20); + bcode = mbuf + (pcihdr % 4); + + /* Validate signature of PCI data structure. */ + if (bcode[0x0] == 0x3 && bcode[0x1] == 0x0 && + bcode[0x2] == 0x40 && bcode[0x3] == 0x40) { + ha->fw_revision[0] = bcode[0x4]; + ha->fw_revision[1] = bcode[0x5]; + ha->fw_revision[2] = bcode[0x6]; + ql_dbg(ql_dbg_init, vha, 0x0153, + "Firmware revision %d.%d.%d\n", + ha->fw_revision[0], ha->fw_revision[1], + ha->fw_revision[2]); + } + + return ret; +} + +int qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) { int ret = QLA_SUCCESS; @@ -2832,7 +2986,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) int i; struct qla_hw_data *ha = vha->hw; - if (IS_QLA82XX(ha)) + if (IS_P3P_TYPE(ha)) return ret; if (!mbuf) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 61b5d8c2b5d..e632e14180c 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -10,7 +10,7 @@ * * Forward port and refactoring to modern qla2xxx and target/configfs * - * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org> + * Copyright (C) 2010-2013 Nicholas A. 
Bellinger <nab@kernel.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -104,7 +104,6 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, /* * Global Variables */ -static struct kmem_cache *qla_tgt_cmd_cachep; static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; static mempool_t *qla_tgt_mgmt_cmd_mempool; static struct workqueue_struct *qla_tgt_wq; @@ -182,6 +181,11 @@ struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha, void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, struct atio_from_isp *atio) { + ql_dbg(ql_dbg_tgt, vha, 0xe072, + "%s: qla_target(%d): type %x ox_id %04x\n", + __func__, vha->vp_idx, atio->u.raw.entry_type, + be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); + switch (atio->u.raw.entry_type) { case ATIO_TYPE7: { @@ -236,6 +240,10 @@ void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) { switch (pkt->entry_type) { + case CTIO_CRC2: + ql_dbg(ql_dbg_tgt, vha, 0xe073, + "qla_target(%d):%s: CRC2 Response pkt\n", + vha->vp_idx, __func__); case CTIO_TYPE7: { struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; @@ -430,13 +438,8 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) } ql_dbg(ql_dbg_tgt, vha, 0xe047, - "scsi(%ld): resetting (session %p from port " - "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, " - "mcmd %x, loop_id %d)\n", vha->host_no, sess, - sess->port_name[0], sess->port_name[1], - sess->port_name[2], sess->port_name[3], - sess->port_name[4], sess->port_name[5], - sess->port_name[6], sess->port_name[7], + "scsi(%ld): resetting (session %p from port %8phC mcmd %x, " + "loop_id %d)\n", vha->host_no, sess, sess->port_name, mcmd, loop_id); lun = a->u.isp24.fcp_cmnd.lun; @@ -467,21 +470,16 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, sess->expires = jiffies + dev_loss_tmo * HZ; ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, - "qla_target(%d): session for port %02x:%02x:%02x:" - "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for " + "qla_target(%d): session for port %8phC (loop ID %d) scheduled for " "deletion in %u secs (expires: %lu) immed: %d\n", - sess->vha->vp_idx, - sess->port_name[0], sess->port_name[1], - sess->port_name[2], sess->port_name[3], - sess->port_name[4], sess->port_name[5], - sess->port_name[6], sess->port_name[7], - sess->loop_id, dev_loss_tmo, sess->expires, immediate); + sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo, + sess->expires, immediate); if (immediate) schedule_delayed_work(&tgt->sess_del_work, 0); else schedule_delayed_work(&tgt->sess_del_work, - jiffies - sess->expires); + sess->expires - jiffies); } /* ha->hardware_lock supposed to be held on entry */ @@ -544,102 +542,6 @@ out_free_id_list: return res; } -static bool qlt_check_fcport_exist(struct scsi_qla_host *vha, - struct qla_tgt_sess *sess) -{ - struct qla_hw_data *ha = vha->hw; - struct qla_port_24xx_data *pmap24; - bool res, found = false; - int rc, i; - uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */ - uint16_t entries; - void *pmap; - int pmap_len; - fc_port_t *fcport; - int global_resets; - unsigned long flags; - -retry: - global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count); - - rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len); - if (rc != QLA_SUCCESS) { - res = false; - goto out; - } - - pmap24 = pmap; - entries = pmap_len/sizeof(*pmap24); - - for (i = 0; i < entries; ++i) { 
- if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) { - loop_id = le16_to_cpu(pmap24[i].loop_id); - found = true; - break; - } - } - - kfree(pmap); - - if (!found) { - res = false; - goto out; - } - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046, - "qlt_check_fcport_exist(): loop_id %d", loop_id); - - fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); - if (fcport == NULL) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047, - "qla_target(%d): Allocation of tmp FC port failed", - vha->vp_idx); - res = false; - goto out; - } - - fcport->loop_id = loop_id; - - rc = qla2x00_get_port_database(vha, fcport, 0); - if (rc != QLA_SUCCESS) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048, - "qla_target(%d): Failed to retrieve fcport " - "information -- get_port_database() returned %x " - "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); - res = false; - goto out_free_fcport; - } - - if (global_resets != - atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002, - "qla_target(%d): global reset during session discovery" - " (counter was %d, new %d), retrying", - vha->vp_idx, global_resets, - atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)); - goto retry; - } - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003, - "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, " - "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa, - sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id); - - spin_lock_irqsave(&ha->hardware_lock, flags); - ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, - (fcport->flags & FCF_CONF_COMP_SUPPORTED)); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - res = true; - -out_free_fcport: - kfree(fcport); - -out: - return res; -} - /* ha->hardware_lock supposed to be held on entry */ static void qlt_undelete_sess(struct qla_tgt_sess *sess) { @@ -656,53 +558,24 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt_sess *sess; - unsigned long flags; + unsigned long flags, elapsed; spin_lock_irqsave(&ha->hardware_lock, flags); while (!list_empty(&tgt->del_sess_list)) { sess = list_entry(tgt->del_sess_list.next, typeof(*sess), del_list_entry); - if (time_after_eq(jiffies, sess->expires)) { - bool cancel; - + elapsed = jiffies; + if (time_after_eq(elapsed, sess->expires)) { qlt_undelete_sess(sess); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - cancel = qlt_check_fcport_exist(vha, sess); - - if (cancel) { - if (sess->deleted) { - /* - * sess was again deleted while we were - * discovering it - */ - spin_lock_irqsave(&ha->hardware_lock, - flags); - continue; - } - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049, - "qla_target(%d): cancel deletion of " - "session for port %02x:%02x:%02x:%02x:%02x:" - "%02x:%02x:%02x (loop ID %d), because " - " it isn't deleted by firmware", - vha->vp_idx, sess->port_name[0], - sess->port_name[1], sess->port_name[2], - sess->port_name[3], sess->port_name[4], - sess->port_name[5], sess->port_name[6], - sess->port_name[7], sess->loop_id); - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, - "Timeout: sess %p about to be deleted\n", - sess); - ha->tgt.tgt_ops->shutdown_sess(sess); - ha->tgt.tgt_ops->put_sess(sess); - } - - spin_lock_irqsave(&ha->hardware_lock, flags); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, + "Timeout: sess %p about to be deleted\n", + sess); + ha->tgt.tgt_ops->shutdown_sess(sess); + ha->tgt.tgt_ops->put_sess(sess); } else { 
schedule_delayed_work(&tgt->sess_del_work, - jiffies - sess->expires); + sess->expires - elapsed); break; } } @@ -725,7 +598,7 @@ static struct qla_tgt_sess *qlt_create_sess( /* Check to avoid double sessions */ spin_lock_irqsave(&ha->hardware_lock, flags); - list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list, + list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list, sess_list_entry) { if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005, @@ -756,17 +629,13 @@ static struct qla_tgt_sess *qlt_create_sess( sess = kzalloc(sizeof(*sess), GFP_KERNEL); if (!sess) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a, - "qla_target(%u): session allocation failed, " - "all commands from port %02x:%02x:%02x:%02x:" - "%02x:%02x:%02x:%02x will be refused", vha->vp_idx, - fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7]); + "qla_target(%u): session allocation failed, all commands " + "from port %8phC will be refused", vha->vp_idx, + fcport->port_name); return NULL; } - sess->tgt = ha->tgt.qla_tgt; + sess->tgt = vha->vha_tgt.qla_tgt; sess->vha = vha; sess->s_id = fcport->d_id; sess->loop_id = fcport->loop_id; @@ -774,7 +643,7 @@ static struct qla_tgt_sess *qlt_create_sess( ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", - sess, ha->tgt.qla_tgt); + sess, vha->vha_tgt.qla_tgt); be_sid[0] = sess->s_id.b.domain; be_sid[1] = sess->s_id.b.area; @@ -801,20 +670,16 @@ static struct qla_tgt_sess *qlt_create_sess( memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name)); spin_lock_irqsave(&ha->hardware_lock, flags); - list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list); - ha->tgt.qla_tgt->sess_count++; + list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); + vha->vha_tgt.qla_tgt->sess_count++; spin_unlock_irqrestore(&ha->hardware_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, - "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:" - "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed" - " completion %ssupported) added\n", - vha->vp_idx, local ? "local " : "", fcport->port_name[0], - fcport->port_name[1], fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], - fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain, - sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ? - "" : "not "); + "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " + "s_id %x:%x:%x, confirmed completion %ssupported) added\n", + vha->vp_idx, local ? "local " : "", fcport->port_name, + fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area, + sess->s_id.b.al_pa, sess->conf_compl_supported ? 
"" : "not "); return sess; } @@ -825,7 +690,7 @@ static struct qla_tgt_sess *qlt_create_sess( void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_sess *sess; unsigned long flags; @@ -835,6 +700,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) if (!tgt || (fcport->port_type != FCT_INITIATOR)) return; + if (qla_ini_mode_enabled(vha)) + return; + spin_lock_irqsave(&ha->hardware_lock, flags); if (tgt->tgt_stop) { spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -844,9 +712,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) if (!sess) { spin_unlock_irqrestore(&ha->hardware_lock, flags); - mutex_lock(&ha->tgt.tgt_mutex); + mutex_lock(&vha->vha_tgt.tgt_mutex); sess = qlt_create_sess(vha, fcport, false); - mutex_unlock(&ha->tgt.tgt_mutex); + mutex_unlock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); } else { @@ -856,13 +724,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) qlt_undelete_sess(sess); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, - "qla_target(%u): %ssession for port %02x:" - "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) " - "reappeared\n", vha->vp_idx, sess->local ? "local " - : "", sess->port_name[0], sess->port_name[1], - sess->port_name[2], sess->port_name[3], - sess->port_name[4], sess->port_name[5], - sess->port_name[6], sess->port_name[7], + "qla_target(%u): %ssession for port %8phC " + "(loop ID %d) reappeared\n", vha->vp_idx, + sess->local ? "local " : "", sess->port_name, sess->loop_id); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, @@ -875,24 +739,18 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) if (sess && sess->local) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, "qla_target(%u): local session for " - "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " - "(loop ID %d) became global\n", vha->vp_idx, - fcport->port_name[0], fcport->port_name[1], - fcport->port_name[2], fcport->port_name[3], - fcport->port_name[4], fcport->port_name[5], - fcport->port_name[6], fcport->port_name[7], - sess->loop_id); + "port %8phC (loop ID %d) became global\n", vha->vp_idx, + fcport->port_name, sess->loop_id); sess->local = 0; } - spin_unlock_irqrestore(&ha->hardware_lock, flags); - ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->hardware_lock, flags); } void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_sess *sess; unsigned long flags; @@ -940,17 +798,32 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt) } /* Called by tcm_qla2xxx configfs code */ -void qlt_stop_phase1(struct qla_tgt *tgt) +int qlt_stop_phase1(struct qla_tgt *tgt) { struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = tgt->ha; unsigned long flags; + mutex_lock(&qla_tgt_mutex); + if (!vha->fc_vport) { + struct Scsi_Host *sh = vha->host; + struct fc_host_attrs *fc_host = shost_to_fc_host(sh); + bool npiv_vports; + + spin_lock_irqsave(sh->host_lock, flags); + npiv_vports = (fc_host->npiv_vports_inuse); + spin_unlock_irqrestore(sh->host_lock, flags); + + if (npiv_vports) { + mutex_unlock(&qla_tgt_mutex); + return -EPERM; + } + } if (tgt->tgt_stop || tgt->tgt_stopped) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e, "Already in tgt->tgt_stop or tgt_stopped state\n"); - dump_stack(); - return; + 
mutex_unlock(&qla_tgt_mutex); + return -EPERM; } ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", @@ -959,12 +832,13 @@ void qlt_stop_phase1(struct qla_tgt *tgt) * Mutex needed to sync with qla_tgt_fc_port_[added,deleted]. * Lock is needed, because we still can get an incoming packet. */ - mutex_lock(&ha->tgt.tgt_mutex); + mutex_lock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); tgt->tgt_stop = 1; qlt_clear_tgt_db(tgt, true); spin_unlock_irqrestore(&ha->hardware_lock, flags); - mutex_unlock(&ha->tgt.tgt_mutex); + mutex_unlock(&vha->vha_tgt.tgt_mutex); + mutex_unlock(&qla_tgt_mutex); flush_delayed_work(&tgt->sess_del_work); @@ -991,6 +865,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt) /* Wait for sessions to clear out (just in case) */ wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); + return 0; } EXPORT_SYMBOL(qlt_stop_phase1); @@ -998,20 +873,21 @@ EXPORT_SYMBOL(qlt_stop_phase1); void qlt_stop_phase2(struct qla_tgt *tgt) { struct qla_hw_data *ha = tgt->ha; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); unsigned long flags; if (tgt->tgt_stopped) { - ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f, + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f, "Already in tgt->tgt_stopped state\n"); dump_stack(); return; } - ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b, + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b, "Waiting for %d IRQ commands to complete (tgt %p)", tgt->irq_cmd_count, tgt); - mutex_lock(&ha->tgt.tgt_mutex); + mutex_lock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); while (tgt->irq_cmd_count != 0) { spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -1021,9 +897,9 @@ void qlt_stop_phase2(struct qla_tgt *tgt) tgt->tgt_stop = 0; tgt->tgt_stopped = 1; spin_unlock_irqrestore(&ha->hardware_lock, flags); - mutex_unlock(&ha->tgt.tgt_mutex); + mutex_unlock(&vha->vha_tgt.tgt_mutex); - ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished", + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished", tgt); } EXPORT_SYMBOL(qlt_stop_phase2); @@ -1031,14 +907,14 @@ EXPORT_SYMBOL(qlt_stop_phase2); /* Called from qlt_remove_target() -> qla2x00_remove_one() */ static void qlt_release(struct qla_tgt *tgt) { - struct qla_hw_data *ha = tgt->ha; + scsi_qla_host_t *vha = tgt->vha; - if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped) + if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped) qlt_stop_phase2(tgt); - ha->tgt.qla_tgt = NULL; + vha->vha_tgt.qla_tgt = NULL; - ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d, + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d, "Release of tgt %p finished\n", tgt); kfree(tgt); @@ -1102,8 +978,8 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, return; } - if (ha->tgt.qla_tgt != NULL) - ha->tgt.qla_tgt->notify_ack_expected++; + if (vha->vha_tgt.qla_tgt != NULL) + vha->vha_tgt.qla_tgt->notify_ack_expected++; pkt->entry_type = NOTIFY_ACK_TYPE; pkt->entry_count = 1; @@ -1207,7 +1083,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha, /* Other bytes are zero */ } - ha->tgt.qla_tgt->abts_resp_expected++; + vha->vha_tgt.qla_tgt->abts_resp_expected++; qla2x00_start_iocbs(vha, vha->req); } @@ -1252,7 +1128,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, ctio->u.status1.flags = __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); - ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; + ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id); qla2x00_start_iocbs(vha, vha->req); @@ -1359,7 +1235,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, 
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, "qla_target(%d): task abort for non-existant session\n", vha->vp_idx); - rc = qlt_sched_sess_work(ha->tgt.qla_tgt, + rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt, QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts)); if (rc != 0) { qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, @@ -1386,6 +1262,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, { struct atio_from_isp *atio = &mcmd->orig_iocb.atio; struct ctio7_to_24xx *ctio; + uint16_t temp; ql_dbg(ql_dbg_tgt, ha, 0xe008, "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", @@ -1416,7 +1293,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, ctio->u.status1.flags = (atio->u.isp24.attr << 9) | __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS); - ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio->u.status1.ox_id = cpu_to_le16(temp); ctio->u.status1.scsi_status = __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); ctio->u.status1.response_len = __constant_cpu_to_le16(8); @@ -1482,13 +1360,42 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) prm->cmd->sg_mapped = 1; - /* - * If greater than four sg entries then we need to allocate - * the continuation entries - */ - if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) - prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - - prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont); + if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) { + /* + * If greater than four sg entries then we need to allocate + * the continuation entries + */ + if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) + prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - + prm->tgt->datasegs_per_cmd, + prm->tgt->datasegs_per_cont); + } else { + /* DIF */ + if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || + (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { + prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz); + prm->tot_dsds = prm->seg_cnt; + } else + prm->tot_dsds = prm->seg_cnt; + + if (cmd->prot_sg_cnt) { + prm->prot_sg = cmd->prot_sg; + prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev, + cmd->prot_sg, cmd->prot_sg_cnt, + cmd->dma_data_direction); + if (unlikely(prm->prot_seg_cnt == 0)) + goto out_err; + + if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || + (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { + /* Dif Bundling not support here */ + prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen, + cmd->blk_sz); + prm->tot_dsds += prm->prot_seg_cnt; + } else + prm->tot_dsds += prm->prot_seg_cnt; + } + } ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n", prm->seg_cnt, prm->req_cnt); @@ -1509,6 +1416,16 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha, BUG_ON(!cmd->sg_mapped); pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); cmd->sg_mapped = 0; + + if (cmd->prot_sg_cnt) + pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt, + cmd->dma_data_direction); + + if (cmd->ctx_dsd_alloced) + qla2x00_clean_dsd_pool(ha, NULL, cmd); + + if (cmd->ctx) + dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); } static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, @@ -1598,6 +1515,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, struct ctio7_to_24xx *pkt; struct qla_hw_data *ha = vha->hw; struct atio_from_isp *atio = &prm->cmd->atio; + uint16_t temp; pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr; prm->pkt = pkt; @@ -1626,13 +1544,13 @@ static int qlt_24xx_build_ctio_pkt(struct 
qla_tgt_prm *prm, pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; pkt->exchange_addr = atio->u.isp24.exchange_addr; pkt->u.status0.flags |= (atio->u.isp24.attr << 9); - pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + pkt->u.status0.ox_id = cpu_to_le16(temp); pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); ql_dbg(ql_dbg_tgt, vha, 0xe00c, "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n", - vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, - le16_to_cpu(pkt->u.status0.ox_id)); + vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp); return 0; } @@ -1797,8 +1715,9 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED; } - ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n", - vha->vp_idx, cmd->tag); + ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n", + vha->vp_idx, cmd->tag, + be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); prm->cmd = cmd; prm->tgt = tgt; @@ -2034,6 +1953,328 @@ skip_explict_conf: /* Sense with len > 24, is it possible ??? */ } + + +/* diff */ +static inline int +qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) +{ + /* + * Uncomment when corresponding SCSI changes are done. + * + if (!sp->cmd->prot_chk) + return 0; + * + */ + switch (se_cmd->prot_op) { + case TARGET_PROT_DOUT_INSERT: + case TARGET_PROT_DIN_STRIP: + if (ql2xenablehba_err_chk >= 1) + return 1; + break; + case TARGET_PROT_DOUT_PASS: + case TARGET_PROT_DIN_PASS: + if (ql2xenablehba_err_chk >= 2) + return 1; + break; + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_STRIP: + return 1; + default: + break; + } + return 0; +} + +/* + * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command + * + */ +static inline void +qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) +{ + uint32_t lba = 0xffffffff & se_cmd->t_task_lba; + + /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2 + * have been implemented by TCM, before AppTag is avail. + * Look for modesense_handlers[] + */ + ctx->app_tag = 0; + ctx->app_tag_mask[0] = 0x0; + ctx->app_tag_mask[1] = 0x0; + + switch (se_cmd->prot_type) { + case TARGET_DIF_TYPE0_PROT: + /* + * No check for ql2xenablehba_err_chk, as it would be an + * I/O error if hba tag generation is not done. + */ + ctx->ref_tag = cpu_to_le32(lba); + + if (!qlt_hba_err_chk_enabled(se_cmd)) + break; + + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; + /* + * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and + * 16 bit app tag. 
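Together the three tags form the 8-byte protection tuple that trails each logical block on the wire - the same layout the initiator-side sd_dif code uses. For reference (a summary, not new code): bytes 0-1 carry the guard (a CRC16 of the data block), bytes 2-3 the application tag (masked out above until TCM can supply one), and bytes 4-7 the reference tag (the low 32 bits of the LBA for Type 1/2). ctx->ref_tag therefore seeds the expected ref tag of the first block, and the mask bytes select which tuple bytes the HBA actually compares.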
+ */ + case TARGET_DIF_TYPE1_PROT: + ctx->ref_tag = cpu_to_le32(lba); + + if (!qlt_hba_err_chk_enabled(se_cmd)) + break; + + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; + /* + * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to + * match LBA in CDB + N + */ + case TARGET_DIF_TYPE2_PROT: + ctx->ref_tag = cpu_to_le32(lba); + + if (!qlt_hba_err_chk_enabled(se_cmd)) + break; + + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; + + /* For Type 3 protection: 16 bit GUARD only */ + case TARGET_DIF_TYPE3_PROT: + ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = + ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; + break; + } +} + + +static inline int +qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) +{ + uint32_t *cur_dsd; + int sgc; + uint32_t transfer_length = 0; + uint32_t data_bytes; + uint32_t dif_bytes; + uint8_t bundling = 1; + uint8_t *clr_ptr; + struct crc_context *crc_ctx_pkt = NULL; + struct qla_hw_data *ha; + struct ctio_crc2_to_fw *pkt; + dma_addr_t crc_ctx_dma; + uint16_t fw_prot_opts = 0; + struct qla_tgt_cmd *cmd = prm->cmd; + struct se_cmd *se_cmd = &cmd->se_cmd; + uint32_t h; + struct atio_from_isp *atio = &prm->cmd->atio; + uint16_t t16; + + sgc = 0; + ha = vha->hw; + + pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr; + prm->pkt = pkt; + memset(pkt, 0, sizeof(*pkt)); + + ql_dbg(ql_dbg_tgt, vha, 0xe071, + "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", + vha->vp_idx, __func__, se_cmd, se_cmd->prot_op, + prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); + + if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || + (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) + bundling = 0; + + /* Compute dif len and adjust data len to include protection */ + data_bytes = cmd->bufflen; + dif_bytes = (data_bytes / cmd->blk_sz) * 8; + + switch (se_cmd->prot_op) { + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_STRIP: + transfer_length = data_bytes; + data_bytes += dif_bytes; + break; + + case TARGET_PROT_DIN_STRIP: + case TARGET_PROT_DOUT_INSERT: + case TARGET_PROT_DIN_PASS: + case TARGET_PROT_DOUT_PASS: + transfer_length = data_bytes + dif_bytes; + break; + + default: + BUG(); + break; + } + + if (!qlt_hba_err_chk_enabled(se_cmd)) + fw_prot_opts |= 0x10; /* Disable Guard tag checking */ + /* HBA error checking enabled */ + else if (IS_PI_UNINIT_CAPABLE(ha)) { + if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || + (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) + fw_prot_opts |= PO_DIS_VALD_APP_ESC; + else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) + fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; + } + + switch (se_cmd->prot_op) { + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_INSERT: + fw_prot_opts |= PO_MODE_DIF_INSERT; + break; + case TARGET_PROT_DIN_STRIP: + case TARGET_PROT_DOUT_STRIP: + fw_prot_opts |= PO_MODE_DIF_REMOVE; + break; + case TARGET_PROT_DIN_PASS: + case TARGET_PROT_DOUT_PASS: + fw_prot_opts |= PO_MODE_DIF_PASS; + /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? 
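For reference, the firmware protection modes this switch selects reduce to (a summary of the cases here, not new behaviour):

    DIN_INSERT / DOUT_INSERT -> PO_MODE_DIF_INSERT: HBA generates and appends tuples
    DIN_STRIP  / DOUT_STRIP  -> PO_MODE_DIF_REMOVE: HBA verifies, then strips tuples
    DIN_PASS   / DOUT_PASS   -> PO_MODE_DIF_PASS:   tuples carried end to end, checked per fw_prot_opts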
*/ + break; + default:/* Normal Request */ + fw_prot_opts |= PO_MODE_DIF_PASS; + break; + } + + + /* ---- PKT ---- */ + /* Update entry type to indicate Command Type CRC_2 IOCB */ + pkt->entry_type = CTIO_CRC2; + pkt->entry_count = 1; + pkt->vp_index = vha->vp_idx; + + h = qlt_make_handle(vha); + if (unlikely(h == QLA_TGT_NULL_HANDLE)) { + /* + * CTIO type 7 from the firmware doesn't provide a way to + * know the initiator's LOOP ID, hence we can't find + * the session and, so, the command. + */ + return -EAGAIN; + } else + ha->tgt.cmds[h-1] = prm->cmd; + + + pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; + pkt->nport_handle = prm->cmd->loop_id; + pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); + pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; + pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; + pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + pkt->exchange_addr = atio->u.isp24.exchange_addr; + + /* silence compile warning */ + t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + pkt->ox_id = cpu_to_le16(t16); + + t16 = (atio->u.isp24.attr << 9); + pkt->flags |= cpu_to_le16(t16); + pkt->relative_offset = cpu_to_le32(prm->cmd->offset); + + /* Set transfer direction */ + if (cmd->dma_data_direction == DMA_TO_DEVICE) + pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN); + else if (cmd->dma_data_direction == DMA_FROM_DEVICE) + pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT); + + + pkt->dseg_count = prm->tot_dsds; + /* Fibre channel byte count */ + pkt->transfer_length = cpu_to_le32(transfer_length); + + + /* ----- CRC context -------- */ + + /* Allocate CRC context from global pool */ + crc_ctx_pkt = cmd->ctx = + dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); + + if (!crc_ctx_pkt) + goto crc_queuing_error; + + /* Zero out CTX area. 
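The context is cleared by hand because dma_pool_alloc() hands back recycled pool memory, and GFP_ATOMIC is required since this runs on the I/O submission path. The DMA handle is stashed inside the context itself so teardown can find it again; the lifetime pairs with qlt_unmap_sg() earlier in this patch:

    crc_ctx_pkt = dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
    ... hand crc_ctx_dma to the firmware via pkt->crc_context_address ...
    dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);    on completion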
*/ + clr_ptr = (uint8_t *)crc_ctx_pkt; + memset(clr_ptr, 0, sizeof(*crc_ctx_pkt)); + + crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; + INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); + + /* Set handle */ + crc_ctx_pkt->handle = pkt->handle; + + qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt); + + pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); + pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); + pkt->crc_context_len = CRC_CONTEXT_LEN_FW; + + + if (!bundling) { + cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; + } else { + /* + * Configure Bundling if we need to fetch interlaving + * protection PCI accesses + */ + fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; + crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); + crc_ctx_pkt->u.bundling.dseg_count = + cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); + cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address; + } + + /* Finish the common fields of CRC pkt */ + crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); + crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); + crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); + crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0); + + + /* Walks data segments */ + pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR); + + if (!bundling && prm->prot_seg_cnt) { + if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, + prm->tot_dsds, cmd)) + goto crc_queuing_error; + } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, + (prm->tot_dsds - prm->prot_seg_cnt), cmd)) + goto crc_queuing_error; + + if (bundling && prm->prot_seg_cnt) { + /* Walks dif segments */ + pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA; + + cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; + if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, + prm->prot_seg_cnt, cmd)) + goto crc_queuing_error; + } + return QLA_SUCCESS; + +crc_queuing_error: + /* Cleanup will be performed by the caller */ + + return QLA_FUNCTION_FAILED; +} + + /* * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * * QLA_TGT_XMIT_STATUS for >= 24xx silicon @@ -2053,9 +2294,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, qlt_check_srr_debug(cmd, &xmit_type); ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, - "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, " - "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? - 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction); + "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n", + (xmit_type & QLA_TGT_XMIT_STATUS) ? 
+ 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, + &cmd->se_cmd); res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, &full_req_cnt); @@ -2073,7 +2315,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, if (unlikely(res)) goto out_unmap_unlock; - res = qlt_24xx_build_ctio_pkt(&prm, vha); + if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA)) + res = qlt_build_ctio_crc2_pkt(&prm, vha); + else + res = qlt_24xx_build_ctio_pkt(&prm, vha); if (unlikely(res != 0)) goto out_unmap_unlock; @@ -2085,7 +2330,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN | CTIO7_FLAGS_STATUS_MODE_0); - qlt_load_data_segments(&prm, vha); + if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) + qlt_load_data_segments(&prm, vha); if (prm.add_status_pkt == 0) { if (xmit_type & QLA_TGT_XMIT_STATUS) { @@ -2115,8 +2361,14 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, ql_dbg(ql_dbg_tgt, vha, 0xe019, "Building additional status packet\n"); + /* + * T10Dif: ctio_crc2_to_fw overlay on top of + * ctio7_to_24xx + */ memcpy(ctio, pkt, sizeof(*ctio)); + /* reset back to CTIO7 */ ctio->entry_count = 1; + ctio->entry_type = CTIO_TYPE7; ctio->dseg_count = 0; ctio->u.status1.flags &= ~__constant_cpu_to_le16( CTIO7_FLAGS_DATA_IN); @@ -2125,6 +2377,11 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; pkt->u.status0.flags |= __constant_cpu_to_le16( CTIO7_FLAGS_DONT_RET_CTIO); + + /* qlt_24xx_init_ctio_to_isp will correct + * all necessary fields that are part of CTIO7. + * There should be no residual of CTIO-CRC2 data. + */ qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, &prm); pr_debug("Status CTIO7: %p\n", ctio); @@ -2173,8 +2430,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) return -EIO; - ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)", - (int)vha->vp_idx); + ql_dbg(ql_dbg_tgt, vha, 0xe01b, + "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n", + __func__, (int)vha->vp_idx, &cmd->se_cmd, + be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); /* Calculate number of entries and segments required */ if (qlt_pci_map_calc_cnt(&prm) != 0) @@ -2186,14 +2445,19 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) res = qlt_check_reserve_free_req(vha, prm.req_cnt); if (res != 0) goto out_unlock_free_unmap; + if (cmd->se_cmd.prot_op) + res = qlt_build_ctio_crc2_pkt(&prm, vha); + else + res = qlt_24xx_build_ctio_pkt(&prm, vha); - res = qlt_24xx_build_ctio_pkt(&prm, vha); if (unlikely(res != 0)) goto out_unlock_free_unmap; pkt = (struct ctio7_to_24xx *)prm.pkt; pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | CTIO7_FLAGS_STATUS_MODE_0); + + if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) + qlt_load_data_segments(&prm, vha); cmd->state = QLA_TGT_STATE_NEED_DATA; @@ -2211,6 +2475,143 @@ out_unlock_free_unmap: } EXPORT_SYMBOL(qlt_rdy_to_xfer); + +/* + * Checks the guard or meta-data for the type of error + * detected by the HBA. 
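Order of evaluation: first the escape case - an application tag of 0xffff (plus an all-ones ref tag for Type 3) marks a block never written with valid protection data, so the firmware report is ignored, the good-block count is recovered from the expected ref tag, and the command completes as GOOD; otherwise guard, ref tag, and app tag are compared in that order, and the first mismatch picks the TCM sense code handed to the upper layer. In outline (mirroring the body below):

    if (a_app_tag == 0xffff && (prot_type != TYPE3 || a_ref_tag == 0xffffffff))
        return 0;                    not an error, complete as GOOD
    if (e_guard   != a_guard)        -> TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED
    if (e_ref_tag != a_ref_tag)      -> TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED
    if (e_app_tag != a_app_tag)      -> TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED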
+ */ +static inline int +qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, + struct ctio_crc_from_fw *sts) +{ + uint8_t *ap = &sts->actual_dif[0]; + uint8_t *ep = &sts->expected_dif[0]; + uint32_t e_ref_tag, a_ref_tag; + uint16_t e_app_tag, a_app_tag; + uint16_t e_guard, a_guard; + uint64_t lba = cmd->se_cmd.t_task_lba; + + a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); + a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); + a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); + + e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); + e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); + e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); + + ql_dbg(ql_dbg_tgt, vha, 0xe075, + "iocb(s) %p Returned STATUS.\n", sts); + + ql_dbg(ql_dbg_tgt, vha, 0xf075, + "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", + cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, + a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); + + /* + * Ignore sector if: + * For type 3: ref & app tag is all 'f's + * For type 0,1,2: app tag is all 'f's + */ + if ((a_app_tag == 0xffff) && + ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) || + (a_ref_tag == 0xffffffff))) { + uint32_t blocks_done; + + /* 2TB boundary case covered automatically with this */ + blocks_done = e_ref_tag - (uint32_t)lba + 1; + cmd->se_cmd.bad_sector = e_ref_tag; + cmd->se_cmd.pi_err = 0; + ql_dbg(ql_dbg_tgt, vha, 0xf074, + "need to return scsi good\n"); + + /* Update protection tag */ + if (cmd->prot_sg_cnt) { + uint32_t i, j = 0, k = 0, num_ent; + struct scatterlist *sg, *sgl; + + + sgl = cmd->prot_sg; + + /* Patch the corresponding protection tags */ + for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) { + num_ent = sg_dma_len(sg) / 8; + if (k + num_ent < blocks_done) { + k += num_ent; + continue; + } + j = blocks_done - k - 1; + k = blocks_done; + break; + } + + if (k != blocks_done) { + ql_log(ql_log_warn, vha, 0xf076, + "unexpected tag values tag:lba=%u:%llu)\n", + e_ref_tag, (unsigned long long)lba); + goto out; + } + +#if 0 + struct sd_dif_tuple *spt; + /* TODO: + * This section came from initiator. Is it valid here? + * should ulp be override with actual val??? 
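If this block is ever enabled, the intent mirrors the initiator path: walk to protection-SG entry j located above and rewrite that tuple's tags to the escape values (app tag 0xffff, plus ref tag 0xffffffff for Type 3) so upper layers treat the sector as unwritten rather than corrupt.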
+ */ + spt = page_address(sg_page(sg)) + sg->offset; + spt += j; + + spt->app_tag = 0xffff; + if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3) + spt->ref_tag = 0xffffffff; +#endif + } + + return 0; + } + + /* check guard */ + if (e_guard != a_guard) { + cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; + + ql_log(ql_log_warn, vha, 0xe076, + "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", + cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, + a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, + a_guard, e_guard, cmd); + goto out; + } + + /* check ref tag */ + if (e_ref_tag != a_ref_tag) { + cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + cmd->se_cmd.bad_sector = e_ref_tag; + + ql_log(ql_log_warn, vha, 0xe077, + "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", + cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, + a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, + a_guard, e_guard, cmd); + goto out; + } + + /* check appl tag */ + if (e_app_tag != a_app_tag) { + cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; + cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; + + ql_log(ql_log_warn, vha, 0xe078, + "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", + cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, + a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, + a_guard, e_guard, cmd); + goto out; + } +out: + return 1; +} + + /* If hardware_lock held on entry, might drop it, then reacquire */ /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, @@ -2221,6 +2622,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, struct qla_hw_data *ha = vha->hw; request_t *pkt; int ret = 0; + uint16_t temp; ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); @@ -2257,7 +2659,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); - ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio24->u.status1.ox_id = cpu_to_le16(temp); /* Most likely, it isn't needed */ ctio24->u.status1.residual = get_unaligned((uint32_t *) @@ -2287,21 +2690,46 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha, rc = __qlt_send_term_exchange(vha, cmd, atio); spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); done: - if (rc == 1) { + /* + * Terminate exchange will tell fw to release any active CTIO + * that's in FW possession and clean up the exchange. + * + * "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still + * down at FW. Free the cmd later when CTIO comes back later + * w/aborted(0x2) status. + * + * "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already + * back w/some err. Free the cmd now. 
+ */ + if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) { if (!ha_locked && !in_interrupt()) msleep(250); /* just in case */ + if (cmd->sg_mapped) + qlt_unmap_sg(vha, cmd); vha->hw->tgt.tgt_ops->free_cmd(cmd); } + return; } void qlt_free_cmd(struct qla_tgt_cmd *cmd) { - BUG_ON(cmd->sg_mapped); + struct qla_tgt_sess *sess = cmd->sess; + + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, + "%s: se_cmd[%p] ox_id %04x\n", + __func__, &cmd->se_cmd, + be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); + BUG_ON(cmd->sg_mapped); if (unlikely(cmd->free_sg)) kfree(cmd->sg); - kmem_cache_free(qla_tgt_cmd_cachep, cmd); + + if (!sess || !sess->se_sess) { + WARN_ON(1); + return; + } + percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); } EXPORT_SYMBOL(qlt_free_cmd); @@ -2310,8 +2738,7 @@ static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, void *ctio) { struct qla_tgt_srr_ctio *sc; - struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_srr_imm *imm; tgt->ctio_srr_id++; @@ -2507,6 +2934,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, case CTIO_LIP_RESET: case CTIO_TARGET_RESET: case CTIO_ABORTED: + /* driver requested abort via Terminate exchange */ case CTIO_TIMEOUT: case CTIO_INVALID_RX_ID: /* They are OK */ @@ -2537,18 +2965,58 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, else return; + case CTIO_DIF_ERROR: { + struct ctio_crc_from_fw *crc = + (struct ctio_crc_from_fw *)ctio; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, + "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", + vha->vp_idx, status, cmd->state, se_cmd, + *((u64 *)&crc->actual_dif[0]), + *((u64 *)&crc->expected_dif[0])); + + if (qlt_handle_dif_error(vha, cmd, ctio)) { + if (cmd->state == QLA_TGT_STATE_NEED_DATA) { + /* scsi Write/xfer rdy complete */ + goto skip_term; + } else { + /* scsi read/xmit response complete + * call handle dif to send scsi status + * rather than terminate exchange. + */ + cmd->state = QLA_TGT_STATE_PROCESSED; + ha->tgt.tgt_ops->handle_dif_err(cmd); + return; + } + } else { + /* Need to generate a SCSI good completion, + * because FW did not send scsi status. + */ + status = 0; + goto skip_term; + } + break; + } default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, - "qla_target(%d): CTIO with error status " - "0x%x received (state %x, se_cmd %p\n", + "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", vha->vp_idx, status, cmd->state, se_cmd); break; } - if (cmd->state != QLA_TGT_STATE_NEED_DATA) + + /* "cmd->state == QLA_TGT_STATE_ABORTED" means + * cmd is already aborted/terminated, we don't + * need to terminate again. The exchange is already + * cleaned up/freed at FW level. Just clean up at driver + * level. 
+ */ + if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && + (cmd->state != QLA_TGT_STATE_ABORTED)) { if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) return; + } } +skip_term: if (cmd->state == QLA_TGT_STATE_PROCESSED) { ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); @@ -2577,7 +3045,8 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, "not return a CTIO complete\n", vha->vp_idx, cmd->state); } - if (unlikely(status != CTIO_SUCCESS)) { + if (unlikely(status != CTIO_SUCCESS) && + (cmd->state != QLA_TGT_STATE_ABORTED)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); dump_stack(); } @@ -2585,25 +3054,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, ha->tgt.tgt_ops->free_cmd(cmd); } -/* ha->hardware_lock supposed to be held on entry */ -/* called via callback from qla2xxx */ -void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle) -{ - struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; - - if (likely(tgt == NULL)) { - ql_dbg(ql_dbg_tgt, vha, 0xe021, - "CTIO, but target mode not enabled" - " (ha %d %p handle %#x)", vha->vp_idx, ha, handle); - return; - } - - tgt->irq_cmd_count++; - qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL); - tgt->irq_cmd_count--; -} - static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, uint8_t task_codes) { @@ -2641,13 +3091,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, /* * Process context for I/O path into tcm_qla2xxx code */ -static void qlt_do_work(struct work_struct *work) +static void __qlt_do_work(struct qla_tgt_cmd *cmd) { - struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); scsi_qla_host_t *vha = cmd->vha; struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; - struct qla_tgt_sess *sess = NULL; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_tgt_sess *sess = cmd->sess; struct atio_from_isp *atio = &cmd->atio; unsigned char *cdb; unsigned long flags; @@ -2657,41 +3106,6 @@ static void qlt_do_work(struct work_struct *work) if (tgt->tgt_stop) goto out_term; - spin_lock_irqsave(&ha->hardware_lock, flags); - sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, - atio->u.isp24.fcp_hdr.s_id); - /* Do kref_get() before dropping qla_hw_data->hardware_lock. */ - if (sess) - kref_get(&sess->se_sess->sess_kref); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - if (unlikely(!sess)) { - uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, - "qla_target(%d): Unable to find wwn login" - " (s_id %x:%x:%x), trying to create it manually\n", - vha->vp_idx, s_id[0], s_id[1], s_id[2]); - - if (atio->u.raw.entry_count > 1) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, - "Dropping multy entry cmd %p\n", cmd); - goto out_term; - } - - mutex_lock(&ha->tgt.tgt_mutex); - sess = qlt_make_local_sess(vha, s_id); - /* sess has an extra creation ref. 
*/ - mutex_unlock(&ha->tgt.tgt_mutex); - - if (!sess) - goto out_term; - } - - cmd->sess = sess; - cmd->loop_id = sess->loop_id; - cmd->conf_compl_supported = sess->conf_compl_supported; - cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; cmd->tag = atio->u.isp24.exchange_addr; cmd->unpacked_lun = scsilun_to_int( @@ -2715,17 +3129,20 @@ static void qlt_do_work(struct work_struct *work) atio->u.isp24.fcp_cmnd.add_cdb_len])); ql_dbg(ql_dbg_tgt, vha, 0xe022, - "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n", - cmd, cmd->unpacked_lun, cmd->tag); + "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n", + cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length, + cmd->atio.u.isp24.fcp_hdr.ox_id); - ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, - fcp_task_attr, data_dir, bidi); + ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, + fcp_task_attr, data_dir, bidi); if (ret != 0) goto out_term; /* * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( */ + spin_lock_irqsave(&ha->hardware_lock, flags); ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->hardware_lock, flags); return; out_term: @@ -2736,10 +3153,105 @@ out_term: */ spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); - kmem_cache_free(qla_tgt_cmd_cachep, cmd); + percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); + ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (sess) +} + +static void qlt_do_work(struct work_struct *work) +{ + struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + + __qlt_do_work(cmd); +} + +static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, + struct qla_tgt_sess *sess, + struct atio_from_isp *atio) +{ + struct se_session *se_sess = sess->se_sess; + struct qla_tgt_cmd *cmd; + int tag; + + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); + if (tag < 0) + return NULL; + + cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; + memset(cmd, 0, sizeof(struct qla_tgt_cmd)); + + memcpy(&cmd->atio, atio, sizeof(*atio)); + cmd->state = QLA_TGT_STATE_NEW; + cmd->tgt = vha->vha_tgt.qla_tgt; + cmd->vha = vha; + cmd->se_cmd.map_tag = tag; + cmd->sess = sess; + cmd->loop_id = sess->loop_id; + cmd->conf_compl_supported = sess->conf_compl_supported; + + return cmd; +} + +static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *, + uint16_t); + +static void qlt_create_sess_from_atio(struct work_struct *work) +{ + struct qla_tgt_sess_op *op = container_of(work, + struct qla_tgt_sess_op, work); + scsi_qla_host_t *vha = op->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess; + struct qla_tgt_cmd *cmd; + unsigned long flags; + uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, + "qla_target(%d): Unable to find wwn login" + " (s_id %x:%x:%x), trying to create it manually\n", + vha->vp_idx, s_id[0], s_id[1], s_id[2]); + + if (op->atio.u.raw.entry_count > 1) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, + "Dropping multy entry atio %p\n", &op->atio); + goto out_term; + } + + mutex_lock(&vha->vha_tgt.tgt_mutex); + sess = qlt_make_local_sess(vha, s_id); + /* sess has an extra creation ref. */ + mutex_unlock(&vha->vha_tgt.tgt_mutex); + + if (!sess) + goto out_term; + /* + * Now obtain a pre-allocated session tag using the original op->atio + * packet header, and dispatch into __qlt_do_work() using the existing + * process context. 
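 *
 * A minimal sketch of the tag-pool pattern qlt_get_tag() uses above:
 * tags index a per-session array of preallocated descriptors, so the
 * hot path avoids a slab allocation per command (error handling elided;
 * sess_tag_pool and sess_cmd_map are set up at session creation by the
 * fabric code):
 *
 *	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 *	if (tag < 0)
 *		return NULL;			/* pool exhausted */
 *	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
 *	memset(cmd, 0, sizeof(*cmd));		/* descriptors are recycled */
 *	cmd->se_cmd.map_tag = tag;		/* consumed by the free side */
 *	...
 *	percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);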
+ */ + cmd = qlt_get_tag(vha, sess, &op->atio); + if (!cmd) { + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + kfree(op); + return; + } + /* + * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release + * the extra reference taken above by qlt_make_local_sess() + */ + __qlt_do_work(cmd); + kfree(op); + return; + +out_term: + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_send_term_exchange(vha, NULL, &op->atio, 1); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + kfree(op); + } /* ha->hardware_lock supposed to be held on entry */ @@ -2747,7 +3259,8 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, struct atio_from_isp *atio) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_tgt_sess *sess; struct qla_tgt_cmd *cmd; if (unlikely(tgt->tgt_stop)) { @@ -2756,20 +3269,31 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, return -EFAULT; } - cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC); + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); + if (unlikely(!sess)) { + struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op), + GFP_ATOMIC); + if (!op) + return -ENOMEM; + + memcpy(&op->atio, atio, sizeof(*atio)); + INIT_WORK(&op->work, qlt_create_sess_from_atio); + queue_work(qla_tgt_wq, &op->work); + return 0; + } + /* + * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. + */ + kref_get(&sess->se_sess->sess_kref); + + cmd = qlt_get_tag(vha, sess, atio); if (!cmd) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e, "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); + ha->tgt.tgt_ops->put_sess(sess); return -ENOMEM; } - INIT_LIST_HEAD(&cmd->cmd_list); - - memcpy(&cmd->atio, atio, sizeof(*atio)); - cmd->state = QLA_TGT_STATE_NEW; - cmd->tgt = ha->tgt.qla_tgt; - cmd->vha = vha; - INIT_WORK(&cmd->work, qlt_do_work); queue_work(qla_tgt_wq, &cmd->work); return 0; @@ -2893,7 +3417,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) uint32_t lun, unpacked_lun; int lun_size, fn; - tgt = ha->tgt.qla_tgt; + tgt = vha->vha_tgt.qla_tgt; lun = a->u.isp24.fcp_cmnd.lun; lun_size = sizeof(a->u.isp24.fcp_cmnd.lun); @@ -2967,7 +3491,7 @@ static int qlt_abort_task(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, "qla_target(%d): task abort for unexisting " "session\n", vha->vp_idx); - return qlt_sched_sess_work(ha->tgt.qla_tgt, + return qlt_sched_sess_work(vha->vha_tgt.qla_tgt, QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); } @@ -2980,14 +3504,11 @@ static int qlt_abort_task(struct scsi_qla_host *vha, static int qlt_24xx_handle_els(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { - struct qla_hw_data *ha = vha->hw; int res = 0; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, - "qla_target(%d): Port ID: 0x%02x:%02x:%02x" - " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0], - iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2], - iocb->u.isp24.status_subcode); + "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", + vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); switch (iocb->u.isp24.status_subcode) { case ELS_PLOGI: @@ -3000,7 +3521,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, case ELS_PDISC: case ELS_ADISC: { - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; if 
(tgt->link_reinit_iocb_pending) { qlt_send_notify_ack(vha, &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); @@ -3358,7 +3879,8 @@ restart: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, "SRR cmd %p (se_cmd %p, tag %d, op %x), " "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, - se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset); + se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, + cmd->sg_cnt, cmd->offset); qlt_handle_srr(vha, sctio, imm); @@ -3374,8 +3896,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { struct qla_tgt_srr_imm *imm; - struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_srr_ctio *sctio; tgt->imm_srr_id++; @@ -3485,7 +4006,7 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha, case IMM_NTFY_LIP_LINK_REINIT: { - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, "qla_target(%d): LINK REINIT (loop %#x, " "subcode %x)\n", vha->vp_idx, @@ -3661,7 +4182,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, struct atio_from_isp *atio) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; int rc; if (unlikely(tgt == NULL)) { @@ -3683,11 +4204,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, switch (atio->u.raw.entry_type) { case ATIO_TYPE7: ql_dbg(ql_dbg_tgt, vha, 0xe02d, - "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, " - "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n", + "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n", vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, atio->u.isp24.fcp_cmnd.rddata, atio->u.isp24.fcp_cmnd.wrdata, + atio->u.isp24.fcp_cmnd.cdb[0], atio->u.isp24.fcp_cmnd.add_cdb_len, be32_to_cpu(get_unaligned((uint32_t *) &atio->u.isp24.fcp_cmnd.add_cdb[ @@ -3763,7 +4284,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; if (unlikely(tgt == NULL)) { ql_dbg(ql_dbg_tgt, vha, 0xe05d, @@ -3785,11 +4306,13 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) tgt->irq_cmd_count++; switch (pkt->entry_type) { + case CTIO_CRC2: case CTIO_TYPE7: { struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; - ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n", - vha->vp_idx); + ql_dbg(ql_dbg_tgt, vha, 0xe030, + "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n", + entry->entry_type, vha->vp_idx); qlt_do_ctio_completion(vha, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); @@ -3966,7 +4489,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, uint16_t *mailbox) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; int login_code; ql_dbg(ql_dbg_tgt, vha, 0xe039, @@ -4096,14 +4619,14 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha, uint8_t *s_id) { - struct qla_hw_data *ha = vha->hw; struct qla_tgt_sess *sess = NULL; fc_port_t *fcport = NULL; int rc, global_resets; uint16_t loop_id = 0; retry: - global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count); + global_resets = + 
atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); rc = qla24xx_get_loop_id(vha, s_id, &loop_id); if (rc != 0) { @@ -4130,12 +4653,13 @@ retry: return NULL; if (global_resets != - atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) { + atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, "qla_target(%d): global reset during session discovery " "(counter was %d, new %d), retrying", vha->vp_idx, global_resets, - atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)); + atomic_read(&vha->vha_tgt. + qla_tgt->tgt_global_resets_count)); goto retry; } @@ -4170,10 +4694,10 @@ static void qlt_abort_work(struct qla_tgt *tgt, if (!sess) { spin_unlock_irqrestore(&ha->hardware_lock, flags); - mutex_lock(&ha->tgt.tgt_mutex); + mutex_lock(&vha->vha_tgt.tgt_mutex); sess = qlt_make_local_sess(vha, s_id); /* sess has got an extra creation ref */ - mutex_unlock(&ha->tgt.tgt_mutex); + mutex_unlock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); if (!sess) @@ -4188,16 +4712,16 @@ static void qlt_abort_work(struct qla_tgt *tgt, rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); if (rc != 0) goto out_term; - spin_unlock_irqrestore(&ha->hardware_lock, flags); ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->hardware_lock, flags); return; out_term: qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); - spin_unlock_irqrestore(&ha->hardware_lock, flags); if (sess) ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->hardware_lock, flags); } static void qlt_tmr_work(struct qla_tgt *tgt, @@ -4224,10 +4748,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt, if (!sess) { spin_unlock_irqrestore(&ha->hardware_lock, flags); - mutex_lock(&ha->tgt.tgt_mutex); + mutex_lock(&vha->vha_tgt.tgt_mutex); sess = qlt_make_local_sess(vha, s_id); /* sess has got an extra creation ref */ - mutex_unlock(&ha->tgt.tgt_mutex); + mutex_unlock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); if (!sess) @@ -4245,16 +4769,16 @@ static void qlt_tmr_work(struct qla_tgt *tgt, rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); if (rc != 0) goto out_term; - spin_unlock_irqrestore(&ha->hardware_lock, flags); ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->hardware_lock, flags); return; out_term: qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1); - spin_unlock_irqrestore(&ha->hardware_lock, flags); if (sess) ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->hardware_lock, flags); } static void qlt_sess_work_fn(struct work_struct *work) @@ -4313,9 +4837,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) } ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, - "Registering target for host %ld(%p)", base_vha->host_no, ha); + "Registering target for host %ld(%p).\n", base_vha->host_no, ha); - BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL)); + BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); if (!tgt) { @@ -4343,7 +4867,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) INIT_WORK(&tgt->srr_work, qlt_handle_srr_work); atomic_set(&tgt->tgt_global_resets_count, 0); - ha->tgt.qla_tgt = tgt; + base_vha->vha_tgt.qla_tgt = tgt; ql_dbg(ql_dbg_tgt, base_vha, 0xe067, "qla_target(%d): using 64 Bit PCI addressing", @@ -4354,6 +4878,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; 
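/*
 * A minimal userspace sketch of the generation-counter retry used in
 * qlt_make_local_sess() above: sample tgt_global_resets_count before the
 * slow loop-ID/port-database lookup and retry if it changed, since a
 * global reset can invalidate the result (all names hypothetical):
 *
 *	#include <stdatomic.h>
 *
 *	extern atomic_int generation;
 *	extern int slow_lookup(void *out);
 *
 *	static int lookup_with_retry(void *out)
 *	{
 *		int gen, rc;
 *
 *		do {
 *			gen = atomic_load(&generation);
 *			rc = slow_lookup(out);
 *			if (rc)
 *				return rc;
 *		} while (gen != atomic_load(&generation));
 *		return 0;	// result consistent with the current generation
 *	}
 */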
tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; + if (base_vha->fc_vport) + return 0; + mutex_lock(&qla_tgt_mutex); list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); mutex_unlock(&qla_tgt_mutex); @@ -4364,16 +4891,20 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) /* Must be called under tgt_host_action_mutex */ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) { - if (!ha->tgt.qla_tgt) + if (!vha->vha_tgt.qla_tgt) return 0; + if (vha->fc_vport) { + qlt_release(vha->vha_tgt.qla_tgt); + return 0; + } mutex_lock(&qla_tgt_mutex); - list_del(&ha->tgt.qla_tgt->tgt_list_entry); + list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); mutex_unlock(&qla_tgt_mutex); ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", vha->host_no, ha); - qlt_release(ha->tgt.qla_tgt); + qlt_release(vha->vha_tgt.qla_tgt); return 0; } @@ -4407,8 +4938,9 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, * @callback: lport initialization callback for tcm_qla2xxx code * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data */ -int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, - int (*callback)(struct scsi_qla_host *), void *target_lport_ptr) +int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, + u64 npiv_wwpn, u64 npiv_wwnn, + int (*callback)(struct scsi_qla_host *, void *, u64, u64)) { struct qla_tgt *tgt; struct scsi_qla_host *vha; @@ -4427,19 +4959,22 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, if (!host) continue; - if (ha->tgt.tgt_ops != NULL) - continue; - if (!(host->hostt->supported_mode & MODE_TARGET)) continue; spin_lock_irqsave(&ha->hardware_lock, flags); - if (host->active_mode & MODE_TARGET) { + if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) { pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", host->host_no); spin_unlock_irqrestore(&ha->hardware_lock, flags); continue; } + if (tgt->tgt_stop) { + pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n", + host->host_no); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + continue; + } spin_unlock_irqrestore(&ha->hardware_lock, flags); if (!scsi_host_get(host)) { @@ -4448,22 +4983,16 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, " qla2xxx scsi_host\n"); continue; } - qlt_lport_dump(vha, wwpn, b); + qlt_lport_dump(vha, phys_wwpn, b); if (memcmp(vha->port_name, b, WWN_SIZE)) { scsi_host_put(host); continue; } - /* - * Setup passed parameters ahead of invoking callback - */ - ha->tgt.tgt_ops = qla_tgt_ops; - ha->tgt.target_lport_ptr = target_lport_ptr; - rc = (*callback)(vha); - if (rc != 0) { - ha->tgt.tgt_ops = NULL; - ha->tgt.target_lport_ptr = NULL; - } + rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); + if (rc != 0) + scsi_host_put(host); + mutex_unlock(&qla_tgt_mutex); return rc; } @@ -4485,7 +5014,7 @@ void qlt_lport_deregister(struct scsi_qla_host *vha) /* * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data */ - ha->tgt.target_lport_ptr = NULL; + vha->vha_tgt.target_lport_ptr = NULL; ha->tgt.tgt_ops = NULL; /* * Release the Scsi_Host reference for the underlying qla2xxx host @@ -4547,8 +5076,9 @@ void qlt_enable_vha(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; unsigned long flags; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); if (!tgt) { ql_dbg(ql_dbg_tgt, vha, 0xe069, @@ -4563,9 +5093,14 
@@ qlt_enable_vha(struct scsi_qla_host *vha) qlt_set_mode(vha); spin_unlock_irqrestore(&ha->hardware_lock, flags); - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - qla2xxx_wake_dpc(vha); - qla2x00_wait_for_hba_online(vha); + if (vha->vp_idx) { + qla24xx_disable_vp(vha); + qla24xx_enable_vp(vha); + } else { + set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); + qla2xxx_wake_dpc(base_vha); + qla2x00_wait_for_hba_online(base_vha); + } } EXPORT_SYMBOL(qlt_enable_vha); @@ -4578,7 +5113,7 @@ void qlt_disable_vha(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; - struct qla_tgt *tgt = ha->tgt.qla_tgt; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; unsigned long flags; if (!tgt) { @@ -4609,8 +5144,10 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) if (!qla_tgt_mode_enabled(vha)) return; - mutex_init(&ha->tgt.tgt_mutex); - mutex_init(&ha->tgt.tgt_host_action_mutex); + vha->vha_tgt.qla_tgt = NULL; + + mutex_init(&vha->vha_tgt.tgt_mutex); + mutex_init(&vha->vha_tgt.tgt_host_action_mutex); qlt_clear_mode(vha); @@ -4621,6 +5158,8 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) * assigning the value appropriately. */ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; + + qlt_add_target(ha, vha); } void @@ -4908,6 +5447,7 @@ qlt_24xx_process_response_error(struct scsi_qla_host *vha, case ABTS_RESP_24XX: case CTIO_TYPE7: case NOTIFY_ACK_TYPE: + case CTIO_CRC2: return 1; default: return 0; @@ -4939,8 +5479,8 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; } - mutex_init(&ha->tgt.tgt_mutex); - mutex_init(&ha->tgt.tgt_host_action_mutex); + mutex_init(&base_vha->vha_tgt.tgt_mutex); + mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); qlt_clear_mode(base_vha); } @@ -5051,23 +5591,13 @@ int __init qlt_init(void) if (!QLA_TGT_MODE_ENABLED()) return 0; - qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep", - sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0, - NULL); - if (!qla_tgt_cmd_cachep) { - ql_log(ql_log_fatal, NULL, 0xe06c, - "kmem_cache_create for qla_tgt_cmd_cachep failed\n"); - return -ENOMEM; - } - qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct qla_tgt_mgmt_cmd), 0, NULL); if (!qla_tgt_mgmt_cmd_cachep) { ql_log(ql_log_fatal, NULL, 0xe06d, "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); - ret = -ENOMEM; - goto out; + return -ENOMEM; } qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, @@ -5095,8 +5625,6 @@ out_cmd_mempool: mempool_destroy(qla_tgt_mgmt_cmd_mempool); out_mgmt_cmd_cachep: kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); -out: - kmem_cache_destroy(qla_tgt_cmd_cachep); return ret; } @@ -5108,5 +5636,4 @@ void qlt_exit(void) destroy_workqueue(qla_tgt_wq); mempool_destroy(qla_tgt_mgmt_cmd_mempool); kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); - kmem_cache_destroy(qla_tgt_cmd_cachep); } diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index ff9ccb9fd03..d1d24fb0160 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -293,6 +293,7 @@ struct ctio_to_2xxx { #define CTIO_ABORTED 0x02 #define CTIO_INVALID_RX_ID 0x08 #define CTIO_TIMEOUT 0x0B +#define CTIO_DIF_ERROR 0x0C /* DIF error detected */ #define CTIO_LIP_RESET 0x0E #define CTIO_TARGET_RESET 0x17 #define CTIO_PORT_UNAVAILABLE 0x28 @@ -315,7 +316,7 @@ struct fcp_hdr { uint8_t seq_id; uint8_t df_ctl; uint16_t 
seq_cnt; - uint16_t ox_id; + __be16 ox_id; uint16_t rx_id; uint32_t parameter; } __packed; @@ -440,9 +441,9 @@ struct ctio7_to_24xx { union { struct { uint16_t reserved1; - uint16_t flags; + __le16 flags; uint32_t residual; - uint16_t ox_id; + __le16 ox_id; uint16_t scsi_status; uint32_t relative_offset; uint32_t reserved2; @@ -457,7 +458,7 @@ struct ctio7_to_24xx { uint16_t sense_length; uint16_t flags; uint32_t residual; - uint16_t ox_id; + __le16 ox_id; uint16_t scsi_status; uint16_t response_len; uint16_t reserved; @@ -498,11 +499,12 @@ struct ctio7_from_24xx { #define CTIO7_FLAGS_DONT_RET_CTIO BIT_8 #define CTIO7_FLAGS_STATUS_MODE_0 0 #define CTIO7_FLAGS_STATUS_MODE_1 BIT_6 +#define CTIO7_FLAGS_STATUS_MODE_2 BIT_7 #define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5 #define CTIO7_FLAGS_CONFIRM_SATISF BIT_4 #define CTIO7_FLAGS_DSD_PTR BIT_2 -#define CTIO7_FLAGS_DATA_IN BIT_1 -#define CTIO7_FLAGS_DATA_OUT BIT_0 +#define CTIO7_FLAGS_DATA_IN BIT_1 /* data to initiator */ +#define CTIO7_FLAGS_DATA_OUT BIT_0 /* data from initiator */ #define ELS_PLOGI 0x3 #define ELS_FLOGI 0x4 @@ -514,6 +516,68 @@ struct ctio7_from_24xx { #define ELS_ADISC 0x52 /* + *CTIO Type CRC_2 IOCB + */ +struct ctio_crc2_to_fw { + uint8_t entry_type; /* Entry type. */ +#define CTIO_CRC2 0x7A + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + uint16_t nport_handle; /* N_PORT handle. */ + __le16 timeout; /* Command timeout. */ + + uint16_t dseg_count; /* Data segment count. */ + uint8_t vp_index; + uint8_t add_flags; /* additional flags */ +#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3 + + uint8_t initiator_id[3]; /* initiator ID */ + uint8_t reserved1; + uint32_t exchange_addr; /* rcv exchange address */ + uint16_t reserved2; + __le16 flags; /* refer to CTIO7 flags values */ + uint32_t residual; + __le16 ox_id; + uint16_t scsi_status; + __le32 relative_offset; + uint32_t reserved5; + __le32 transfer_length; /* total fc transfer length */ + uint32_t reserved6; + __le32 crc_context_address[2];/* Data segment address. */ + uint16_t crc_context_len; /* Data segment length. */ + uint16_t reserved_1; /* MUST be set to 0. */ +} __packed; + +/* CTIO Type CRC_x Status IOCB */ +struct ctio_crc_from_fw { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + uint16_t status; + uint16_t timeout; /* Command timeout. */ + uint16_t dseg_count; /* Data segment count. */ + uint32_t reserved1; + uint16_t state_flags; +#define CTIO_CRC_SF_DIF_CHOPPED BIT_4 + + uint32_t exchange_address; /* rcv exchange address */ + uint16_t reserved2; + uint16_t flags; + uint32_t resid_xfer_length; + uint16_t ox_id; + uint8_t reserved3[12]; + uint16_t runt_guard; /* reported runt blk guard */ + uint8_t actual_dif[8]; + uint8_t expected_dif[8]; +} __packed; + +/* * ISP queue - ABTS received/response entries structure definition for 24xx. 
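 *
 * Aside on the __be16/__le16 retyping above: annotating wire-order fields
 * lets sparse ("make C=2 CF=-D__CHECK_ENDIAN__") flag any access that
 * misses a byte swap. A minimal sketch of the convention, with a
 * hypothetical frame struct:
 *
 *	struct frame {
 *		__be16 ox_id;		/* big-endian on the FC wire */
 *		__le16 flags;		/* little-endian per the IOCB layout */
 *	};
 *
 *	static inline u16 frame_ox_id(const struct frame *f)
 *	{
 *		return be16_to_cpu(f->ox_id);	/* swap only at the boundary */
 *	}
 *
 *	static inline void frame_set_flags(struct frame *f, u16 v)
 *	{
 *		f->flags = cpu_to_le16(v);
 *	}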
*/ #define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */ @@ -641,6 +705,7 @@ struct qla_tgt_func_tmpl { int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, unsigned char *, uint32_t, int, int, int); void (*handle_data)(struct qla_tgt_cmd *); + void (*handle_dif_err)(struct qla_tgt_cmd *); int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t, uint32_t); void (*free_cmd)(struct qla_tgt_cmd *); @@ -805,6 +870,12 @@ struct qla_tgt { struct list_head tgt_list_entry; }; +struct qla_tgt_sess_op { + struct scsi_qla_host *vha; + struct atio_from_isp atio; + struct work_struct work; +}; + /* * Equivilant to IT Nexus (Initiator-Target) */ @@ -829,9 +900,9 @@ struct qla_tgt_sess { }; struct qla_tgt_cmd { + struct se_cmd se_cmd; struct qla_tgt_sess *sess; int state; - struct se_cmd se_cmd; struct work_struct free_work; struct work_struct work; /* Sense buffer that will be mapped into outgoing status */ @@ -843,6 +914,7 @@ struct qla_tgt_cmd { unsigned int free_sg:1; unsigned int aborted:1; /* Needed in case of SRR */ unsigned int write_data_transferred:1; + unsigned int ctx_dsd_alloced:1; struct scatterlist *sg; /* cmd data buffer SG vector */ int sg_cnt; /* SG segments count */ @@ -855,9 +927,14 @@ struct qla_tgt_cmd { uint16_t loop_id; /* to save extra sess dereferences */ struct qla_tgt *tgt; /* to save extra sess dereferences */ struct scsi_qla_host *vha; - struct list_head cmd_list; struct atio_from_isp atio; + /* t10dif */ + struct scatterlist *prot_sg; + uint32_t prot_sg_cnt; + uint32_t blk_sz; + struct crc_context *ctx; + }; struct qla_tgt_sess_work_param { @@ -902,6 +979,10 @@ struct qla_tgt_prm { int sense_buffer_len; int residual; int add_status_pkt; + /* dif */ + struct scatterlist *prot_sg; + uint16_t prot_seg_cnt; + uint16_t tot_dsds; }; struct qla_tgt_srr_imm { @@ -932,8 +1013,8 @@ void qlt_disable_vha(struct scsi_qla_host *); */ extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *); extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *); -extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64, - int (*callback)(struct scsi_qla_host *), void *); +extern int qlt_lport_register(void *, u64, u64, u64, + int (*callback)(struct scsi_qla_host *, void *, u64, u64)); extern void qlt_lport_deregister(struct scsi_qla_host *); extern void qlt_unreg_sess(struct qla_tgt_sess *); extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); @@ -977,10 +1058,11 @@ extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *, extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); +extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *); +extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t); extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); -extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t); extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *); extern void qlt_enable_vha(struct scsi_qla_host *); extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *); @@ -1003,7 +1085,7 @@ extern void qlt_modify_vp_config(struct scsi_qla_host *, extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *); extern int qlt_mem_alloc(struct qla_hw_data *); extern void qlt_mem_free(struct qla_hw_data *); -extern void 
qlt_stop_phase1(struct qla_tgt *); +extern int qlt_stop_phase1(struct qla_tgt *); extern void qlt_stop_phase2(struct qla_tgt *); extern irqreturn_t qla83xx_msix_atio_q(int, void *); extern void qlt_83xx_iospace_config(struct qla_hw_data *); diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c new file mode 100644 index 00000000000..cb9a0c4bc41 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -0,0 +1,956 @@ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. + */ +#include "qla_def.h" +#include "qla_tmpl.h" + +/* note default template is in big endian */ +static const uint32_t ql27xx_fwdt_default_template[] = { + 0x63000000, 0xa4000000, 0x7c050000, 0x00000000, + 0x30000000, 0x01000000, 0x00000000, 0xc0406eb4, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x04010000, 0x14000000, 0x00000000, + 0x02000000, 0x44000000, 0x09010000, 0x10000000, + 0x00000000, 0x02000000, 0x01010000, 0x1c000000, + 0x00000000, 0x02000000, 0x00600000, 0x00000000, + 0xc0000000, 0x01010000, 0x1c000000, 0x00000000, + 0x02000000, 0x00600000, 0x00000000, 0xcc000000, + 0x01010000, 0x1c000000, 0x00000000, 0x02000000, + 0x10600000, 0x00000000, 0xd4000000, 0x01010000, + 0x1c000000, 0x00000000, 0x02000000, 0x700f0000, + 0x00000060, 0xf0000000, 0x00010000, 0x18000000, + 0x00000000, 0x02000000, 0x00700000, 0x041000c0, + 0x00010000, 0x18000000, 0x00000000, 0x02000000, + 0x10700000, 0x041000c0, 0x00010000, 0x18000000, + 0x00000000, 0x02000000, 0x40700000, 0x041000c0, + 0x01010000, 0x1c000000, 0x00000000, 0x02000000, + 0x007c0000, 0x01000000, 0xc0000000, 0x00010000, + 0x18000000, 0x00000000, 0x02000000, 0x007c0000, + 0x040300c4, 0x00010000, 0x18000000, 0x00000000, + 0x02000000, 0x007c0000, 0x040100c0, 0x01010000, + 0x1c000000, 0x00000000, 0x02000000, 0x007c0000, + 0x00000000, 0xc0000000, 0x00010000, 0x18000000, + 0x00000000, 0x02000000, 0x007c0000, 0x04200000, + 0x0b010000, 0x18000000, 0x00000000, 0x02000000, + 0x0c000000, 0x00000000, 0x02010000, 0x20000000, + 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, + 0xf0000000, 0x000000b0, 0x02010000, 0x20000000, + 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, + 0xf0000000, 0x000010b0, 0x02010000, 0x20000000, + 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, + 0xf0000000, 0x000020b0, 0x02010000, 0x20000000, + 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, + 0xf0000000, 0x000030b0, 0x02010000, 0x20000000, + 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, + 0xf0000000, 0x000040b0, 0x02010000, 0x20000000, + 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, + 0xf0000000, 0x000050b0, 0x02010000, 0x20000000, + 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, + 0xf0000000, 0x000060b0, 0x02010000, 0x20000000, + 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, + 0xf0000000, 0x000070b0, 0x02010000, 0x20000000, + 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, + 0xf0000000, 0x000080b0, 0x02010000, 0x20000000, + 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, + 0xf0000000, 0x000090b0, 0x02010000, 0x20000000, + 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, + 0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000, + 
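/*
 * Decoding the leading dwords of this blob (a sketch; per the comment
 * above, the default template is stored big-endian, i.e. byte-swapped
 * relative to a little-endian host): 0x63000000 -> 0x63 = 99 =
 * TEMPLATE_TYPE_FWDUMP, 0xa4000000 -> 0xa4 = entry_offset (the header
 * size), 0x7c050000 -> 0x57c = template_size in bytes. For example:
 *
 *	uint32_t stored = 0x7c050000;			// third dword above
 *	uint32_t size   = __builtin_bswap32(stored);	// 0x0000057c = 1404
 */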
0x00000000, 0x02000000, 0x0a000000, 0x040100c0, + 0x00010000, 0x18000000, 0x00000000, 0x02000000, + 0x0a000000, 0x04200080, 0x00010000, 0x18000000, + 0x00000000, 0x02000000, 0x00be0000, 0x041000c0, + 0x00010000, 0x18000000, 0x00000000, 0x02000000, + 0x10be0000, 0x041000c0, 0x00010000, 0x18000000, + 0x00000000, 0x02000000, 0x20be0000, 0x041000c0, + 0x00010000, 0x18000000, 0x00000000, 0x02000000, + 0x30be0000, 0x041000c0, 0x00010000, 0x18000000, + 0x00000000, 0x02000000, 0x00b00000, 0x041000c0, + 0x00010000, 0x18000000, 0x00000000, 0x02000000, + 0x10b00000, 0x041000c0, 0x00010000, 0x18000000, + 0x00000000, 0x02000000, 0x20b00000, 0x041000c0, + 0x00010000, 0x18000000, 0x00000000, 0x02000000, + 0x30b00000, 0x041000c0, 0x00010000, 0x18000000, + 0x00000000, 0x02000000, 0x00300000, 0x041000c0, + 0x00010000, 0x18000000, 0x00000000, 0x02000000, + 0x10300000, 0x041000c0, 0x00010000, 0x18000000, + 0x00000000, 0x02000000, 0x20300000, 0x041000c0, + 0x00010000, 0x18000000, 0x00000000, 0x02000000, + 0x30300000, 0x041000c0, 0x0a010000, 0x10000000, + 0x00000000, 0x02000000, 0x06010000, 0x1c000000, + 0x00000000, 0x02000000, 0x01000000, 0x00000200, + 0xff230200, 0x06010000, 0x1c000000, 0x00000000, + 0x02000000, 0x02000000, 0x00001000, 0x00000000, + 0x07010000, 0x18000000, 0x00000000, 0x02000000, + 0x00000000, 0x01000000, 0x07010000, 0x18000000, + 0x00000000, 0x02000000, 0x00000000, 0x02000000, + 0x07010000, 0x18000000, 0x00000000, 0x02000000, + 0x00000000, 0x03000000, 0x0d010000, 0x14000000, + 0x00000000, 0x02000000, 0x00000000, 0xff000000, + 0x10000000, 0x00000000, 0x00000080, +}; + +static inline void __iomem * +qla27xx_isp_reg(struct scsi_qla_host *vha) +{ + return &vha->hw->iobase->isp24; +} + +static inline void +qla27xx_insert16(uint16_t value, void *buf, ulong *len) +{ + if (buf) { + buf += *len; + *(__le16 *)buf = cpu_to_le16(value); + } + *len += sizeof(value); +} + +static inline void +qla27xx_insert32(uint32_t value, void *buf, ulong *len) +{ + if (buf) { + buf += *len; + *(__le32 *)buf = cpu_to_le32(value); + } + *len += sizeof(value); +} + +static inline void +qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len) +{ + ulong cnt = size; + + if (buf && mem) { + buf += *len; + while (cnt >= sizeof(uint32_t)) { + *(__le32 *)buf = cpu_to_le32p(mem); + buf += sizeof(uint32_t); + mem += sizeof(uint32_t); + cnt -= sizeof(uint32_t); + } + if (cnt) + memcpy(buf, mem, cnt); + } + *len += size; +} + +static inline void +qla27xx_read8(void *window, void *buf, ulong *len) +{ + uint8_t value = ~0; + + if (buf) { + value = RD_REG_BYTE((__iomem void *)window); + ql_dbg(ql_dbg_misc, NULL, 0xd011, + "%s: -> %x\n", __func__, value); + } + qla27xx_insert32(value, buf, len); +} + +static inline void +qla27xx_read16(void *window, void *buf, ulong *len) +{ + uint16_t value = ~0; + + if (buf) { + value = RD_REG_WORD((__iomem void *)window); + ql_dbg(ql_dbg_misc, NULL, 0xd012, + "%s: -> %x\n", __func__, value); + } + qla27xx_insert32(value, buf, len); +} + +static inline void +qla27xx_read32(void *window, void *buf, ulong *len) +{ + uint32_t value = ~0; + + if (buf) { + value = RD_REG_DWORD((__iomem void *)window); + ql_dbg(ql_dbg_misc, NULL, 0xd013, + "%s: -> %x\n", __func__, value); + } + qla27xx_insert32(value, buf, len); +} + +static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *) +{ + return + (width == 1) ? qla27xx_read8 : + (width == 2) ? 
qla27xx_read16 : + qla27xx_read32; +} + +static inline void +qla27xx_read_reg(__iomem struct device_reg_24xx *reg, + uint offset, void *buf, ulong *len) +{ + void *window = (void *)reg + offset; + + if (buf) { + ql_dbg(ql_dbg_misc, NULL, 0xd014, + "%s: @%x\n", __func__, offset); + } + qla27xx_read32(window, buf, len); +} + +static inline void +qla27xx_write_reg(__iomem struct device_reg_24xx *reg, + uint offset, uint32_t data, void *buf) +{ + __iomem void *window = reg + offset; + + if (buf) { + ql_dbg(ql_dbg_misc, NULL, 0xd015, + "%s: @%x <- %x\n", __func__, offset, data); + WRT_REG_DWORD(window, data); + } +} + +static inline void +qla27xx_read_window(__iomem struct device_reg_24xx *reg, + uint32_t addr, uint offset, uint count, uint width, void *buf, + ulong *len) +{ + void *window = (void *)reg + offset; + void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width); + + if (buf) { + ql_dbg(ql_dbg_misc, NULL, 0xd016, + "%s: base=%x offset=%x count=%x width=%x\n", + __func__, addr, offset, count, width); + } + qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf); + while (count--) { + qla27xx_insert32(addr, buf, len); + readn(window, buf, len); + window += width; + addr++; + } +} + +static inline void +qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf) +{ + if (buf) + ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY; +} + +static int +qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc, vha, 0xd100, + "%s: nop [%lx]\n", __func__, *len); + qla27xx_skip_entry(ent, buf); + + return false; +} + +static int +qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc, vha, 0xd1ff, + "%s: end [%lx]\n", __func__, *len); + qla27xx_skip_entry(ent, buf); + + /* terminate */ + return true; +} + +static int +qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + + ql_dbg(ql_dbg_misc, vha, 0xd200, + "%s: rdio t1 [%lx]\n", __func__, *len); + qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset, + ent->t256.reg_count, ent->t256.reg_width, buf, len); + + return false; +} + +static int +qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + + ql_dbg(ql_dbg_misc, vha, 0xd201, + "%s: wrio t1 [%lx]\n", __func__, *len); + qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf); + qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf); + + return false; +} + +static int +qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + + ql_dbg(ql_dbg_misc, vha, 0xd202, + "%s: rdio t2 [%lx]\n", __func__, *len); + qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf); + qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset, + ent->t258.reg_count, ent->t258.reg_width, buf, len); + + return false; +} + +static int +qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + + ql_dbg(ql_dbg_misc, vha, 0xd203, + "%s: wrio t2 [%lx]\n", __func__, *len); + qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf); + 
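/*
 * Note the two-pass pattern running through these handlers: every
 * qla27xx_insert*()/readn() call advances *len unconditionally but only
 * dereferences buf when it is non-NULL, so one walk with buf == NULL
 * computes the dump size and a second walk fills the buffer. A standalone
 * sketch of the same idiom (hypothetical serializer):
 *
 *	static void emit32(uint32_t v, void *buf, unsigned long *len)
 *	{
 *		if (buf)
 *			memcpy((char *)buf + *len, &v, sizeof(v));
 *		*len += sizeof(v);	// counts even in the sizing pass
 *	}
 *
 *	// usage: emit32(v, NULL, &need); buf = malloc(need);
 *	//        off = 0; emit32(v, buf, &off);
 */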
qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf); + qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf); + + return false; +} + +static int +qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + + ql_dbg(ql_dbg_misc, vha, 0xd204, + "%s: rdpci [%lx]\n", __func__, *len); + qla27xx_insert32(ent->t260.pci_offset, buf, len); + qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len); + + return false; +} + +static int +qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + + ql_dbg(ql_dbg_misc, vha, 0xd205, + "%s: wrpci [%lx]\n", __func__, *len); + qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf); + + return false; +} + +static int +qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong dwords; + ulong start; + ulong end; + + ql_dbg(ql_dbg_misc, vha, 0xd206, + "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len); + start = ent->t262.start_addr; + end = ent->t262.end_addr; + + if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) { + ; + } else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) { + end = vha->hw->fw_memory_size; + if (buf) + ent->t262.end_addr = end; + } else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) { + start = vha->hw->fw_shared_ram_start; + end = vha->hw->fw_shared_ram_end; + if (buf) { + ent->t262.start_addr = start; + ent->t262.end_addr = end; + } + } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) { + ql_dbg(ql_dbg_misc, vha, 0xd021, + "%s: unsupported ddr ram\n", __func__); + qla27xx_skip_entry(ent, buf); + goto done; + } else { + ql_dbg(ql_dbg_misc, vha, 0xd022, + "%s: unknown area %u\n", __func__, ent->t262.ram_area); + qla27xx_skip_entry(ent, buf); + goto done; + } + + if (end < start || end == 0) { + ql_dbg(ql_dbg_misc, vha, 0xd023, + "%s: unusable range (start=%x end=%x)\n", __func__, + ent->t262.end_addr, ent->t262.start_addr); + qla27xx_skip_entry(ent, buf); + goto done; + } + + dwords = end - start + 1; + if (buf) { + ql_dbg(ql_dbg_misc, vha, 0xd024, + "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords); + buf += *len; + qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf); + } + *len += dwords * sizeof(uint32_t); +done: + return false; +} + +static int +qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + uint count = 0; + uint i; + uint length; + + ql_dbg(ql_dbg_misc, vha, 0xd207, + "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len); + if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) { + for (i = 0; i < vha->hw->max_req_queues; i++) { + struct req_que *req = vha->hw->req_q_map[i]; + if (req || !buf) { + length = req ? + req->length : REQUEST_ENTRY_CNT_24XX; + qla27xx_insert16(i, buf, len); + qla27xx_insert16(length, buf, len); + qla27xx_insertbuf(req ? req->ring : NULL, + length * sizeof(*req->ring), buf, len); + count++; + } + } + } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { + for (i = 0; i < vha->hw->max_rsp_queues; i++) { + struct rsp_que *rsp = vha->hw->rsp_q_map[i]; + if (rsp || !buf) { + length = rsp ? + rsp->length : RESPONSE_ENTRY_CNT_MQ; + qla27xx_insert16(i, buf, len); + qla27xx_insert16(length, buf, len); + qla27xx_insertbuf(rsp ? 
rsp->ring : NULL, + length * sizeof(*rsp->ring), buf, len); + count++; + } + } + } else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) { + ql_dbg(ql_dbg_misc, vha, 0xd025, + "%s: unsupported atio queue\n", __func__); + qla27xx_skip_entry(ent, buf); + } else { + ql_dbg(ql_dbg_misc, vha, 0xd026, + "%s: unknown queue %u\n", __func__, ent->t263.queue_type); + qla27xx_skip_entry(ent, buf); + } + + if (buf) + ent->t263.num_queues = count; + + return false; +} + +static int +qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc, vha, 0xd208, + "%s: getfce [%lx]\n", __func__, *len); + if (vha->hw->fce) { + if (buf) { + ent->t264.fce_trace_size = FCE_SIZE; + ent->t264.write_pointer = vha->hw->fce_wr; + ent->t264.base_pointer = vha->hw->fce_dma; + ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0]; + ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2]; + ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3]; + ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4]; + ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5]; + ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6]; + } + qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len); + } else { + ql_dbg(ql_dbg_misc, vha, 0xd027, + "%s: missing fce\n", __func__); + qla27xx_skip_entry(ent, buf); + } + + return false; +} + +static int +qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + + ql_dbg(ql_dbg_misc, vha, 0xd209, + "%s: pause risc [%lx]\n", __func__, *len); + if (buf) + qla24xx_pause_risc(reg, vha->hw); + + return false; +} + +static int +qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc, vha, 0xd20a, + "%s: reset risc [%lx]\n", __func__, *len); + if (buf) + qla24xx_soft_reset(vha->hw); + + return false; +} + +static int +qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + + ql_dbg(ql_dbg_misc, vha, 0xd20b, + "%s: dis intr [%lx]\n", __func__, *len); + qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf); + + return false; +} + +static int +qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc, vha, 0xd20c, + "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len); + if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) { + if (vha->hw->eft) { + if (buf) { + ent->t268.buf_size = EFT_SIZE; + ent->t268.start_addr = vha->hw->eft_dma; + } + qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len); + } else { + ql_dbg(ql_dbg_misc, vha, 0xd028, + "%s: missing eft\n", __func__); + qla27xx_skip_entry(ent, buf); + } + } else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) { + ql_dbg(ql_dbg_misc, vha, 0xd029, + "%s: unsupported exchange offload buffer\n", __func__); + qla27xx_skip_entry(ent, buf); + } else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) { + ql_dbg(ql_dbg_misc, vha, 0xd02a, + "%s: unsupported extended login buffer\n", __func__); + qla27xx_skip_entry(ent, buf); + } else { + ql_dbg(ql_dbg_misc, vha, 0xd02b, + "%s: unknown buf %x\n", __func__, ent->t268.buf_type); + qla27xx_skip_entry(ent, buf); + } + + return false; +} + +static int +qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc, vha, 0xd20d, + "%s: 
scratch [%lx]\n", __func__, *len); + qla27xx_insert32(0xaaaaaaaa, buf, len); + qla27xx_insert32(0xbbbbbbbb, buf, len); + qla27xx_insert32(0xcccccccc, buf, len); + qla27xx_insert32(0xdddddddd, buf, len); + qla27xx_insert32(*len + sizeof(uint32_t), buf, len); + if (buf) + ent->t269.scratch_size = 5 * sizeof(uint32_t); + + return false; +} + +static int +qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + ulong dwords = ent->t270.count; + ulong addr = ent->t270.addr; + + ql_dbg(ql_dbg_misc, vha, 0xd20e, + "%s: rdremreg [%lx]\n", __func__, *len); + qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf); + while (dwords--) { + qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf); + qla27xx_insert32(addr, buf, len); + qla27xx_read_reg(reg, 0xc4, buf, len); + addr += sizeof(uint32_t); + } + + return false; +} + +static int +qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + ulong addr = ent->t271.addr; + ulong data = ent->t271.data; + + ql_dbg(ql_dbg_misc, vha, 0xd20f, + "%s: wrremreg [%lx]\n", __func__, *len); + qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf); + qla27xx_write_reg(reg, 0xc4, data, buf); + qla27xx_write_reg(reg, 0xc0, addr, buf); + + return false; +} + +static int +qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong dwords = ent->t272.count; + ulong start = ent->t272.addr; + + ql_dbg(ql_dbg_misc, vha, 0xd210, + "%s: rdremram [%lx]\n", __func__, *len); + if (buf) { + ql_dbg(ql_dbg_misc, vha, 0xd02c, + "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords); + buf += *len; + qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf); + } + *len += dwords * sizeof(uint32_t); + + return false; +} + +static int +qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ulong dwords = ent->t273.count; + ulong addr = ent->t273.addr; + uint32_t value; + + ql_dbg(ql_dbg_misc, vha, 0xd211, + "%s: pcicfg [%lx]\n", __func__, *len); + while (dwords--) { + value = ~0; + if (pci_read_config_dword(vha->hw->pdev, addr, &value)) + ql_dbg(ql_dbg_misc, vha, 0xd02d, + "%s: failed pcicfg read at %lx\n", __func__, addr); + qla27xx_insert32(addr, buf, len); + qla27xx_insert32(value, buf, len); + addr += sizeof(uint32_t); + } + + return false; +} + +static int +qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + uint count = 0; + uint i; + + ql_dbg(ql_dbg_misc, vha, 0xd212, + "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len); + if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { + for (i = 0; i < vha->hw->max_req_queues; i++) { + struct req_que *req = vha->hw->req_q_map[i]; + if (req || !buf) { + qla27xx_insert16(i, buf, len); + qla27xx_insert16(1, buf, len); + qla27xx_insert32(req && req->out_ptr ? + *req->out_ptr : 0, buf, len); + count++; + } + } + } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { + for (i = 0; i < vha->hw->max_rsp_queues; i++) { + struct rsp_que *rsp = vha->hw->rsp_q_map[i]; + if (rsp || !buf) { + qla27xx_insert16(i, buf, len); + qla27xx_insert16(1, buf, len); + qla27xx_insert32(rsp && rsp->in_ptr ? 
+ *rsp->in_ptr : 0, buf, len); + count++; + } + } + } else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) { + ql_dbg(ql_dbg_misc, vha, 0xd02e, + "%s: unsupported atio queue\n", __func__); + qla27xx_skip_entry(ent, buf); + } else { + ql_dbg(ql_dbg_misc, vha, 0xd02f, + "%s: unknown queue %u\n", __func__, ent->t274.queue_type); + qla27xx_skip_entry(ent, buf); + } + + if (buf) + ent->t274.num_queues = count; + + if (!count) + qla27xx_skip_entry(ent, buf); + + return false; +} + +static int +qla27xx_fwdt_entry_other(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + ql_dbg(ql_dbg_misc, vha, 0xd2ff, + "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len); + qla27xx_skip_entry(ent, buf); + + return false; +} + +struct qla27xx_fwdt_entry_call { + int type; + int (*call)( + struct scsi_qla_host *, + struct qla27xx_fwdt_entry *, + void *, + ulong *); +}; + +static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = { + { ENTRY_TYPE_NOP , qla27xx_fwdt_entry_t0 } , + { ENTRY_TYPE_TMP_END , qla27xx_fwdt_entry_t255 } , + { ENTRY_TYPE_RD_IOB_T1 , qla27xx_fwdt_entry_t256 } , + { ENTRY_TYPE_WR_IOB_T1 , qla27xx_fwdt_entry_t257 } , + { ENTRY_TYPE_RD_IOB_T2 , qla27xx_fwdt_entry_t258 } , + { ENTRY_TYPE_WR_IOB_T2 , qla27xx_fwdt_entry_t259 } , + { ENTRY_TYPE_RD_PCI , qla27xx_fwdt_entry_t260 } , + { ENTRY_TYPE_WR_PCI , qla27xx_fwdt_entry_t261 } , + { ENTRY_TYPE_RD_RAM , qla27xx_fwdt_entry_t262 } , + { ENTRY_TYPE_GET_QUEUE , qla27xx_fwdt_entry_t263 } , + { ENTRY_TYPE_GET_FCE , qla27xx_fwdt_entry_t264 } , + { ENTRY_TYPE_PSE_RISC , qla27xx_fwdt_entry_t265 } , + { ENTRY_TYPE_RST_RISC , qla27xx_fwdt_entry_t266 } , + { ENTRY_TYPE_DIS_INTR , qla27xx_fwdt_entry_t267 } , + { ENTRY_TYPE_GET_HBUF , qla27xx_fwdt_entry_t268 } , + { ENTRY_TYPE_SCRATCH , qla27xx_fwdt_entry_t269 } , + { ENTRY_TYPE_RDREMREG , qla27xx_fwdt_entry_t270 } , + { ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } , + { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } , + { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } , + { ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } , + { -1 , qla27xx_fwdt_entry_other } +}; + +static inline int (*qla27xx_find_entry(int type)) + (struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *) +{ + struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list; + + while (list->type != -1 && list->type != type) + list++; + + return list->call; +} + +static inline void * +qla27xx_next_entry(void *p) +{ + struct qla27xx_fwdt_entry *ent = p; + + return p + ent->hdr.entry_size; +} + +static void +qla27xx_walk_template(struct scsi_qla_host *vha, + struct qla27xx_fwdt_template *tmp, void *buf, ulong *len) +{ + struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset; + ulong count = tmp->entry_count; + + ql_dbg(ql_dbg_misc, vha, 0xd01a, + "%s: entry count %lx\n", __func__, count); + while (count--) { + if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len)) + break; + ent = qla27xx_next_entry(ent); + } + ql_dbg(ql_dbg_misc, vha, 0xd01b, + "%s: len=%lx\n", __func__, *len); +} + +static void +qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp) +{ + tmp->capture_timestamp = jiffies; +} + +static void +qla27xx_driver_info(struct qla27xx_fwdt_template *tmp) +{ + uint8_t v[] = { 0, 0, 0, 0, 0, 0 }; + int rval = 0; + + rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu", + v+0, v+1, v+2, v+3, v+4, v+5); + + tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]; + tmp->driver_info[1] = v[5] << 8 | v[4]; + 
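/*
 * A standalone sketch of the sentinel-terminated dispatch table that
 * qla27xx_find_entry() implements above: unknown entry types fall through
 * to a catch-all handler instead of being dereferenced blindly (names
 * hypothetical):
 *
 *	struct handler { int type; int (*fn)(void *ctx); };
 *
 *	static int handle_nop(void *ctx)   { return 0; }
 *	static int handle_end(void *ctx)   { return 1; }	// stop walking
 *	static int handle_other(void *ctx) { return 0; }	// skip unknowns
 *
 *	static const struct handler table[] = {
 *		{ 0,   handle_nop   },
 *		{ 255, handle_end   },
 *		{ -1,  handle_other },	// sentinel: must stay last
 *	};
 *
 *	static int (*find_handler(int type))(void *)
 *	{
 *		const struct handler *h = table;
 *
 *		while (h->type != -1 && h->type != type)
 *			h++;
 *		return h->fn;	// never NULL, thanks to the sentinel
 *	}
 */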
tmp->driver_info[2] = 0x12345678; +} + +static void +qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp, + struct scsi_qla_host *vha) +{ + tmp->firmware_version[0] = vha->hw->fw_major_version; + tmp->firmware_version[1] = vha->hw->fw_minor_version; + tmp->firmware_version[2] = vha->hw->fw_subminor_version; + tmp->firmware_version[3] = + vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes; + tmp->firmware_version[4] = + vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0]; +} + +static void +ql27xx_edit_template(struct scsi_qla_host *vha, + struct qla27xx_fwdt_template *tmp) +{ + qla27xx_time_stamp(tmp); + qla27xx_driver_info(tmp); + qla27xx_firmware_info(tmp, vha); +} + +static inline uint32_t +qla27xx_template_checksum(void *p, ulong size) +{ + uint32_t *buf = p; + uint64_t sum = 0; + + size /= sizeof(*buf); + + while (size--) + sum += *buf++; + + sum = (sum & 0xffffffff) + (sum >> 32); + + return ~sum; +} + +static inline int +qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp) +{ + return qla27xx_template_checksum(tmp, tmp->template_size) == 0; +} + +static inline int +qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp) +{ + return tmp->template_type == TEMPLATE_TYPE_FWDUMP; +} + +static void +qla27xx_execute_fwdt_template(struct scsi_qla_host *vha) +{ + struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template; + ulong len; + + if (qla27xx_fwdt_template_valid(tmp)) { + len = tmp->template_size; + tmp = memcpy(vha->hw->fw_dump, tmp, len); + ql27xx_edit_template(vha, tmp); + qla27xx_walk_template(vha, tmp, tmp, &len); + vha->hw->fw_dump_len = len; + vha->hw->fw_dumped = 1; + } +} + +ulong +qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha) +{ + struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template; + ulong len = 0; + + if (qla27xx_fwdt_template_valid(tmp)) { + len = tmp->template_size; + qla27xx_walk_template(vha, tmp, NULL, &len); + } + + return len; +} + +ulong +qla27xx_fwdt_template_size(void *p) +{ + struct qla27xx_fwdt_template *tmp = p; + + return tmp->template_size; +} + +ulong +qla27xx_fwdt_template_default_size(void) +{ + return sizeof(ql27xx_fwdt_default_template); +} + +const void * +qla27xx_fwdt_template_default(void) +{ + return ql27xx_fwdt_default_template; +} + +int +qla27xx_fwdt_template_valid(void *p) +{ + struct qla27xx_fwdt_template *tmp = p; + + if (!qla27xx_verify_template_header(tmp)) { + ql_log(ql_log_warn, NULL, 0xd01c, + "%s: template type %x\n", __func__, tmp->template_type); + return false; + } + + if (!qla27xx_verify_template_checksum(tmp)) { + ql_log(ql_log_warn, NULL, 0xd01d, + "%s: failed template checksum\n", __func__); + return false; + } + + return true; +} + +void +qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked) +{ + ulong flags = 0; + + if (!hardware_locked) + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + + if (!vha->hw->fw_dump) + ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n"); + else if (!vha->hw->fw_dump_template) + ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n"); + else + qla27xx_execute_fwdt_template(vha); + + if (!hardware_locked) + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); +} diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h new file mode 100644 index 00000000000..1967424c8e6 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_tmpl.h @@ -0,0 +1,216 @@ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing 
details. + */ + +#ifndef __QLA_DMP27_H__ +#define __QLA_DMP27_H__ + +#define IOBASE_ADDR offsetof(struct device_reg_24xx, iobase_addr) + +struct __packed qla27xx_fwdt_template { + uint32_t template_type; + uint32_t entry_offset; + uint32_t template_size; + uint32_t reserved_1; + + uint32_t entry_count; + uint32_t template_version; + uint32_t capture_timestamp; + uint32_t template_checksum; + + uint32_t reserved_2; + uint32_t driver_info[3]; + + uint32_t saved_state[16]; + + uint32_t reserved_3[8]; + uint32_t firmware_version[5]; +}; + +#define TEMPLATE_TYPE_FWDUMP 99 + +#define ENTRY_TYPE_NOP 0 +#define ENTRY_TYPE_TMP_END 255 +#define ENTRY_TYPE_RD_IOB_T1 256 +#define ENTRY_TYPE_WR_IOB_T1 257 +#define ENTRY_TYPE_RD_IOB_T2 258 +#define ENTRY_TYPE_WR_IOB_T2 259 +#define ENTRY_TYPE_RD_PCI 260 +#define ENTRY_TYPE_WR_PCI 261 +#define ENTRY_TYPE_RD_RAM 262 +#define ENTRY_TYPE_GET_QUEUE 263 +#define ENTRY_TYPE_GET_FCE 264 +#define ENTRY_TYPE_PSE_RISC 265 +#define ENTRY_TYPE_RST_RISC 266 +#define ENTRY_TYPE_DIS_INTR 267 +#define ENTRY_TYPE_GET_HBUF 268 +#define ENTRY_TYPE_SCRATCH 269 +#define ENTRY_TYPE_RDREMREG 270 +#define ENTRY_TYPE_WRREMREG 271 +#define ENTRY_TYPE_RDREMRAM 272 +#define ENTRY_TYPE_PCICFG 273 +#define ENTRY_TYPE_GET_SHADOW 274 + +#define CAPTURE_FLAG_PHYS_ONLY BIT_0 +#define CAPTURE_FLAG_PHYS_VIRT BIT_1 + +#define DRIVER_FLAG_SKIP_ENTRY BIT_7 + +struct __packed qla27xx_fwdt_entry { + struct __packed { + uint32_t entry_type; + uint32_t entry_size; + uint32_t reserved_1; + + uint8_t capture_flags; + uint8_t reserved_2[2]; + uint8_t driver_flags; + } hdr; + union __packed { + struct __packed { + } t0; + + struct __packed { + } t255; + + struct __packed { + uint32_t base_addr; + uint8_t reg_width; + uint16_t reg_count; + uint8_t pci_offset; + } t256; + + struct __packed { + uint32_t base_addr; + uint32_t write_data; + uint8_t pci_offset; + uint8_t reserved[3]; + } t257; + + struct __packed { + uint32_t base_addr; + uint8_t reg_width; + uint16_t reg_count; + uint8_t pci_offset; + uint8_t banksel_offset; + uint8_t reserved[3]; + uint32_t bank; + } t258; + + struct __packed { + uint32_t base_addr; + uint32_t write_data; + uint8_t reserved[2]; + uint8_t pci_offset; + uint8_t banksel_offset; + uint32_t bank; + } t259; + + struct __packed { + uint8_t pci_offset; + uint8_t reserved[3]; + } t260; + + struct __packed { + uint8_t pci_offset; + uint8_t reserved[3]; + uint32_t write_data; + } t261; + + struct __packed { + uint8_t ram_area; + uint8_t reserved[3]; + uint32_t start_addr; + uint32_t end_addr; + } t262; + + struct __packed { + uint32_t num_queues; + uint8_t queue_type; + uint8_t reserved[3]; + } t263; + + struct __packed { + uint32_t fce_trace_size; + uint64_t write_pointer; + uint64_t base_pointer; + uint32_t fce_enable_mb0; + uint32_t fce_enable_mb2; + uint32_t fce_enable_mb3; + uint32_t fce_enable_mb4; + uint32_t fce_enable_mb5; + uint32_t fce_enable_mb6; + } t264; + + struct __packed { + } t265; + + struct __packed { + } t266; + + struct __packed { + uint8_t pci_offset; + uint8_t reserved[3]; + uint32_t data; + } t267; + + struct __packed { + uint8_t buf_type; + uint8_t reserved[3]; + uint32_t buf_size; + uint64_t start_addr; + } t268; + + struct __packed { + uint32_t scratch_size; + } t269; + + struct __packed { + uint32_t addr; + uint32_t count; + } t270; + + struct __packed { + uint32_t addr; + uint32_t data; + } t271; + + struct __packed { + uint32_t addr; + uint32_t count; + } t272; + + struct __packed { + uint32_t addr; + uint32_t count; + } t273; + + struct __packed { + 
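/*
 * Aside on qla27xx_template_checksum() in qla_tmpl.c above: it folds a
 * 64-bit sum of all template dwords (the template_checksum field
 * included) back into 32 bits and complements it, so a template whose
 * stored checksum is consistent verifies as exactly 0. A standalone
 * sketch taking a dword count:
 *
 *	static uint32_t tmpl_csum(const uint32_t *p, unsigned long dwords)
 *	{
 *		uint64_t sum = 0;
 *
 *		while (dwords--)
 *			sum += *p++;
 *		sum = (sum & 0xffffffff) + (sum >> 32);	// fold the carries
 *		return ~sum;	// 0 iff the stored checksum is consistent
 *	}
 */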
uint32_t num_queues; + uint8_t queue_type; + uint8_t reserved[3]; + } t274; + }; +}; + +#define T262_RAM_AREA_CRITICAL_RAM 1 +#define T262_RAM_AREA_EXTERNAL_RAM 2 +#define T262_RAM_AREA_SHARED_RAM 3 +#define T262_RAM_AREA_DDR_RAM 4 + +#define T263_QUEUE_TYPE_REQ 1 +#define T263_QUEUE_TYPE_RSP 2 +#define T263_QUEUE_TYPE_ATIO 3 + +#define T268_BUF_TYPE_EXTD_TRACE 1 +#define T268_BUF_TYPE_EXCH_BUFOFF 2 +#define T268_BUF_TYPE_EXTD_LOGIN 3 + +#define T274_QUEUE_TYPE_REQ_SHAD 1 +#define T274_QUEUE_TYPE_RSP_SHAD 2 +#define T274_QUEUE_TYPE_ATIO_SHAD 3 + +#endif diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 2b6e478d9e3..4d2c98cbec4 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -1,15 +1,15 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ /* * Driver version */ -#define QLA2XXX_VERSION "8.04.00.08-k" +#define QLA2XXX_VERSION "8.07.00.08-k" #define QLA_DRIVER_MAJOR_VER 8 -#define QLA_DRIVER_MINOR_VER 4 +#define QLA_DRIVER_MINOR_VER 7 #define QLA_DRIVER_PATCH_VER 0 #define QLA_DRIVER_BETA_VER 0 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index d182c96e17e..e2beab96209 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -2,12 +2,9 @@ * This file contains tcm implementation using v4 configfs fabric infrastructure * for QLogic target mode HBAs * - * ?? Copyright 2010-2011 RisingTide Systems LLC. + * (c) Copyright 2010-2013 Datera, Inc. * - * Licensed to the Linux Foundation under the General Public License (GPL) - * version 2. - * - * Author: Nicholas A. Bellinger <nab@risingtidesystems.com> + * Author: Nicholas A. Bellinger <nab@daterainc.com> * * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from * the TCM_FC / Open-FCoE.org fabric module. @@ -56,16 +53,6 @@ struct workqueue_struct *tcm_qla2xxx_free_wq; struct workqueue_struct *tcm_qla2xxx_cmd_wq; -static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg) -{ - return 1; -} - -static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg) -{ - return 0; -} - /* * Parse WWN. 
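* A WWPN here is eight bytes, i.e. 16 hex digits (e.g. 21:00:00:24:ff:31:4c:48).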
* If strict, we require lower-case hex and colon separators to be sure @@ -177,7 +164,7 @@ static int tcm_qla2xxx_npiv_parse_wwn( *wwnn = 0; /* count may include a LF at end of string */ - if (name[cnt-1] == '\n') + if (name[cnt-1] == '\n' || name[cnt-1] == 0) cnt--; /* validate we have enough characters for WWPN */ @@ -195,20 +182,6 @@ static int tcm_qla2xxx_npiv_parse_wwn( return 0; } -static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len, - u64 wwpn, u64 wwnn) -{ - u8 b[8], b2[8]; - - put_unaligned_be64(wwpn, b); - put_unaligned_be64(wwnn, b2); - return snprintf(buf, len, - "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x," - "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", - b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7], - b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]); -} - static char *tcm_qla2xxx_npiv_get_fabric_name(void) { return "qla2xxx_npiv"; @@ -240,15 +213,6 @@ static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg) return lport->lport_naa_name; } -static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - struct tcm_qla2xxx_lport *lport = tpg->lport; - - return &lport->lport_npiv_name[0]; -} - static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg) { struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, @@ -333,7 +297,7 @@ static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->generate_node_acls; + return tpg->tpg_attrib.generate_node_acls; } static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) @@ -341,7 +305,7 @@ static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls; + return tpg->tpg_attrib.cache_dynamic_acls; } static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) @@ -349,7 +313,7 @@ static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect; + return tpg->tpg_attrib.demo_mode_write_protect; } static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) @@ -357,7 +321,15 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect; + return tpg->tpg_attrib.prod_mode_write_protect; +} + +static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg) +{ + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + + return tpg->tpg_attrib.demo_mode_login_only; } static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( @@ -489,42 +461,22 @@ static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess) return 0; } -/* - * The LIO target core uses DMA_TO_DEVICE to mean that data is going - * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean - * that data is coming from the target (eg handling a READ). 
However, - * this is just the opposite of what we have to tell the DMA mapping - * layer -- eg when handling a READ, the HBA will have to DMA the data - * out of memory so it can send it to the initiator, which means we - * need to use DMA_TO_DEVICE when we map the data. - */ -static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd) -{ - if (se_cmd->se_cmd_flags & SCF_BIDI) - return DMA_BIDIRECTIONAL; - - switch (se_cmd->data_direction) { - case DMA_TO_DEVICE: - return DMA_FROM_DEVICE; - case DMA_FROM_DEVICE: - return DMA_TO_DEVICE; - case DMA_NONE: - default: - return DMA_NONE; - } -} - static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); cmd->bufflen = se_cmd->data_length; - cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); + cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); cmd->sg_cnt = se_cmd->t_data_nents; cmd->sg = se_cmd->t_data_sg; + cmd->prot_sg_cnt = se_cmd->t_prot_nents; + cmd->prot_sg = se_cmd->t_prot_sg; + cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; + se_cmd->pi_err = 0; + /* * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup * the SGL mappings into PCIe memory for incoming FCP WRITE data. @@ -620,8 +572,13 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) return; } - transport_generic_request_failure(&cmd->se_cmd, - TCM_CHECK_CONDITION_ABORT_CMD); + if (cmd->se_cmd.pi_err) + transport_generic_request_failure(&cmd->se_cmd, + cmd->se_cmd.pi_err); + else + transport_generic_request_failure(&cmd->se_cmd, + TCM_CHECK_CONDITION_ABORT_CMD); + return; } @@ -637,6 +594,27 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) queue_work(tcm_qla2xxx_free_wq, &cmd->work); } +static void tcm_qla2xxx_handle_dif_work(struct work_struct *work) +{ + struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + + /* take an extra kref to prevent the cmd from being freed too early: + * we need to wait for the SCSI status/check condition response + * generated by transport_generic_request_failure() to finish. 
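+ * The extra reference is dropped again through the normal command + * release path once that response has gone out.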
+ */ + kref_get(&cmd->se_cmd.cmd_kref); + transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); +} + +/* + * Called from qla_target.c:qlt_do_ctio_completion() + */ +static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd) +{ + INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work); + queue_work(tcm_qla2xxx_free_wq, &cmd->work); +} + /* * Called from qla_target.c:qlt_issue_task_mgmt() */ @@ -656,13 +634,18 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) struct qla_tgt_cmd, se_cmd); cmd->bufflen = se_cmd->data_length; - cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); + cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); cmd->sg_cnt = se_cmd->t_data_nents; cmd->sg = se_cmd->t_data_sg; cmd->offset = 0; + cmd->prot_sg_cnt = se_cmd->t_prot_nents; + cmd->prot_sg = se_cmd->t_prot_sg; + cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; + se_cmd->pi_err = 0; + /* * Now queue completed DATA_IN the qla2xxx LLD and response ring */ @@ -680,7 +663,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd->sg = NULL; cmd->sg_cnt = 0; cmd->offset = 0; - cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd); + cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); if (se_cmd->data_direction == DMA_FROM_DEVICE) { @@ -688,8 +671,12 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen * for qla_tgt_xmit_response LLD code */ + if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT; + se_cmd->residual_count = 0; + } se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; - se_cmd->residual_count = se_cmd->data_length; + se_cmd->residual_count += se_cmd->data_length; cmd->bufflen = 0; } @@ -699,7 +686,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); } -static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) +static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) { struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, @@ -731,8 +718,20 @@ static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) * CTIO response packet. 
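* (mcmd itself is released afterwards through the ->free_mcmd() callback)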
*/ qlt_xmit_tm_rsp(mcmd); +} - return 0; +static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) +{ + struct qla_tgt_cmd *cmd = container_of(se_cmd, + struct qla_tgt_cmd, se_cmd); + struct scsi_qla_host *vha = cmd->vha; + struct qla_hw_data *ha = vha->hw; + + if (!cmd->sg_mapped) + return; + + pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); + cmd->sg_mapped = 0; } /* Local pointer to allocated TCM configfs fabric module */ @@ -795,12 +794,17 @@ static void tcm_qla2xxx_put_session(struct se_session *se_sess) static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess) { - tcm_qla2xxx_put_session(sess->se_sess); + if (!sess) + return; + + assert_spin_locked(&sess->vha->hw->hardware_lock); + kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session); } static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess) { - tcm_qla2xxx_shutdown_session(sess->se_sess); + assert_spin_locked(&sess->vha->hw->hardware_lock); + target_sess_cmd_list_set_waiting(sess->se_sess); } static struct se_node_acl *tcm_qla2xxx_make_nodeacl( @@ -863,7 +867,7 @@ static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ struct tcm_qla2xxx_tpg, se_tpg); \ \ - return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \ + return sprintf(page, "%u\n", tpg->tpg_attrib.name); \ } \ \ static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \ @@ -935,11 +939,19 @@ DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect); DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR); +/* + * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only + */ +DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only); +DEF_QLA_TPG_ATTRIB(demo_mode_login_only); +QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR); + static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr, &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr, &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr, &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr, + &tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr, NULL, }; @@ -956,16 +968,41 @@ static ssize_t tcm_qla2xxx_tpg_show_enable( atomic_read(&tpg->lport_tpg_enabled)); } +static void tcm_qla2xxx_depend_tpg(struct work_struct *work) +{ + struct tcm_qla2xxx_tpg *base_tpg = container_of(work, + struct tcm_qla2xxx_tpg, tpg_base_work); + struct se_portal_group *se_tpg = &base_tpg->se_tpg; + struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; + + if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, + &se_tpg->tpg_group.cg_item)) { + atomic_set(&base_tpg->lport_tpg_enabled, 1); + qlt_enable_vha(base_vha); + } + complete(&base_tpg->tpg_base_comp); +} + +static void tcm_qla2xxx_undepend_tpg(struct work_struct *work) +{ + struct tcm_qla2xxx_tpg *base_tpg = container_of(work, + struct tcm_qla2xxx_tpg, tpg_base_work); + struct se_portal_group *se_tpg = &base_tpg->se_tpg; + struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; + + if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) { + atomic_set(&base_tpg->lport_tpg_enabled, 0); + configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, + &se_tpg->tpg_group.cg_item); + } + complete(&base_tpg->tpg_base_comp); +} + static ssize_t tcm_qla2xxx_tpg_store_enable( struct se_portal_group *se_tpg, const char *page, size_t count) { - struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; - struct tcm_qla2xxx_lport *lport = container_of(se_wwn, - struct tcm_qla2xxx_lport, lport_wwn); - struct scsi_qla_host *vha = lport->qla_vha; - struct 
qla_hw_data *ha = vha->hw; struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); unsigned long op; @@ -980,19 +1017,28 @@ static ssize_t tcm_qla2xxx_tpg_store_enable( pr_err("Illegal value for tpg_enable: %lu\n", op); return -EINVAL; } - if (op) { - atomic_set(&tpg->lport_tpg_enabled, 1); - qlt_enable_vha(vha); + if (atomic_read(&tpg->lport_tpg_enabled)) + return -EEXIST; + + INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg); } else { - if (!ha->tgt.qla_tgt) { - pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n"); - return -ENODEV; - } - atomic_set(&tpg->lport_tpg_enabled, 0); - qlt_stop_phase1(ha->tgt.qla_tgt); + if (!atomic_read(&tpg->lport_tpg_enabled)) + return count; + + INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg); } + init_completion(&tpg->tpg_base_comp); + schedule_work(&tpg->tpg_base_work); + wait_for_completion(&tpg->tpg_base_comp); + if (op) { + if (!atomic_read(&tpg->lport_tpg_enabled)) + return -ENODEV; + } else { + if (atomic_read(&tpg->lport_tpg_enabled)) + return -EPERM; + } return count; } @@ -1019,7 +1065,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg( if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) return ERR_PTR(-EINVAL); - if (!lport->qla_npiv_vp && (tpgt != 1)) { + if ((tpgt != 1)) { pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n"); return ERR_PTR(-ENOSYS); } @@ -1035,9 +1081,10 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg( * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic * NodeACLs */ - QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1; - QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1; - QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1; + tpg->tpg_attrib.generate_node_acls = 1; + tpg->tpg_attrib.demo_mode_write_protect = 1; + tpg->tpg_attrib.cache_dynamic_acls = 1; + tpg->tpg_attrib.demo_mode_login_only = 1; ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn, &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); @@ -1045,11 +1092,8 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg( kfree(tpg); return NULL; } - /* - * Setup local TPG=1 pointer for non NPIV mode. - */ - if (lport->qla_npiv_vp == NULL) - lport->tpg_1 = tpg; + + lport->tpg_1 = tpg; return &tpg->se_tpg; } @@ -1060,24 +1104,75 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg) struct tcm_qla2xxx_tpg, se_tpg); struct tcm_qla2xxx_lport *lport = tpg->lport; struct scsi_qla_host *vha = lport->qla_vha; - struct qla_hw_data *ha = vha->hw; /* * Call into qla2x_target.c LLD logic to shutdown the active * FC Nexuses and disable target mode operation for this qla_hw_data */ - if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop) - qlt_stop_phase1(ha->tgt.qla_tgt); + if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop) + qlt_stop_phase1(vha->vha_tgt.qla_tgt); core_tpg_deregister(se_tpg); /* * Clear local TPG=1 pointer for non NPIV mode. 
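* (tpg_1 is now assigned unconditionally in tcm_qla2xxx_make_tpg(), so it * is cleared unconditionally here as well)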
*/ - if (lport->qla_npiv_vp == NULL) - lport->tpg_1 = NULL; - + lport->tpg_1 = NULL; kfree(tpg); } +static ssize_t tcm_qla2xxx_npiv_tpg_show_enable( + struct se_portal_group *se_tpg, + char *page) +{ + return tcm_qla2xxx_tpg_show_enable(se_tpg, page); +} + +static ssize_t tcm_qla2xxx_npiv_tpg_store_enable( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; + struct tcm_qla2xxx_lport *lport = container_of(se_wwn, + struct tcm_qla2xxx_lport, lport_wwn); + struct scsi_qla_host *vha = lport->qla_vha; + struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, + struct tcm_qla2xxx_tpg, se_tpg); + unsigned long op; + int rc; + + rc = kstrtoul(page, 0, &op); + if (rc < 0) { + pr_err("kstrtoul() returned %d\n", rc); + return -EINVAL; + } + if ((op != 1) && (op != 0)) { + pr_err("Illegal value for tpg_enable: %lu\n", op); + return -EINVAL; + } + if (op) { + if (atomic_read(&tpg->lport_tpg_enabled)) + return -EEXIST; + + atomic_set(&tpg->lport_tpg_enabled, 1); + qlt_enable_vha(vha); + } else { + if (!atomic_read(&tpg->lport_tpg_enabled)) + return count; + + atomic_set(&tpg->lport_tpg_enabled, 0); + qlt_stop_phase1(vha->vha_tgt.qla_tgt); + } + + return count; +} + +TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = { + &tcm_qla2xxx_npiv_tpg_enable.attr, + NULL, +}; + static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( struct se_wwn *wwn, struct config_group *group, @@ -1102,12 +1197,22 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( tpg->lport = lport; tpg->lport_tpgt = tpgt; + /* + * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic + * NodeACLs + */ + tpg->tpg_attrib.generate_node_acls = 1; + tpg->tpg_attrib.demo_mode_write_protect = 1; + tpg->tpg_attrib.cache_dynamic_acls = 1; + tpg->tpg_attrib.demo_mode_login_only = 1; + ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn, &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); if (ret < 0) { kfree(tpg); return NULL; } + lport->tpg_1 = tpg; return &tpg->se_tpg; } @@ -1118,13 +1223,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( scsi_qla_host_t *vha, const uint8_t *s_id) { - struct qla_hw_data *ha = vha->hw; struct tcm_qla2xxx_lport *lport; struct se_node_acl *se_nacl; struct tcm_qla2xxx_nacl *nacl; u32 key; - lport = ha->tgt.target_lport_ptr; + lport = vha->vha_tgt.target_lport_ptr; if (!lport) { pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); dump_stack(); @@ -1228,13 +1332,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id( scsi_qla_host_t *vha, const uint16_t loop_id) { - struct qla_hw_data *ha = vha->hw; struct tcm_qla2xxx_lport *lport; struct se_node_acl *se_nacl; struct tcm_qla2xxx_nacl *nacl; struct tcm_qla2xxx_fc_loopid *fc_loopid; - lport = ha->tgt.target_lport_ptr; + lport = vha->vha_tgt.target_lport_ptr; if (!lport) { pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); dump_stack(); @@ -1348,6 +1451,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) { struct qla_tgt *tgt = sess->tgt; struct qla_hw_data *ha = tgt->ha; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); struct se_session *se_sess; struct se_node_acl *se_nacl; struct tcm_qla2xxx_lport *lport; @@ -1364,13 +1468,13 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) se_nacl = se_sess->se_node_acl; nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - lport = ha->tgt.target_lport_ptr; + lport = 
vha->vha_tgt.target_lport_ptr; if (!lport) { pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); dump_stack(); return; } - target_wait_for_sess_cmds(se_sess, 0); + target_wait_for_sess_cmds(se_sess); transport_deregister_session_configfs(sess->se_sess); transport_deregister_session(sess->se_sess); @@ -1397,8 +1501,10 @@ static int tcm_qla2xxx_check_initiator_node_acl( struct qla_tgt_sess *sess = qla_tgt_sess; unsigned char port_name[36]; unsigned long flags; + int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count : + TCM_QLA2XXX_DEFAULT_TAGS; - lport = ha->tgt.target_lport_ptr; + lport = vha->vha_tgt.target_lport_ptr; if (!lport) { pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); dump_stack(); @@ -1414,7 +1520,9 @@ static int tcm_qla2xxx_check_initiator_node_acl( } se_tpg = &tpg->se_tpg; - se_sess = transport_init_session(); + se_sess = transport_init_session_tags(num_tags, + sizeof(struct qla_tgt_cmd), + TARGET_PROT_NORMAL); if (IS_ERR(se_sess)) { pr_err("Unable to initialize struct se_session\n"); return PTR_ERR(se_sess); @@ -1462,7 +1570,8 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, { struct qla_tgt *tgt = sess->tgt; struct qla_hw_data *ha = tgt->ha; - struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr; struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); @@ -1470,15 +1579,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24) - pr_info("Updating session %p from port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", - sess, - sess->port_name[0], sess->port_name[1], - sess->port_name[2], sess->port_name[3], - sess->port_name[4], sess->port_name[5], - sess->port_name[6], sess->port_name[7], - sess->loop_id, loop_id, - sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, - s_id.b.domain, s_id.b.area, s_id.b.al_pa); + pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", + sess, sess->port_name, + sess->loop_id, loop_id, sess->s_id.b.domain, + sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain, + s_id.b.area, s_id.b.al_pa); if (sess->loop_id != loop_id) { /* @@ -1535,6 +1640,7 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { .handle_cmd = tcm_qla2xxx_handle_cmd, .handle_data = tcm_qla2xxx_handle_data, + .handle_dif_err = tcm_qla2xxx_handle_dif_err, .handle_tmr = tcm_qla2xxx_handle_tmr, .free_cmd = tcm_qla2xxx_free_cmd, .free_mcmd = tcm_qla2xxx_free_mcmd, @@ -1573,15 +1679,18 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) return 0; } -static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha) +static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha, + void *target_lport_ptr, + u64 npiv_wwpn, u64 npiv_wwnn) { struct qla_hw_data *ha = vha->hw; - struct tcm_qla2xxx_lport *lport; + struct tcm_qla2xxx_lport *lport = + (struct tcm_qla2xxx_lport *)target_lport_ptr; /* - * Setup local pointer to vha, NPIV VP pointer (if present) and - * vha->tcm_lport pointer + * Setup tgt_ops, local pointer to vha and target_lport_ptr */ - lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr; + ha->tgt.tgt_ops = &tcm_qla2xxx_template; + 
vha->vha_tgt.target_lport_ptr = target_lport_ptr; lport->qla_vha = vha; return 0; @@ -1613,8 +1722,8 @@ static struct se_wwn *tcm_qla2xxx_make_lport( if (ret != 0) goto out; - ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn, - tcm_qla2xxx_lport_register_cb, lport); + ret = qlt_lport_register(lport, wwpn, 0, 0, + tcm_qla2xxx_lport_register_cb); if (ret != 0) goto out_lport; @@ -1632,7 +1741,6 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn) { struct tcm_qla2xxx_lport *lport = container_of(wwn, struct tcm_qla2xxx_lport, lport_wwn); struct scsi_qla_host *vha = lport->qla_vha; - struct qla_hw_data *ha = vha->hw; struct se_node_acl *node; u32 key = 0; @@ -1641,8 +1749,8 @@ * shutdown of struct qla_tgt after the call to * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.. */ - if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped) - qlt_stop_phase2(ha->tgt.qla_tgt); + if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped) + qlt_stop_phase2(vha->vha_tgt.qla_tgt); qlt_lport_deregister(vha); @@ -1653,17 +1761,79 @@ kfree(lport); } +static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, + void *target_lport_ptr, + u64 npiv_wwpn, u64 npiv_wwnn) +{ + struct fc_vport *vport; + struct Scsi_Host *sh = base_vha->host; + struct scsi_qla_host *npiv_vha; + struct tcm_qla2xxx_lport *lport = + (struct tcm_qla2xxx_lport *)target_lport_ptr; + struct tcm_qla2xxx_lport *base_lport = + (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr; + struct tcm_qla2xxx_tpg *base_tpg; + struct fc_vport_identifiers vport_id; + + if (!qla_tgt_mode_enabled(base_vha)) { + pr_err("qla2xxx base_vha not enabled for target mode\n"); + return -EPERM; + } + + if (!base_lport || !base_lport->tpg_1 || + !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) { + pr_err("qla2xxx base_lport or tpg_1 not available\n"); + return -EPERM; + } + base_tpg = base_lport->tpg_1; + + memset(&vport_id, 0, sizeof(vport_id)); + vport_id.port_name = npiv_wwpn; + vport_id.node_name = npiv_wwnn; + vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; + vport_id.vport_type = FC_PORTTYPE_NPIV; + vport_id.disable = false; + + vport = fc_vport_create(sh, 0, &vport_id); + if (!vport) { + pr_err("fc_vport_create failed for qla2xxx_npiv\n"); + return -ENODEV; + } + /* + * Setup local pointer to NPIV vhba + target_lport_ptr + */ + npiv_vha = (struct scsi_qla_host *)vport->dd_data; + npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr; + lport->qla_vha = npiv_vha; + scsi_host_get(npiv_vha->host); + return 0; +} + + static struct se_wwn *tcm_qla2xxx_npiv_make_lport( struct target_fabric_configfs *tf, struct config_group *group, const char *name) { struct tcm_qla2xxx_lport *lport; - u64 npiv_wwpn, npiv_wwnn; + u64 phys_wwpn, npiv_wwpn, npiv_wwnn; + char *p, tmp[128]; int ret; - if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1, - &npiv_wwpn, &npiv_wwnn) < 0) + snprintf(tmp, 128, "%s", name); + + p = strchr(tmp, '@'); + if (!p) { + pr_err("Unable to locate NPIV '@' separator\n"); + return ERR_PTR(-EINVAL); + } + *p++ = '\0'; + + if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0) + return ERR_PTR(-EINVAL); + + if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1, + &npiv_wwpn, &npiv_wwnn) < 0) return ERR_PTR(-EINVAL); lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); @@ -1673,16 +1843,21 @@ } lport->lport_npiv_wwpn = npiv_wwpn; lport->lport_npiv_wwnn = 
npiv_wwnn; - tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0], - TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn); sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn); -/* FIXME: tcm_qla2xxx_npiv_make_lport */ - ret = -ENOSYS; + ret = tcm_qla2xxx_init_lport(lport); if (ret != 0) goto out; + ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn, + tcm_qla2xxx_lport_register_npiv_cb); + if (ret != 0) + goto out_lport; + return &lport->lport_wwn; +out_lport: + vfree(lport->lport_loopid_map); + btree_destroy32(&lport->lport_fcport_map); out: kfree(lport); return ERR_PTR(ret); @@ -1692,14 +1867,16 @@ static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn) { struct tcm_qla2xxx_lport *lport = container_of(wwn, struct tcm_qla2xxx_lport, lport_wwn); - struct scsi_qla_host *vha = lport->qla_vha; - struct Scsi_Host *sh = vha->host; + struct scsi_qla_host *npiv_vha = lport->qla_vha; + struct qla_hw_data *ha = npiv_vha->hw; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + + scsi_host_put(npiv_vha->host); /* - * Notify libfc that we want to release the lport->npiv_vport + * Notify libfc that we want to release the vha->fc_vport */ - fc_vport_terminate(lport->npiv_vport); - - scsi_host_put(sh); + fc_vport_terminate(npiv_vha->fc_vport); + scsi_host_put(base_vha->host); kfree(lport); } @@ -1736,7 +1913,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { tcm_qla2xxx_check_demo_write_protect, .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_prod_write_protect, - .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true, + .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl, .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, @@ -1755,6 +1932,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { .queue_data_in = tcm_qla2xxx_queue_data_in, .queue_status = tcm_qla2xxx_queue_status, .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, + .aborted_task = tcm_qla2xxx_aborted_task, /* * Setup function pointers for generic logic in * target_core_fabric_configfs.c @@ -1774,20 +1952,22 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = { static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name, .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident, - .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn, + .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, .tpg_get_tag = tcm_qla2xxx_get_tag, .tpg_get_default_depth = tcm_qla2xxx_get_default_depth, .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id, .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len, .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id, - .tpg_check_demo_mode = tcm_qla2xxx_check_false, - .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true, - .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true, - .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false, - .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true, + .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, + .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, + .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode, + .tpg_check_prod_mode_write_protect = + tcm_qla2xxx_check_prod_write_protect, + .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl, .tpg_release_fabric_acl = 
tcm_qla2xxx_release_fabric_acl, .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, + .check_stop_free = tcm_qla2xxx_check_stop_free, .release_cmd = tcm_qla2xxx_release_cmd, .put_session = tcm_qla2xxx_put_session, .shutdown_session = tcm_qla2xxx_shutdown_session, @@ -1802,6 +1982,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { .queue_data_in = tcm_qla2xxx_queue_data_in, .queue_status = tcm_qla2xxx_queue_status, .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, + .aborted_task = tcm_qla2xxx_aborted_task, /* * Setup function pointers for generic logic in * target_core_fabric_configfs.c @@ -1841,16 +2022,16 @@ static int tcm_qla2xxx_register_configfs(void) /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; + fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = tcm_qla2xxx_tpg_attrib_attrs; - TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; /* * Register the fabric for use within TCM */ @@ -1881,15 +2062,16 @@ static int tcm_qla2xxx_register_configfs(void) /* * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl */ - TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; - TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; + npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = + tcm_qla2xxx_npiv_tpg_attrs; + npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; /* * Register the npiv_fabric for use within TCM */ diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 9ba075fe978..10c00214564 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ 
b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -4,8 +4,11 @@ #define TCM_QLA2XXX_VERSION "v0.1" /* length of ASCII WWPNs including pad */ #define TCM_QLA2XXX_NAMELEN 32 -/* lenth of ASCII NPIV 'WWPN+WWNN' including pad */ -#define TCM_QLA2XXX_NPIV_NAMELEN 66 +/* + * Number of pre-allocated per-session tags, based upon the worst-case + * per port number of iocbs + */ +#define TCM_QLA2XXX_DEFAULT_TAGS 2088 #include "qla_target.h" @@ -29,6 +32,7 @@ struct tcm_qla2xxx_tpg_attrib { int cache_dynamic_acls; int demo_mode_write_protect; int prod_mode_write_protect; + int demo_mode_login_only; }; struct tcm_qla2xxx_tpg { @@ -42,10 +46,11 @@ struct tcm_qla2xxx_tpg { struct tcm_qla2xxx_tpg_attrib tpg_attrib; /* Returned by tcm_qla2xxx_make_tpg() */ struct se_portal_group se_tpg; + /* Items for dealing with configfs_depend_item */ + struct completion tpg_base_comp; + struct work_struct tpg_base_work; }; -#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib) - struct tcm_qla2xxx_fc_loopid { struct se_node_acl *se_nacl; }; @@ -63,20 +68,14 @@ struct tcm_qla2xxx_lport { char lport_name[TCM_QLA2XXX_NAMELEN]; /* ASCII formatted naa WWPN for VPD page 83 etc */ char lport_naa_name[TCM_QLA2XXX_NAMELEN]; - /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */ - char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN]; /* map for fc_port pointers in 24-bit FC Port ID space */ struct btree_head32 lport_fcport_map; /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */ struct tcm_qla2xxx_fc_loopid *lport_loopid_map; /* Pointer to struct scsi_qla_host from qla2xxx LLD */ struct scsi_qla_host *qla_vha; - /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */ - struct scsi_qla_host *qla_npiv_vp; /* Pointer to struct qla_tgt pointer */ struct qla_tgt lport_qla_tgt; - /* Pointer to struct fc_vport for NPIV vport from libfc */ - struct fc_vport *npiv_vport; /* Pointer to TPG=1 for non NPIV mode */ struct tcm_qla2xxx_tpg *tpg_1; /* Returned by tcm_qla2xxx_make_lport() */ diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c index 5d8fe4f7565..556c1525f88 100644 --- a/drivers/scsi/qla4xxx/ql4_83xx.c +++ b/drivers/scsi/qla4xxx/ql4_83xx.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. 
*/ @@ -249,110 +249,6 @@ void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha) qla4_83xx_flash_unlock(ha); } -/** - * qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory - * @ha: Pointer to adapter structure - * @addr: Flash address to write to - * @data: Data to be written - * @count: word_count to be written - * - * Return: On success return QLA_SUCCESS - * On error return QLA_ERROR - **/ -static int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, - uint32_t *data, uint32_t count) -{ - int i, j; - uint32_t agt_ctrl; - unsigned long flags; - int ret_val = QLA_SUCCESS; - - /* Only 128-bit aligned access */ - if (addr & 0xF) { - ret_val = QLA_ERROR; - goto exit_ms_mem_write; - } - - write_lock_irqsave(&ha->hw_lock, flags); - - /* Write address */ - ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0); - if (ret_val == QLA_ERROR) { - ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n", - __func__); - goto exit_ms_mem_write_unlock; - } - - for (i = 0; i < count; i++, addr += 16) { - if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, - QLA8XXX_ADDR_QDR_NET_MAX)) || - (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, - QLA8XXX_ADDR_DDR_NET_MAX)))) { - ret_val = QLA_ERROR; - goto exit_ms_mem_write_unlock; - } - - ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO, - addr); - /* Write data */ - ret_val |= qla4_83xx_wr_reg_indirect(ha, - MD_MIU_TEST_AGT_WRDATA_LO, - *data++); - ret_val |= qla4_83xx_wr_reg_indirect(ha, - MD_MIU_TEST_AGT_WRDATA_HI, - *data++); - ret_val |= qla4_83xx_wr_reg_indirect(ha, - MD_MIU_TEST_AGT_WRDATA_ULO, - *data++); - ret_val |= qla4_83xx_wr_reg_indirect(ha, - MD_MIU_TEST_AGT_WRDATA_UHI, - *data++); - if (ret_val == QLA_ERROR) { - ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n", - __func__); - goto exit_ms_mem_write_unlock; - } - - /* Check write status */ - ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, - MIU_TA_CTL_WRITE_ENABLE); - ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, - MIU_TA_CTL_WRITE_START); - if (ret_val == QLA_ERROR) { - ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n", - __func__); - goto exit_ms_mem_write_unlock; - } - - for (j = 0; j < MAX_CTL_CHECK; j++) { - ret_val = qla4_83xx_rd_reg_indirect(ha, - MD_MIU_TEST_AGT_CTRL, - &agt_ctrl); - if (ret_val == QLA_ERROR) { - ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n", - __func__); - goto exit_ms_mem_write_unlock; - } - if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) - break; - } - - /* Status check failed */ - if (j >= MAX_CTL_CHECK) { - printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n", - __func__); - ret_val = QLA_ERROR; - goto exit_ms_mem_write_unlock; - } - } - -exit_ms_mem_write_unlock: - write_unlock_irqrestore(&ha->hw_lock, flags); - -exit_ms_mem_write: - return ret_val; -} - #define INTENT_TO_RECOVER 0x01 #define PROCEED_TO_RECOVER 0x02 @@ -465,7 +361,7 @@ int qla4_83xx_drv_lock(struct scsi_qla_host *ha) } /* Recovery Failed, some other function * has the lock, wait for 2secs and retry */ - ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timout\n", + ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n", __func__, ha->func_num); timeout = 0; } @@ -760,7 +656,7 @@ static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha) __func__)); /* 128 bit/16 byte write to MS memory */ - ret_val = qla4_83xx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache, + ret_val = qla4_8xxx_ms_mem_write_128b(ha, 
dest, (uint32_t *)p_cache, + count); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n", @@ -1304,12 +1200,24 @@ static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha) static int qla4_83xx_restart(struct scsi_qla_host *ha) { int ret_val = QLA_SUCCESS; + uint32_t idc_ctrl; qla4_83xx_process_stop_seq(ha); - /* Collect minidump*/ - if (!test_and_clear_bit(AF_83XX_NO_FW_DUMP, &ha->flags)) + /* + * Collect minidump. + * If IDC_CTRL BIT1 is set, clear it on going to INIT state and + * don't collect minidump + */ + idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); + if (idc_ctrl & GRACEFUL_RESET_BIT1) { + qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, + (idc_ctrl & ~GRACEFUL_RESET_BIT1)); + ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n", + __func__); + } else { qla4_8xxx_get_minidump(ha); + } qla4_83xx_process_init_seq(ha); @@ -1473,9 +1381,9 @@ int qla4_83xx_isp_reset(struct scsi_qla_host *ha) __func__)); } - /* For ISP8324, Reset owner is NIC, iSCSI or FCOE based on priority - * and which drivers are present. Unlike ISP8022, the function setting - * NEED_RESET, may not be the Reset owner. */ + /* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on + * priority and which drivers are present. Unlike ISP8022, the function + * setting NEED_RESET, may not be the Reset owner. */ if (qla4_83xx_can_perform_reset(ha)) set_bit(AF_8XXX_RST_OWNER, &ha->flags); @@ -1629,10 +1537,58 @@ static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha) ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n"); } +/** + * qla4_83xx_eport_init - Initialize EPort. + * @ha: Pointer to host adapter structure. + * + * If EPort hardware is in reset state before disabling pause, there would be + * serious hardware wedging issues. To prevent this, perform eport init every + * time before disabling pause frames. + **/ +static void qla4_83xx_eport_init(struct scsi_qla_host *ha) +{ + /* Clear the 8 registers */ + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0); + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0); + + /* Write any value to Reset Control register */ + qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF); + + ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n"); +} + void qla4_83xx_disable_pause(struct scsi_qla_host *ha) { ha->isp_ops->idc_lock(ha); + /* Before disabling pause frames, ensure that eport is not in reset */ + qla4_83xx_eport_init(ha); qla4_83xx_dump_pause_control_regs(ha); __qla4_83xx_disable_pause(ha); ha->isp_ops->idc_unlock(ha); } + +/** + * qla4_83xx_is_detached - Check if we are marked invisible. + * @ha: Pointer to host adapter structure. 
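+ * + * Return: QLA_SUCCESS if AF_INIT_DONE is set but this function's bit has + * been cleared from QLA8XXX_CRB_DRV_ACTIVE (i.e. we are detached), else + * QLA_ERROR.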
+ **/ +int qla4_83xx_is_detached(struct scsi_qla_host *ha) +{ + uint32_t drv_active; + + drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); + + if (test_bit(AF_INIT_DONE, &ha->flags) && + !(drv_active & (1 << ha->func_num))) { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n", + __func__, drv_active)); + return QLA_SUCCESS; + } + + return QLA_ERROR; +} diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h index 6a00f903f2a..775fdf9fcc8 100644 --- a/drivers/scsi/qla4xxx/ql4_83xx.h +++ b/drivers/scsi/qla4xxx/ql4_83xx.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -55,6 +55,16 @@ #define QLA83XX_SET_PAUSE_VAL 0x0 #define QLA83XX_SET_TC_MAX_CELL_VAL 0x03FF03FF +#define QLA83XX_RESET_CONTROL 0x28084E50 +#define QLA83XX_RESET_REG 0x28084E60 +#define QLA83XX_RESET_PORT0 0x28084E70 +#define QLA83XX_RESET_PORT1 0x28084E80 +#define QLA83XX_RESET_PORT2 0x28084E90 +#define QLA83XX_RESET_PORT3 0x28084EA0 +#define QLA83XX_RESET_SRE_SHIM 0x28084EB0 +#define QLA83XX_RESET_EPG_SHIM 0x28084EC0 +#define QLA83XX_RESET_ETHER_PCS 0x28084ED0 + /* qla_83xx_reg_tbl registers */ #define QLA83XX_PEG_HALT_STATUS1 0x34A8 #define QLA83XX_PEG_HALT_STATUS2 0x34AC @@ -244,6 +254,50 @@ struct qla83xx_minidump_entry_pollrd { uint32_t rsvd_1; }; +struct qla8044_minidump_entry_rddfe { + struct qla8xxx_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t value; + uint8_t stride; + uint8_t stride2; + uint16_t count; + uint32_t poll; + uint32_t mask; + uint32_t modify_mask; + uint32_t data_size; + uint32_t rsvd; + +} __packed; + +struct qla8044_minidump_entry_rdmdio { + struct qla8xxx_minidump_entry_hdr h; + + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint8_t stride_1; + uint8_t stride_2; + uint16_t count; + uint32_t poll; + uint32_t mask; + uint32_t value_2; + uint32_t data_size; + +} __packed; + +struct qla8044_minidump_entry_pollwr { + struct qla8xxx_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint32_t value_2; + uint32_t poll; + uint32_t mask; + uint32_t data_size; + uint32_t rsvd; + +} __packed; + /* RDMUX2 Entry */ struct qla83xx_minidump_entry_rdmux2 { struct qla8xxx_minidump_entry_hdr h; @@ -280,4 +334,38 @@ struct qla4_83xx_idc_information { uint32_t info3; /* IDC additional info */ }; +#define QLA83XX_PEX_DMA_ENGINE_INDEX 8 +#define QLA83XX_PEX_DMA_BASE_ADDRESS 0x77320000 +#define QLA83XX_PEX_DMA_NUM_OFFSET 0x10000 +#define QLA83XX_PEX_DMA_CMD_ADDR_LOW 0x0 +#define QLA83XX_PEX_DMA_CMD_ADDR_HIGH 0x04 +#define QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL 0x08 + +#define QLA83XX_PEX_DMA_READ_SIZE (16 * 1024) +#define QLA83XX_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */ + +/* Read Memory: For Pex-DMA */ +struct qla4_83xx_minidump_entry_rdmem_pex_dma { + struct qla8xxx_minidump_entry_hdr h; + uint32_t desc_card_addr; + uint16_t dma_desc_cmd; + uint8_t rsvd[2]; + uint32_t start_dma_cmd; + uint8_t rsvd2[12]; + uint32_t read_addr; + uint32_t read_data_size; +}; + +struct qla4_83xx_pex_dma_descriptor { + struct { + uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */ + uint8_t rsvd[2]; + uint16_t dma_desc_cmd; + } cmd; + uint64_t src_addr; + uint64_t dma_bus_addr; /* 0-3: desc-cmd, 4-7: pci-func, + * 8-15: desc-cmd */ + uint8_t rsvd[24]; +} __packed; + #endif diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c index 19ee55a6226..463239c972b 
100644 --- a/drivers/scsi/qla4xxx/ql4_attr.c +++ b/drivers/scsi/qla4xxx/ql4_attr.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2011 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -83,7 +83,7 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj, qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_NEED_RESET); if (is_qla8022(ha) || - (is_qla8032(ha) && + ((is_qla8032(ha) || is_qla8042(ha)) && qla4_83xx_can_perform_reset(ha))) { set_bit(AF_8XXX_RST_OWNER, &ha->flags); set_bit(AF_FW_RECOVERY, &ha->flags); @@ -158,14 +158,12 @@ qla4xxx_fw_version_show(struct device *dev, if (is_qla80XX(ha)) return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", - ha->firmware_version[0], - ha->firmware_version[1], - ha->patch_number, ha->build_number); + ha->fw_info.fw_major, ha->fw_info.fw_minor, + ha->fw_info.fw_patch, ha->fw_info.fw_build); else return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n", - ha->firmware_version[0], - ha->firmware_version[1], - ha->patch_number, ha->build_number); + ha->fw_info.fw_major, ha->fw_info.fw_minor, + ha->fw_info.fw_patch, ha->fw_info.fw_build); } static ssize_t @@ -181,8 +179,8 @@ qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); - return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major, - ha->iscsi_minor); + return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fw_info.iscsi_major, + ha->fw_info.iscsi_minor); } static ssize_t @@ -191,8 +189,8 @@ qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr, { struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n", - ha->bootload_major, ha->bootload_minor, - ha->bootload_patch, ha->bootload_build); + ha->fw_info.bootload_major, ha->fw_info.bootload_minor, + ha->fw_info.bootload_patch, ha->fw_info.bootload_build); } static ssize_t @@ -259,6 +257,63 @@ qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name); } +static ssize_t +qla4xxx_fw_timestamp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "%s %s\n", ha->fw_info.fw_build_date, + ha->fw_info.fw_build_time); +} + +static ssize_t +qla4xxx_fw_build_user_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.fw_build_user); +} + +static ssize_t +qla4xxx_fw_ext_timestamp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.extended_timestamp); +} + +static ssize_t +qla4xxx_fw_load_src_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + char *load_src = NULL; + + switch (ha->fw_info.fw_load_source) { + case 1: + load_src = "Flash Primary"; + break; + case 2: + load_src = "Flash Secondary"; + break; + case 3: + load_src = "Host Download"; + break; + } + + return snprintf(buf, PAGE_SIZE, "%s\n", load_src); +} + +static ssize_t +qla4xxx_fw_uptime_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct 
scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + qla4xxx_about_firmware(ha); + return snprintf(buf, PAGE_SIZE, "%u.%u secs\n", ha->fw_uptime_secs, + ha->fw_uptime_msecs); +} + static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL); static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL); @@ -269,6 +324,12 @@ static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL); static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL); static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL); static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL); +static DEVICE_ATTR(fw_timestamp, S_IRUGO, qla4xxx_fw_timestamp_show, NULL); +static DEVICE_ATTR(fw_build_user, S_IRUGO, qla4xxx_fw_build_user_show, NULL); +static DEVICE_ATTR(fw_ext_timestamp, S_IRUGO, qla4xxx_fw_ext_timestamp_show, + NULL); +static DEVICE_ATTR(fw_load_src, S_IRUGO, qla4xxx_fw_load_src_show, NULL); +static DEVICE_ATTR(fw_uptime, S_IRUGO, qla4xxx_fw_uptime_show, NULL); struct device_attribute *qla4xxx_host_attrs[] = { &dev_attr_fw_version, @@ -281,5 +342,10 @@ struct device_attribute *qla4xxx_host_attrs[] = { &dev_attr_phy_port_num, &dev_attr_iscsi_func_cnt, &dev_attr_hba_model, + &dev_attr_fw_timestamp, + &dev_attr_fw_build_user, + &dev_attr_fw_ext_timestamp, + &dev_attr_fw_load_src, + &dev_attr_fw_uptime, NULL, }; diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c index 8acdc582ff6..9f92cbf9647 100644 --- a/drivers/scsi/qla4xxx/ql4_bsg.c +++ b/drivers/scsi/qla4xxx/ql4_bsg.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2011 QLogic Corporation + * Copyright (c) 2011-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -446,6 +446,363 @@ leave: return rval; } +static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint8_t *rsp_ptr = NULL; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_ERROR; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + + if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) { + ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. 
Invalid Request\n", + __func__); + bsg_reply->result = DID_ERROR << 16; + goto exit_diag_mem_test; + } + + bsg_reply->reply_payload_rcv_len = 0; + memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1], + sizeof(uint32_t) * MBOX_REG_COUNT); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n", + __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2], + mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6], + mbox_cmd[7])); + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0], + &mbox_sts[0]); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n", + __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2], + mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6], + mbox_sts[7])); + + if (status == QLA_SUCCESS) + bsg_reply->result = DID_OK << 16; + else + bsg_reply->result = DID_ERROR << 16; + + /* Send mbox_sts to application */ + bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts); + rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply); + memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts)); + +exit_diag_mem_test: + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: bsg_reply->result = x%x, status = %s\n", + __func__, bsg_reply->result, STATUS(status))); + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); +} + +static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha, + int wait_for_link) +{ + int status = QLA_SUCCESS; + + if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) { + ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, Waiting for another %d timeout", + __func__, ha->idc_extend_tmo); + if (ha->idc_extend_tmo) { + if (!wait_for_completion_timeout(&ha->idc_comp, + (ha->idc_extend_tmo * HZ))) { + ha->notify_idc_comp = 0; + ha->notify_link_up_comp = 0; + ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received", + __func__); + status = QLA_ERROR; + goto exit_wait; + } else { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: IDC Complete notification received\n", + __func__)); + } + } + } else { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: IDC Complete notification received\n", + __func__)); + } + ha->notify_idc_comp = 0; + + if (wait_for_link) { + if (!wait_for_completion_timeout(&ha->link_up_comp, + (IDC_COMP_TOV * HZ))) { + ha->notify_link_up_comp = 0; + ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received", + __func__); + status = QLA_ERROR; + goto exit_wait; + } else { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: LINK UP notification received\n", + __func__)); + } + ha->notify_link_up_comp = 0; + } + +exit_wait: + return status; +} + +static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha, + uint32_t *mbox_cmd) +{ + uint32_t config = 0; + int status = QLA_SUCCESS; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + + status = qla4_83xx_get_port_config(ha, &config); + if (status != QLA_SUCCESS) + goto exit_pre_loopback_config; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n", + __func__, config)); + + if ((config & ENABLE_INTERNAL_LOOPBACK) || + (config & ENABLE_EXTERNAL_LOOPBACK)) { + ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. 
Invalid Request\n", + __func__); + goto exit_pre_loopback_config; + } + + if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK) + config |= ENABLE_INTERNAL_LOOPBACK; + + if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK) + config |= ENABLE_EXTERNAL_LOOPBACK; + + config &= ~ENABLE_DCBX; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n", + __func__, config)); + + ha->notify_idc_comp = 1; + ha->notify_link_up_comp = 1; + + /* get the link state */ + qla4xxx_get_firmware_state(ha); + + status = qla4_83xx_set_port_config(ha, &config); + if (status != QLA_SUCCESS) { + ha->notify_idc_comp = 0; + ha->notify_link_up_comp = 0; + goto exit_pre_loopback_config; + } +exit_pre_loopback_config: + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__, + STATUS(status))); + return status; +} + +static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha, + uint32_t *mbox_cmd) +{ + int status = QLA_SUCCESS; + uint32_t config = 0; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + + status = qla4_83xx_get_port_config(ha, &config); + if (status != QLA_SUCCESS) + goto exit_post_loopback_config; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__, + config)); + + if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK) + config &= ~ENABLE_INTERNAL_LOOPBACK; + else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK) + config &= ~ENABLE_EXTERNAL_LOOPBACK; + + config |= ENABLE_DCBX; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Restore default port config=%08X\n", __func__, + config)); + + ha->notify_idc_comp = 1; + if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) + ha->notify_link_up_comp = 1; + + status = qla4_83xx_set_port_config(ha, &config); + if (status != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n", + __func__); + set_bit(DPC_RESET_HA, &ha->dpc_flags); + clear_bit(AF_LOOPBACK, &ha->flags); + goto exit_post_loopback_config; + } + +exit_post_loopback_config: + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__, + STATUS(status))); + return status; +} + +static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint8_t *rsp_ptr = NULL; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int wait_for_link = 1; + int status = QLA_ERROR; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + + bsg_reply->reply_payload_rcv_len = 0; + + if (test_bit(AF_LOOPBACK, &ha->flags)) { + ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n", + __func__); + bsg_reply->result = DID_ERROR << 16; + goto exit_loopback_cmd; + } + + if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) { + ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. 
+ +static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint8_t *rsp_ptr = NULL; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int wait_for_link = 1; + int status = QLA_ERROR; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + + bsg_reply->reply_payload_rcv_len = 0; + + if (test_bit(AF_LOOPBACK, &ha->flags)) { + ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n", + __func__); + bsg_reply->result = DID_ERROR << 16; + goto exit_loopback_cmd; + } + + if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) { + ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n", + __func__); + bsg_reply->result = DID_ERROR << 16; + goto exit_loopback_cmd; + } + + memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1], + sizeof(uint32_t) * MBOX_REG_COUNT); + + if (is_qla8032(ha) || is_qla8042(ha)) { + status = qla4_83xx_pre_loopback_config(ha, mbox_cmd); + if (status != QLA_SUCCESS) { + bsg_reply->result = DID_ERROR << 16; + goto exit_loopback_cmd; + } + + status = qla4_83xx_wait_for_loopback_config_comp(ha, + wait_for_link); + if (status != QLA_SUCCESS) { + bsg_reply->result = DID_TIME_OUT << 16; + goto restore; + } + } + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n", + __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2], + mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6], + mbox_cmd[7])); + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0], + &mbox_sts[0]); + + if (status == QLA_SUCCESS) + bsg_reply->result = DID_OK << 16; + else + bsg_reply->result = DID_ERROR << 16; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n", + __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2], + mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6], + mbox_sts[7])); + + /* Send mbox_sts to application */ + bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts); + rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply); + memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts)); +restore: + if (is_qla8032(ha) || is_qla8042(ha)) { + status = qla4_83xx_post_loopback_config(ha, mbox_cmd); + if (status != QLA_SUCCESS) { + bsg_reply->result = DID_ERROR << 16; + goto exit_loopback_cmd; + } + + /* after post_loopback_config(), wait for LINK UP only + * if the PHY link is up */ + if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP)) + wait_for_link = 0; + + status = qla4_83xx_wait_for_loopback_config_comp(ha, + wait_for_link); + if (status != QLA_SUCCESS) { + bsg_reply->result = DID_TIME_OUT << 16; + goto exit_loopback_cmd; + } + } +exit_loopback_cmd: + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: bsg_reply->result = x%x, status = %s\n", + __func__, bsg_reply->result, STATUS(status))); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); +}
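The diag handlers above and qla4xxx_execute_diag_test() below share one reply convention: vendor_cmd[1..] of the BSG request is copied verbatim into the outgoing mailbox registers, and the eight mbox_sts words are appended to the reply immediately after the struct iscsi_bsg_reply header. A minimal sketch of how a reader of bsg_job->reply would pull the status words back out (hypothetical helper, not part of the patch; it assumes only the layout established above):

	/* Hypothetical helper: recover the mailbox status words that the
	 * diag handlers copy in right after the iscsi_bsg_reply header. */
	static void qla4_diag_get_mbox_sts(struct iscsi_bsg_reply *bsg_reply,
					   uint32_t mbox_sts[MBOX_REG_COUNT])
	{
		uint8_t *rsp_ptr = (uint8_t *)bsg_reply +
				   sizeof(struct iscsi_bsg_reply);

		/* mbox_sts[0] carries the firmware completion status of the
		 * diag sub-test; bsg_reply->result only reports whether the
		 * mailbox command could be issued (DID_OK vs DID_ERROR). */
		memcpy(mbox_sts, rsp_ptr, sizeof(uint32_t) * MBOX_REG_COUNT);
	}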
+ +static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + uint32_t diag_cmd; + int rval = -EINVAL; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + + diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + if (diag_cmd == MBOX_CMD_DIAG_TEST) { + switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) { + case QL_DIAG_CMD_TEST_DDR_SIZE: + case QL_DIAG_CMD_TEST_DDR_RW: + case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW: + case QL_DIAG_CMD_TEST_NVRAM: + case QL_DIAG_CMD_TEST_FLASH_ROM: + case QL_DIAG_CMD_TEST_DMA_XFER: + case QL_DIAG_CMD_SELF_DDR_RW: + case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW: + /* Execute diag test for adapter RAM/FLASH */ + ql4xxx_execute_diag_cmd(bsg_job); + /* Always return success as we want to send bsg_reply + * to Application */ + rval = QLA_SUCCESS; + break; + + case QL_DIAG_CMD_TEST_INT_LOOPBACK: + case QL_DIAG_CMD_TEST_EXT_LOOPBACK: + /* Execute diag test for Network */ + qla4xxx_execute_diag_loopback_cmd(bsg_job); + /* Always return success as we want to send bsg_reply + * to Application */ + rval = QLA_SUCCESS; + break; + default: + ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n", + __func__, + bsg_req->rqst_data.h_vendor.vendor_cmd[2]); + } + } else if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) || + (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) { + ql4xxx_execute_diag_cmd(bsg_job); + rval = QLA_SUCCESS; + } else { + ql4_printk(KERN_ERR, ha, "%s: Invalid diag cmd: 0x%x\n", + __func__, diag_cmd); + } + + return rval; +}
+ /** * qla4xxx_process_vendor_specific - handle vendor specific bsg request * @job: iscsi_bsg_job to handle @@ -479,6 +836,9 @@ int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job) case QLISCSI_VND_GET_ACB: return qla4xxx_bsg_get_acb(bsg_job); + case QLISCSI_VND_DIAG_TEST: + return qla4xxx_execute_diag_test(bsg_job); + default: ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: " "0x%x\n", __func__, bsg_req->msgcode); diff --git a/drivers/scsi/qla4xxx/ql4_bsg.h b/drivers/scsi/qla4xxx/ql4_bsg.h index c6a0364509f..88c2401910c 100644 --- a/drivers/scsi/qla4xxx/ql4_bsg.h +++ b/drivers/scsi/qla4xxx/ql4_bsg.h @@ -15,5 +15,18 @@ #define QLISCSI_VND_UPDATE_NVRAM 5 #define QLISCSI_VND_RESTORE_DEFAULTS 6 #define QLISCSI_VND_GET_ACB 7 +#define QLISCSI_VND_DIAG_TEST 8 + +/* QLISCSI_VND_DIAG_CMD sub code */ +#define QL_DIAG_CMD_TEST_DDR_SIZE 0x2 +#define QL_DIAG_CMD_TEST_DDR_RW 0x3 +#define QL_DIAG_CMD_TEST_ONCHIP_MEM_RW 0x4 +#define QL_DIAG_CMD_TEST_NVRAM 0x5 /* Only ISP4XXX */ +#define QL_DIAG_CMD_TEST_FLASH_ROM 0x6 +#define QL_DIAG_CMD_TEST_INT_LOOPBACK 0x7 +#define QL_DIAG_CMD_TEST_EXT_LOOPBACK 0x8 +#define QL_DIAG_CMD_TEST_DMA_XFER 0x9 /* Only ISP4XXX */ +#define QL_DIAG_CMD_SELF_DDR_RW 0xC +#define QL_DIAG_CMD_SELF_ONCHIP_MEM_RW 0xD #endif diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c index 77b7c594010..5649e9ef59a 100644 --- a/drivers/scsi/qla4xxx/ql4_dbg.c +++ b/drivers/scsi/qla4xxx/ql4_dbg.c @@ -141,21 +141,22 @@ void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha) if (is_qla8022(ha)) { ql4_printk(KERN_INFO, ha, - "scsi(%ld): %s, ISP8022 Dumping hw/fw registers:\n" + "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n" " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n" " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n" " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n" - " PEG_NET_4_PC: 0x%x\n", ha->host_no, - __func__, halt_status1, halt_status2, + " PEG_NET_4_PC: 0x%x\n", ha->host_no, __func__, + ha->pdev->device, halt_status1, halt_status2, qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c), qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c), qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c), qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c), qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c)); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { ql4_printk(KERN_INFO, ha, - "scsi(%ld): %s, ISP8324 Dumping hw/fw registers:\n" + "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n" " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n", - ha->host_no, __func__, halt_status1, halt_status2); + ha->host_no, __func__, ha->pdev->device, + halt_status1, halt_status2); } } diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h index 5b0afc18ef1..51c365bcf91 100644 --- a/drivers/scsi/qla4xxx/ql4_dbg.h +++ b/drivers/scsi/qla4xxx/ql4_dbg.h @@ -12,6 +12,7 @@ /* #define QL_DEBUG_LEVEL_3 */ /* Output function tracing */ /* #define QL_DEBUG_LEVEL_4 */ /* #define QL_DEBUG_LEVEL_5 */ +/* #define QL_DEBUG_LEVEL_7 */ /* #define QL_DEBUG_LEVEL_9 */ #define QL_DEBUG_LEVEL_2 /* Always enable error messages */ @@ -48,6 +49,12 @@ #define DEBUG5(x) do {} while (0); #endif /* */
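The new QL_DEBUG_LEVEL_7 knob follows the existing DEBUGn convention, defined immediately below: when the level is compiled in, DEBUG7(x) executes its argument; otherwise it expands to an empty do {} while (0) and the argument disappears at compile time, so call sites need no #ifdef. A sketch of a typical call site (hypothetical function, mirroring the DEBUG7 uses this patch adds to ql4_isr.c):

	/* Hypothetical call site: the ql4_printk() is compiled in only
	 * when QL_DEBUG_LEVEL_7 is defined; otherwise DEBUG7() swallows
	 * its argument entirely. */
	static irqreturn_t qla4_example_intr(int irq, void *dev_id)
	{
		struct scsi_qla_host *ha = dev_id;

		DEBUG7(ql4_printk(KERN_INFO, ha,
				  "%s legacy Int not triggered\n", __func__));
		return IRQ_NONE;
	}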
+#if defined(QL_DEBUG_LEVEL_7) +#define DEBUG7(x) do {x; } while (0) +#else /* */ +#define DEBUG7(x) do {} while (0) +#endif /* */ + #if defined(QL_DEBUG_LEVEL_9) #define DEBUG9(x) do {x;} while (0); #else /* */ #define DEBUG9(x) do {} while (0); diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 129f5dd0282..8f6d0fb2cd8 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -64,11 +64,16 @@ #define PCI_DEVICE_ID_QLOGIC_ISP8324 0x8032 #endif +#ifndef PCI_DEVICE_ID_QLOGIC_ISP8042 +#define PCI_DEVICE_ID_QLOGIC_ISP8042 0x8042 +#endif + #define ISP4XXX_PCI_FN_1 0x1 #define ISP4XXX_PCI_FN_2 0x3 #define QLA_SUCCESS 0 #define QLA_ERROR 1 +#define STATUS(status) ((status) == QLA_ERROR ? "FAILED" : "SUCCEEDED") /* * Data bit definitions */ @@ -159,6 +164,26 @@ #define LSDW(x) ((u32)((u64)(x))) #define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16)) +#define DEV_DB_NON_PERSISTENT 0 +#define DEV_DB_PERSISTENT 1 + +#define COPY_ISID(dst_isid, src_isid) { \ + int i, j; \ + for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \ + dst_isid[i++] = src_isid[j--]; \ +} + +#define SET_BITVAL(o, n, v) { \ + if (o) \ + n |= v; \ + else \ + n &= ~v; \ +} + +#define OP_STATE(o, f, p) { \ + p = (o & f) ? "enable" : "disable"; \ +} +
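The three helper macros above are small conveniences for shuttling fields between firmware structures and the iSCSI transport: COPY_ISID byte-reverses the ISID_SIZE-byte ISID, SET_BITVAL folds a boolean into a flag word, and OP_STATE renders a flag bit as an "enable"/"disable" string. A hypothetical illustration of their effect (the bit value 0x0400 is arbitrary):

	/* Hypothetical illustration of the helper macros defined above. */
	static void qla4_macro_example(struct dev_db_entry *fw_ddb_entry)
	{
		uint8_t isid[ISID_SIZE];
		uint16_t options = 0;
		char *state;

		/* The firmware stores the ISID byte-reversed: this copies
		 * src[ISID_SIZE - 1]..src[0] into dst[0]..dst[ISID_SIZE - 1]. */
		COPY_ISID(isid, fw_ddb_entry->isid);

		SET_BITVAL(1, options, 0x0400);	/* options |= 0x0400 */
		SET_BITVAL(0, options, 0x0400);	/* options &= ~0x0400 */

		OP_STATE(options, 0x0400, state);	/* state = "disable" */
	}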
/* * Retry & Timeout Values */ @@ -169,7 +194,7 @@ #define ADAPTER_INIT_TOV 30 #define ADAPTER_RESET_TOV 180 #define EXTEND_CMD_TOV 60 -#define WAIT_CMD_TOV 30 +#define WAIT_CMD_TOV 5 #define EH_WAIT_CMD_TOV 120 #define FIRMWARE_UP_TOV 60 #define RESET_FIRMWARE_TOV 30 @@ -185,6 +210,9 @@ #define MAX_RESET_HA_RETRIES 2 #define FW_ALIVE_WAIT_TOV 3 +#define IDC_EXTEND_TOV 8 +#define IDC_COMP_TOV 5 +#define LINK_UP_COMP_TOV 30 #define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr) @@ -269,6 +297,8 @@ struct ddb_entry { /* Driver Re-login */ unsigned long flags; /* DDB Flags */ +#define DDB_CONN_CLOSE_FAILURE 0 /* 0x00000001 */ + uint16_t default_relogin_timeout; /* Max time to wait for * relogin to complete */ atomic_t retry_relogin_timer; /* Min Time between relogins @@ -285,6 +315,7 @@ struct ddb_entry { struct qla_ddb_index { struct list_head list; uint16_t fw_ddb_idx; + uint16_t flash_ddb_idx; struct dev_db_entry fw_ddb; uint8_t flash_isid[6]; }; @@ -319,6 +350,7 @@ struct ql4_tuple_ddb { #define DF_BOOT_TGT 1 /* Boot target entry */ #define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ #define DF_FO_MASKED 3 +#define DF_DISABLE_RELOGIN 4 /* Disable relogin to device */ enum qla4_work_type { QLA4_EVENT_AEN, @@ -363,6 +395,8 @@ struct ql82xx_hw_data { uint32_t flt_iscsi_param; uint32_t flt_region_chap; uint32_t flt_chap_size; + uint32_t flt_region_ddb; + uint32_t flt_ddb_size; }; struct qla4_8xxx_legacy_intr_set { @@ -451,6 +485,34 @@ struct ipaddress_config { uint16_t eth_mtu_size; uint16_t ipv4_port; uint16_t ipv6_port; + uint8_t control; + uint16_t ipv6_tcp_options; + uint8_t tcp_wsf; + uint8_t ipv6_tcp_wsf; + uint8_t ipv4_tos; + uint8_t ipv4_cache_id; + uint8_t ipv6_cache_id; + uint8_t ipv4_alt_cid_len; + uint8_t ipv4_alt_cid[11]; + uint8_t ipv4_vid_len; + uint8_t ipv4_vid[11]; + uint8_t ipv4_ttl; + uint16_t ipv6_flow_lbl; + uint8_t ipv6_traffic_class; + uint8_t ipv6_hop_limit; + uint32_t ipv6_nd_reach_time; + uint32_t ipv6_nd_rexmit_timer; + uint32_t ipv6_nd_stale_timeout; + uint8_t ipv6_dup_addr_detect_count; + uint32_t ipv6_gw_advrt_mtu; + uint16_t def_timeout; + uint8_t abort_timer; + uint16_t iscsi_options; + uint16_t iscsi_max_pdu_size; + uint16_t iscsi_first_burst_len; + uint16_t iscsi_max_outstnd_r2t; + uint16_t iscsi_max_burst_len; + uint8_t iscsi_name[224]; }; #define QL4_CHAP_MAX_NAME_LEN 256 @@ -501,6 +563,7 @@ struct scsi_qla_host { #define AF_INIT_DONE 1 /* 0x00000002 */ #define AF_MBOX_COMMAND 2 /* 0x00000004 */ #define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */ +#define AF_ST_DISCOVERY_IN_PROGRESS 4 /* 0x00000010 */ #define AF_INTERRUPTS_ON 6 /* 0x00000040 */ #define AF_GET_CRASH_RECORD 7 /* 0x00000080 */ #define AF_LINK_UP 8 /* 0x00000100 */ @@ -519,7 +582,6 @@ struct scsi_qla_host { #define AF_82XX_FW_DUMPED 24 /* 0x01000000 */ #define AF_8XXX_RST_OWNER 25 /* 0x02000000 */ #define AF_82XX_DUMP_READING 26 /* 0x04000000 */ -#define AF_83XX_NO_FW_DUMP 27 /* 0x08000000 */ #define AF_83XX_IOCB_INTR_ON 28 /* 0x10000000 */ #define AF_83XX_MBOX_INTR_ON 29 /* 0x20000000 */ @@ -534,10 +596,12 @@ struct scsi_qla_host { #define DPC_AEN 9 /* 0x00000200 */ #define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */ #define DPC_LINK_CHANGED 18 /* 0x00040000 */ -#define DPC_RESET_ACTIVE 20 /* 0x00040000 */ -#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/ -#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/ -#define DPC_POST_IDC_ACK 23 /* 0x00200000 */ +#define DPC_RESET_ACTIVE 20 /* 0x00100000 */ +#define DPC_HA_UNRECOVERABLE 21 /* 0x00200000 ISP-82xx only*/ +#define DPC_HA_NEED_QUIESCENT 22 /* 0x00400000 ISP-82xx only*/ +#define DPC_POST_IDC_ACK 23 /* 0x00800000 */ +#define DPC_RESTORE_ACB 24 /* 0x01000000 */ +#define DPC_SYSFS_DDB_EXPORT 25 /* 0x02000000 */ struct Scsi_Host *host; /* pointer to host data */ uint32_t tot_ddbs; @@ -706,6 +770,7 @@ struct scsi_qla_host { uint32_t fw_dump_capture_mask; void *fw_dump_tmplt_hdr; uint32_t fw_dump_tmplt_size; + uint32_t fw_dump_skip_size; struct completion mbx_intr_comp; @@ -715,12 +780,9 @@ struct scsi_qla_host { struct iscsi_iface *iface_ipv6_1; /* --- From About Firmware --- */ - uint16_t iscsi_major; - uint16_t iscsi_minor; - uint16_t bootload_major; - uint16_t bootload_minor; - uint16_t bootload_patch; - uint16_t bootload_build; + struct about_fw_info fw_info; + uint32_t fw_uptime_secs; /* seconds elapsed since fw bootup */ + uint32_t fw_uptime_msecs; /* milliseconds beyond elapsed seconds */ uint16_t def_timeout; /* Default login timeout */ uint32_t flash_state; @@ -761,9 +823,16 @@ struct scsi_qla_host { uint32_t *reg_tbl; struct qla4_83xx_reset_template reset_tmplt; struct device_reg_83xx __iomem *qla4_83xx_reg; /* Base I/O address - for ISP8324 */ + for ISP8324 and + ISP8042 */ uint32_t pf_bit; struct qla4_83xx_idc_information idc_info; + struct addr_ctrl_blk *saved_acb; + int notify_idc_comp; + int notify_link_up_comp; + int idc_extend_tmo; + struct completion idc_comp; + struct completion link_up_comp; }; struct ql4_task_data { @@ -831,15 +900,21 @@ static inline int is_qla8032(struct scsi_qla_host *ha) return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324; } +static inline int is_qla8042(struct scsi_qla_host *ha) +{ + return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042; +} + static inline int is_qla80XX(struct scsi_qla_host *ha) { - return is_qla8022(ha) || is_qla8032(ha); + return is_qla8022(ha) || is_qla8032(ha) || is_qla8042(ha); } static inline int is_aer_supported(struct scsi_qla_host *ha) { return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) || - (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324)); + (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324) || + (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042));
} static inline int adapter_up(struct scsi_qla_host *ha) diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index ad9d2e2d370..699575efc9b 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -288,6 +288,8 @@ union external_hw_config_reg { #define FA_GOLD_RISC_CODE_ADDR_82 0x80000 #define FA_FLASH_ISCSI_CHAP 0x540000 #define FA_FLASH_CHAP_SIZE 0xC0000 +#define FA_FLASH_ISCSI_DDB 0x420000 +#define FA_FLASH_DDB_SIZE 0x080000 /* Flash Description Table */ struct qla_fdt_layout { @@ -348,6 +350,7 @@ struct qla_flt_header { #define FLT_REG_BOOT_CODE_82 0x78 #define FLT_REG_ISCSI_PARAM 0x65 #define FLT_REG_ISCSI_CHAP 0x63 +#define FLT_REG_ISCSI_DDB 0x6A struct qla_flt_region { uint32_t code; @@ -387,6 +390,7 @@ struct qla_flt_region { #define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031 #define MBOX_CMD_CONN_OPEN 0x0074 #define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056 +#define DDB_NOT_LOGGED_IN 0x09 #define LOGOUT_OPTION_CLOSE_SESSION 0x0002 #define LOGOUT_OPTION_RELOGIN 0x0004 #define LOGOUT_OPTION_FREE_DDB 0x0008 @@ -407,6 +411,7 @@ struct qla_flt_region { #define DDB_DS_LOGIN_IN_PROCESS 0x07 #define MBOX_CMD_GET_FW_STATE 0x0069 #define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A +#define MBOX_CMD_DIAG_TEST 0x0075 #define MBOX_CMD_GET_SYS_INFO 0x0078 #define MBOX_CMD_GET_NVRAM 0x0078 /* For 40xx */ #define MBOX_CMD_SET_NVRAM 0x0079 /* For 40xx */ @@ -422,8 +427,17 @@ struct qla_flt_region { #define MBOX_CMD_GET_IP_ADDR_STATE 0x0091 #define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092 #define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093 +#define MBOX_CMD_SET_PORT_CONFIG 0x0122 +#define MBOX_CMD_GET_PORT_CONFIG 0x0123 +#define MBOX_CMD_SET_LED_CONFIG 0x0125 +#define MBOX_CMD_GET_LED_CONFIG 0x0126 #define MBOX_CMD_MINIDUMP 0x0129 +/* Port Config */ +#define ENABLE_INTERNAL_LOOPBACK 0x04 +#define ENABLE_EXTERNAL_LOOPBACK 0x08 +#define ENABLE_DCBX 0x10 + /* Minidump subcommand */ #define MINIDUMP_GET_SIZE_SUBCOMMAND 0x00 #define MINIDUMP_GET_TMPLT_SUBCOMMAND 0x01 @@ -455,6 +469,7 @@ struct qla_flt_region { #define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077 #define MBOX_CMD_IDC_ACK 0x0101 +#define MBOX_CMD_IDC_TIME_EXTEND 0x0102 #define MBOX_CMD_PORT_RESET 0x0120 #define MBOX_CMD_SET_PORT_CONFIG 0x0122 @@ -490,12 +505,17 @@ struct qla_flt_region { #define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027 #define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028 #define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029 -#define MBOX_ASTS_IPV6_PREFIX_EXPIRED 0x802B -#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C -#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D +#define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED 0x802A +#define MBOX_ASTS_IPV6_LINK_MTU_CHANGE 0x802B +#define MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED 0x802C +#define MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED 0x802D #define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E +#define MBOX_ASTS_INITIALIZATION_FAILED 0x8031 +#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036 #define MBOX_ASTS_IDC_COMPLETE 0x8100 #define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101 +#define MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION 0x8102 +#define MBOX_ASTS_DCBX_CONF_CHANGE 0x8110 #define MBOX_ASTS_TXSCVR_INSERTED 0x8130 #define MBOX_ASTS_TXSCVR_REMOVED 0x8131 @@ -505,14 +525,18 @@ struct qla_flt_region { #define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR 0x8022 #define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027 -/* 
ACB State Defines */ -#define ACB_STATE_UNCONFIGURED 0x00 -#define ACB_STATE_INVALID 0x01 -#define ACB_STATE_ACQUIRING 0x02 -#define ACB_STATE_TENTATIVE 0x03 -#define ACB_STATE_DEPRICATED 0x04 -#define ACB_STATE_VALID 0x05 -#define ACB_STATE_DISABLING 0x06 +/* ACB Configuration Defines */ +#define ACB_CONFIG_DISABLE 0x00 +#define ACB_CONFIG_SET 0x01 + +/* ACB/IP Address State Defines */ +#define IP_ADDRSTATE_UNCONFIGURED 0 +#define IP_ADDRSTATE_INVALID 1 +#define IP_ADDRSTATE_ACQUIRING 2 +#define IP_ADDRSTATE_TENTATIVE 3 +#define IP_ADDRSTATE_DEPRICATED 4 +#define IP_ADDRSTATE_PREFERRED 5 +#define IP_ADDRSTATE_DISABLING 6 /* FLASH offsets */ #define FLASH_SEGMENT_IFCB 0x04000000 @@ -522,9 +546,9 @@ struct qla_flt_region { #define FLASH_OPT_COMMIT 2 #define FLASH_OPT_RMW_COMMIT 3 -/* Loopback type */ -#define ENABLE_INTERNAL_LOOPBACK 0x04 -#define ENABLE_EXTERNAL_LOOPBACK 0x08 +/* generic defines to enable/disable params */ +#define QL4_PARAM_DISABLE 0 +#define QL4_PARAM_ENABLE 1 /*************************************************************************/ @@ -534,6 +558,7 @@ struct addr_ctrl_blk { #define IFCB_VER_MIN 0x01 #define IFCB_VER_MAX 0x02 uint8_t control; /* 01 */ +#define CTRLOPT_NEW_CONN_DISABLE 0x0002 uint16_t fw_options; /* 02-03 */ #define FWOPT_HEARTBEAT_ENABLE 0x1000 @@ -565,11 +590,40 @@ struct addr_ctrl_blk { uint32_t shdwreg_addr_hi; /* 2C-2F */ uint16_t iscsi_opts; /* 30-31 */ +#define ISCSIOPTS_HEADER_DIGEST_EN 0x2000 +#define ISCSIOPTS_DATA_DIGEST_EN 0x1000 +#define ISCSIOPTS_IMMEDIATE_DATA_EN 0x0800 +#define ISCSIOPTS_INITIAL_R2T_EN 0x0400 +#define ISCSIOPTS_DATA_SEQ_INORDER_EN 0x0200 +#define ISCSIOPTS_DATA_PDU_INORDER_EN 0x0100 +#define ISCSIOPTS_CHAP_AUTH_EN 0x0080 +#define ISCSIOPTS_SNACK_EN 0x0040 +#define ISCSIOPTS_DISCOVERY_LOGOUT_EN 0x0020 +#define ISCSIOPTS_BIDI_CHAP_EN 0x0010 +#define ISCSIOPTS_DISCOVERY_AUTH_EN 0x0008 +#define ISCSIOPTS_STRICT_LOGIN_COMP_EN 0x0004 +#define ISCSIOPTS_ERL 0x0003 uint16_t ipv4_tcp_opts; /* 32-33 */ +#define TCPOPT_DELAYED_ACK_DISABLE 0x8000 #define TCPOPT_DHCP_ENABLE 0x0200 +#define TCPOPT_DNS_SERVER_IP_EN 0x0100 +#define TCPOPT_SLP_DA_INFO_EN 0x0080 +#define TCPOPT_NAGLE_ALGO_DISABLE 0x0020 +#define TCPOPT_WINDOW_SCALE_DISABLE 0x0010 +#define TCPOPT_TIMER_SCALE 0x000E +#define TCPOPT_TIMESTAMP_ENABLE 0x0001 uint16_t ipv4_ip_opts; /* 34-35 */ #define IPOPT_IPV4_PROTOCOL_ENABLE 0x8000 +#define IPOPT_IPV4_TOS_EN 0x4000 #define IPOPT_VLAN_TAGGING_ENABLE 0x2000 +#define IPOPT_GRAT_ARP_EN 0x1000 +#define IPOPT_ALT_CID_EN 0x0800 +#define IPOPT_REQ_VID_EN 0x0400 +#define IPOPT_USE_VID_EN 0x0200 +#define IPOPT_LEARN_IQN_EN 0x0100 +#define IPOPT_FRAGMENTATION_DISABLE 0x0010 +#define IPOPT_IN_FORWARD_EN 0x0008 +#define IPOPT_ARP_REDIRECT_EN 0x0004 uint16_t iscsi_max_pdu_size; /* 36-37 */ uint8_t ipv4_tos; /* 38 */ @@ -620,15 +674,24 @@ struct addr_ctrl_blk { uint32_t cookie; /* 200-203 */ uint16_t ipv6_port; /* 204-205 */ uint16_t ipv6_opts; /* 206-207 */ -#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000 -#define IPV6_OPT_VLAN_TAGGING_ENABLE 0x2000 +#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000 +#define IPV6_OPT_VLAN_TAGGING_ENABLE 0x2000 +#define IPV6_OPT_GRAT_NEIGHBOR_ADV_EN 0x1000 +#define IPV6_OPT_REDIRECT_EN 0x0004 uint16_t ipv6_addtl_opts; /* 208-209 */ +#define IPV6_ADDOPT_IGNORE_ICMP_ECHO_REQ 0x0040 +#define IPV6_ADDOPT_MLD_EN 0x0004 #define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE 0x0002 /* Pri ACB Only */ #define IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR 0x0001 uint16_t ipv6_tcp_opts; /* 20A-20B */ +#define 
IPV6_TCPOPT_DELAYED_ACK_DISABLE 0x8000 +#define IPV6_TCPOPT_NAGLE_ALGO_DISABLE 0x0020 +#define IPV6_TCPOPT_WINDOW_SCALE_DISABLE 0x0010 +#define IPV6_TCPOPT_TIMER_SCALE 0x000E +#define IPV6_TCPOPT_TIMESTAMP_EN 0x0001 uint8_t ipv6_tcp_wsf; /* 20C */ uint16_t ipv6_flow_lbl; /* 20D-20F */ uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */ @@ -636,14 +699,6 @@ struct addr_ctrl_blk { uint8_t ipv6_lnk_lcl_addr_state;/* 222 */ uint8_t ipv6_addr0_state; /* 223 */ uint8_t ipv6_addr1_state; /* 224 */ -#define IP_ADDRSTATE_UNCONFIGURED 0 -#define IP_ADDRSTATE_INVALID 1 -#define IP_ADDRSTATE_ACQUIRING 2 -#define IP_ADDRSTATE_TENTATIVE 3 -#define IP_ADDRSTATE_DEPRICATED 4 -#define IP_ADDRSTATE_PREFERRED 5 -#define IP_ADDRSTATE_DISABLING 6 - uint8_t ipv6_dflt_rtr_state; /* 225 */ #define IPV6_RTRSTATE_UNKNOWN 0 #define IPV6_RTRSTATE_MANUAL 1 @@ -779,12 +834,41 @@ struct dev_db_entry { #define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */ #define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */ +#define OPT_IS_FW_ASSIGNED_IPV6 11 +#define OPT_IPV6_DEVICE 8 +#define OPT_AUTO_SENDTGTS_DISABLE 6 +#define OPT_DISC_SESSION 4 +#define OPT_ENTRY_STATE 3 uint16_t exec_throttle; /* 02-03 */ uint16_t exec_count; /* 04-05 */ uint16_t res0; /* 06-07 */ uint16_t iscsi_options; /* 08-09 */ +#define ISCSIOPT_HEADER_DIGEST_EN 13 +#define ISCSIOPT_DATA_DIGEST_EN 12 +#define ISCSIOPT_IMMEDIATE_DATA_EN 11 +#define ISCSIOPT_INITIAL_R2T_EN 10 +#define ISCSIOPT_DATA_SEQ_IN_ORDER 9 +#define ISCSIOPT_DATA_PDU_IN_ORDER 8 +#define ISCSIOPT_CHAP_AUTH_EN 7 +#define ISCSIOPT_SNACK_REQ_EN 6 +#define ISCSIOPT_DISCOVERY_LOGOUT_EN 5 +#define ISCSIOPT_BIDI_CHAP_EN 4 +#define ISCSIOPT_DISCOVERY_AUTH_OPTIONAL 3 +#define ISCSIOPT_ERL1 1 +#define ISCSIOPT_ERL0 0 + uint16_t tcp_options; /* 0A-0B */ +#define TCPOPT_TIMESTAMP_STAT 6 +#define TCPOPT_NAGLE_DISABLE 5 +#define TCPOPT_WSF_DISABLE 4 +#define TCPOPT_TIMER_SCALE3 3 +#define TCPOPT_TIMER_SCALE2 2 +#define TCPOPT_TIMER_SCALE1 1 +#define TCPOPT_TIMESTAMP_EN 0 + uint16_t ip_options; /* 0C-0D */ +#define IPOPT_FRAGMENT_DISABLE 4 + uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */ #define BYTE_UNITS 512 uint32_t res1; /* 10-13 */ @@ -816,6 +900,8 @@ struct dev_db_entry { * much RAM */ uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */ uint8_t res5[0x10]; /* 1B0-1BF */ +#define DDB_NO_LINK 0xFFFF +#define DDB_ISNS 0xFFFD uint16_t ddb_link; /* 1C0-1C1 */ uint16_t chap_tbl_idx; /* 1C2-1C3 */ uint16_t tgt_portal_grp; /* 1C4-1C5 */ @@ -917,7 +1003,7 @@ struct about_fw_info { uint16_t bootload_minor; /* 46 - 47 */ uint16_t bootload_patch; /* 48 - 49 */ uint16_t bootload_build; /* 4A - 4B */ - uint8_t reserved2[180]; /* 4C - FF */ + uint8_t extended_timestamp[180];/* 4C - FF */ }; struct crash_record { @@ -1204,7 +1290,88 @@ struct response { }; struct ql_iscsi_stats { - uint8_t reserved1[656]; /* 0000-028F */ + uint64_t mac_tx_frames; /* 0000-0007 */ + uint64_t mac_tx_bytes; /* 0008-000F */ + uint64_t mac_tx_multicast_frames; /* 0010-0017 */ + uint64_t mac_tx_broadcast_frames; /* 0018-001F */ + uint64_t mac_tx_pause_frames; /* 0020-0027 */ + uint64_t mac_tx_control_frames; /* 0028-002F */ + uint64_t mac_tx_deferral; /* 0030-0037 */ + uint64_t mac_tx_excess_deferral; /* 0038-003F */ + uint64_t mac_tx_late_collision; /* 0040-0047 */ + uint64_t mac_tx_abort; /* 0048-004F */ + uint64_t mac_tx_single_collision; /* 0050-0057 */ + uint64_t mac_tx_multiple_collision; /* 0058-005F */ + uint64_t mac_tx_collision; /* 0060-0067 */ + uint64_t mac_tx_frames_dropped; /* 0068-006F */ + uint64_t mac_tx_jumbo_frames; /* 0070-0077 */ + uint64_t mac_rx_frames; /* 0078-007F */ + uint64_t mac_rx_bytes; /* 0080-0087 */ + uint64_t mac_rx_unknown_control_frames; /* 0088-008F */ + uint64_t mac_rx_pause_frames; /* 0090-0097 */ + uint64_t mac_rx_control_frames; /* 0098-009F */ + uint64_t mac_rx_dribble; /* 00A0-00A7 */ + uint64_t mac_rx_frame_length_error; /* 00A8-00AF */ + uint64_t mac_rx_jabber; /* 00B0-00B7 */ + uint64_t mac_rx_carrier_sense_error; /* 00B8-00BF */ + uint64_t mac_rx_frame_discarded; /* 00C0-00C7 */ + uint64_t mac_rx_frames_dropped; /* 00C8-00CF */ + uint64_t mac_crc_error; /* 00D0-00D7 */ + uint64_t mac_encoding_error; /* 00D8-00DF */ + uint64_t mac_rx_length_error_large; /* 00E0-00E7 */ + uint64_t mac_rx_length_error_small; /* 00E8-00EF */ + uint64_t mac_rx_multicast_frames; /* 00F0-00F7 */ + uint64_t mac_rx_broadcast_frames; /* 00F8-00FF */ + uint64_t ip_tx_packets; /* 0100-0107 */ + uint64_t ip_tx_bytes; /* 0108-010F */ + uint64_t ip_tx_fragments; /* 0110-0117 */ + uint64_t ip_rx_packets; /* 0118-011F */ + uint64_t ip_rx_bytes; /* 0120-0127 */ + uint64_t ip_rx_fragments; /* 0128-012F */ + uint64_t ip_datagram_reassembly; /* 0130-0137 */ + uint64_t ip_invalid_address_error; /* 0138-013F */ + uint64_t ip_error_packets; /* 0140-0147 */ + uint64_t ip_fragrx_overlap; /* 0148-014F */ + uint64_t ip_fragrx_outoforder; /* 0150-0157 */ + uint64_t ip_datagram_reassembly_timeout; /* 0158-015F */ + uint64_t ipv6_tx_packets; /* 0160-0167 */ + uint64_t ipv6_tx_bytes; /* 0168-016F */ + uint64_t ipv6_tx_fragments; /* 0170-0177 */ + uint64_t ipv6_rx_packets; /* 0178-017F */ + uint64_t ipv6_rx_bytes; /* 0180-0187 */ + uint64_t ipv6_rx_fragments; /* 0188-018F */ + uint64_t ipv6_datagram_reassembly; /* 0190-0197 */ + uint64_t ipv6_invalid_address_error; /* 0198-019F */ + uint64_t ipv6_error_packets; /* 01A0-01A7 */ + uint64_t ipv6_fragrx_overlap; /* 01A8-01AF */ + uint64_t ipv6_fragrx_outoforder; /* 01B0-01B7 */ + uint64_t ipv6_datagram_reassembly_timeout; /* 01B8-01BF */ + uint64_t tcp_tx_segments; /* 01C0-01C7 */ + uint64_t tcp_tx_bytes; /* 01C8-01CF */ + uint64_t tcp_rx_segments; /* 01D0-01D7 */ + uint64_t tcp_rx_byte; /* 01D8-01DF */ + uint64_t tcp_duplicate_ack_retx; /* 01E0-01E7 */ + uint64_t tcp_retx_timer_expired; /* 01E8-01EF */ + uint64_t tcp_rx_duplicate_ack; /* 01F0-01F7 */ + uint64_t tcp_rx_pure_ackr; /* 01F8-01FF */ + uint64_t tcp_tx_delayed_ack; /* 0200-0207 */ + uint64_t tcp_tx_pure_ack; /* 0208-020F */ + uint64_t tcp_rx_segment_error; /* 0210-0217 */ + uint64_t tcp_rx_segment_outoforder; /* 0218-021F */ + uint64_t tcp_rx_window_probe; /* 0220-0227 */ + uint64_t tcp_rx_window_update; /* 0228-022F */ + uint64_t tcp_tx_window_probe_persist; /* 0230-0237 */ + uint64_t ecc_error_correction; /* 0238-023F */ + uint64_t iscsi_pdu_tx; /* 0240-0247 */ + uint64_t iscsi_data_bytes_tx; /* 0248-024F */ + uint64_t iscsi_pdu_rx; /* 0250-0257 */ + uint64_t iscsi_data_bytes_rx; /* 0258-025F */ + uint64_t iscsi_io_completed; /* 0260-0267 */ + uint64_t iscsi_unexpected_io_rx; /* 0268-026F */ + uint64_t iscsi_format_error; /* 0270-0277 */ + uint64_t iscsi_hdr_digest_error; /* 0278-027F */ + uint64_t iscsi_data_digest_error; /* 0280-0287 */ + uint64_t iscsi_sequence_error; /* 0288-028F */ uint32_t tx_cmd_pdu; /* 0290-0293 */ uint32_t tx_resp_pdu; /* 0294-0297 */ uint32_t rx_cmd_pdu; /* 0298-029B */ @@ -1248,6 +1415,9 @@ struct ql_iscsi_stats { #define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN 16 #define QLA83XX_SS_OCM_WNDREG_INDEX 3 #define QLA83XX_SS_PCI_INDEX 0 +#define QLA8022_TEMPLATE_CAP_OFFSET 172
+#define QLA83XX_TEMPLATE_CAP_OFFSET 268 +#define QLA80XX_TEMPLATE_RESERVED_BITS 16 struct qla4_8xxx_minidump_template_hdr { uint32_t entry_type; @@ -1267,6 +1437,7 @@ struct qla4_8xxx_minidump_template_hdr { uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN]; uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN]; uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN]; + uint32_t capabilities[QLA80XX_TEMPLATE_RESERVED_BITS]; }; #endif /* _QLA4X_FW_H */ diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index 982293edf02..5f58b451327 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -83,6 +83,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts); int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username, char *password, int bidi, uint16_t *chap_index); +int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, + uint16_t idx, int bidi); void qla4xxx_queue_iocb(struct scsi_qla_host *ha); void qla4xxx_complete_iocb(struct scsi_qla_host *ha); @@ -191,6 +193,9 @@ int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options, int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha, uint32_t status, uint32_t pid, uint32_t data_size, uint8_t *data); +int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index); /* BSG Functions */ int qla4xxx_bsg_request(struct bsg_job *bsg_job); @@ -224,8 +229,6 @@ void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha, int qla4_83xx_isp_reset(struct scsi_qla_host *ha); void qla4_83xx_queue_iocb(struct scsi_qla_host *ha); void qla4_83xx_complete_iocb(struct scsi_qla_host *ha); -uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha); -uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha); uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr); void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val); int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr, @@ -261,6 +264,24 @@ int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha); void qla4_83xx_disable_pause(struct scsi_qla_host *ha); void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha); int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha); +int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options, + dma_addr_t dma_addr); +int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username, + char *password, uint16_t chap_index); +int qla4xxx_disable_acb(struct scsi_qla_host *ha); +int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, + uint32_t *mbox_sts, dma_addr_t acb_dma); +int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma, + uint32_t acb_type, uint32_t len); +int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config); +int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, + uint64_t addr, uint32_t *data, uint32_t count); +uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state); +int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config); +int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config); +int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha); +int 
qla4_83xx_is_detached(struct scsi_qla_host *ha); +int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha); extern int ql4xextended_error_logging; extern int ql4xdontresethba; diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 8fc8548ba4b..6f12f859b11 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -107,7 +107,7 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha) (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_in); writel(0, (unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_out); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { writel(0, (unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in); writel(0, @@ -282,6 +282,25 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha) return ipv4_wait|ipv6_wait; } +static int qla4_80xx_is_minidump_dma_capable(struct scsi_qla_host *ha, + struct qla4_8xxx_minidump_template_hdr *md_hdr) +{ + int offset = (is_qla8022(ha)) ? QLA8022_TEMPLATE_CAP_OFFSET : + QLA83XX_TEMPLATE_CAP_OFFSET; + int rval = 1; + uint32_t *cap_offset; + + cap_offset = (uint32_t *)((char *)md_hdr + offset); + + if (!(le32_to_cpu(*cap_offset) & BIT_0)) { + ql4_printk(KERN_INFO, ha, "PEX DMA Not supported %d\n", + *cap_offset); + rval = 0; + } + + return rval; +} + /** * qla4xxx_alloc_fw_dump - Allocate memory for minidump data. * @ha: pointer to host adapter structure. @@ -294,6 +313,7 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha) void *md_tmp; dma_addr_t md_tmp_dma; struct qla4_8xxx_minidump_template_hdr *md_hdr; + int dma_capable; if (ha->fw_dump) { ql4_printk(KERN_WARNING, ha, @@ -326,13 +346,19 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha) md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp; + dma_capable = qla4_80xx_is_minidump_dma_capable(ha, md_hdr); + capture_debug_level = md_hdr->capture_debug_level; /* Get capture mask based on module loadtime setting. */ - if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) + if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) || + (ql4xmdcapmask == 0xFF && dma_capable)) { ha->fw_dump_capture_mask = ql4xmdcapmask; - else + } else { + if (ql4xmdcapmask == 0xFF) + ql4_printk(KERN_INFO, ha, "Falling back to default capture mask, as PEX DMA is not supported\n"); ha->fw_dump_capture_mask = capture_debug_level; + } md_hdr->driver_capture_mask = ha->fw_dump_capture_mask; @@ -864,6 +890,8 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha) if (status == QLA_SUCCESS) { if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags)) qla4xxx_get_crash_record(ha); + + qla4xxx_init_rings(ha); } else { DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n", ha->host_no, __func__)); @@ -940,7 +968,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset) * while switching from polling to interrupt mode. IOCB interrupts are * enabled via isp_ops->enable_intrs. 
*/ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) qla4_83xx_enable_mbox_intrs(ha); if (qla4xxx_about_firmware(ha) == QLA_ERROR) @@ -959,13 +987,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset) qla4xxx_build_ddb_list(ha, is_reset); set_bit(AF_ONLINE, &ha->flags); -exit_init_hba: - if (is_qla80XX(ha) && (status == QLA_ERROR)) { - /* Since interrupts are registered in start_firmware for - * 80XX, release them here if initialize_adapter fails */ - qla4xxx_free_irqs(ha); - } +exit_init_hba: DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no, status == QLA_ERROR ? "FAILED" : "SUCCEEDED")); return status; diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h index 6f4decd44c6..655b7bb644d 100644 --- a/drivers/scsi/qla4xxx/ql4_inline.h +++ b/drivers/scsi/qla4xxx/ql4_inline.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -82,3 +82,15 @@ qla4xxx_disable_intrs(struct scsi_qla_host *ha) __qla4xxx_disable_intrs(ha); spin_unlock_irqrestore(&ha->hardware_lock, flags); } + +static inline int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry) +{ + int type; + + if (chap_entry->flags & BIT_7) + type = LOCAL_CHAP; + else + type = BIDI_CHAP; + + return type; +} diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index 14fec976f63..e5697ab144d 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -507,6 +507,7 @@ static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb, mrb->mbox_cmd = in_mbox[0]; wmb(); + ha->iocb_cnt += mrb->iocb_cnt; ha->isp_ops->queue_iocb(ha); exit_mbox_iocb: spin_unlock_irqrestore(&ha->hardware_lock, flags); diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 1b83dc283d2..081b6b78d2c 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. 
*/ @@ -385,9 +385,9 @@ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha, cls_conn = ddb_entry->conn; conn = cls_conn->dd_data; - spin_lock(&conn->session->lock); + spin_lock(&conn->session->back_lock); task = iscsi_itt_to_task(conn, itt); - spin_unlock(&conn->session->lock); + spin_unlock(&conn->session->back_lock); if (task == NULL) { ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__); @@ -396,7 +396,6 @@ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha, task_data = task->dd_data; memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status)); - ha->req_q_count += task_data->iocb_req_cnt; ha->iocb_cnt -= task_data->iocb_req_cnt; queue_work(ha->task_wq, &task_data->task_work); } @@ -416,7 +415,6 @@ static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha, return mrb; /* update counters */ - ha->req_q_count += mrb->iocb_cnt; ha->iocb_cnt -= mrb->iocb_cnt; return mrb; @@ -590,7 +588,7 @@ static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha) { int rval = 1; - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) || (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) { DEBUG2(ql4_printk(KERN_INFO, ha, @@ -608,6 +606,48 @@ static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha) return rval; } +static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha, + uint32_t ipaddr_idx, + uint32_t ipaddr_fw_state) +{ + uint8_t ipaddr_state; + uint8_t ip_idx; + + ip_idx = ipaddr_idx & 0xF; + ipaddr_state = qla4xxx_set_ipaddr_state((uint8_t)ipaddr_fw_state); + + switch (ip_idx) { + case 0: + ha->ip_config.ipv4_addr_state = ipaddr_state; + break; + case 1: + ha->ip_config.ipv6_link_local_state = ipaddr_state; + break; + case 2: + ha->ip_config.ipv6_addr0_state = ipaddr_state; + break; + case 3: + ha->ip_config.ipv6_addr1_state = ipaddr_state; + break; + default: + ql4_printk(KERN_INFO, ha, "%s: Invalid IPADDR index %d\n", + __func__, ip_idx); + } +} + +static void qla4xxx_default_router_changed(struct scsi_qla_host *ha, + uint32_t *mbox_sts) +{ + memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0], + &mbox_sts[2], sizeof(uint32_t)); + memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[1], + &mbox_sts[3], sizeof(uint32_t)); + memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[2], + &mbox_sts[4], sizeof(uint32_t)); + memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[3], + &mbox_sts[5], sizeof(uint32_t)); +} + /** * qla4xxx_isr_decode_mailbox - decodes mailbox status * @ha: Pointer to host adapter structure. 
@@ -622,8 +662,9 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, int i; uint32_t mbox_sts[MBOX_AEN_REG_COUNT]; __le32 __iomem *mailbox_out; + uint32_t opcode = 0; - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0]; else if (is_qla8022(ha)) mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0]; @@ -667,7 +708,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, qla4xxx_dump_registers(ha); if ((is_qla8022(ha) && ql4xdontresethba) || - (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) { + ((is_qla8032(ha) || is_qla8042(ha)) && + qla4_83xx_idc_dontreset(ha))) { DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n", ha->host_no, __func__)); } else { @@ -699,6 +741,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP, sizeof(mbox_sts), (uint8_t *) mbox_sts); + + if ((is_qla8032(ha) || is_qla8042(ha)) && + ha->notify_link_up_comp) + complete(&ha->link_up_comp); + break; case MBOX_ASTS_LINK_DOWN: @@ -742,21 +789,46 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0], mbox_sts[2], mbox_sts[3]); + qla4xxx_update_ipaddr_state(ha, mbox_sts[5], + mbox_sts[3]); /* mbox_sts[2] = Old ACB state * mbox_sts[3] = new ACB state */ - if ((mbox_sts[3] == ACB_STATE_VALID) && - ((mbox_sts[2] == ACB_STATE_TENTATIVE) || - (mbox_sts[2] == ACB_STATE_ACQUIRING))) + if ((mbox_sts[3] == IP_ADDRSTATE_PREFERRED) && + ((mbox_sts[2] == IP_ADDRSTATE_TENTATIVE) || + (mbox_sts[2] == IP_ADDRSTATE_ACQUIRING))) { set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); - else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) && - (mbox_sts[2] == ACB_STATE_VALID)) { + } else if ((mbox_sts[3] == IP_ADDRSTATE_ACQUIRING) && + (mbox_sts[2] == IP_ADDRSTATE_PREFERRED)) { if (is_qla80XX(ha)) set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags); else set_bit(DPC_RESET_HA, &ha->dpc_flags); - } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED)) + } else if (mbox_sts[3] == IP_ADDRSTATE_DISABLING) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n", + ha->host_no, __func__); + } else if (mbox_sts[3] == IP_ADDRSTATE_UNCONFIGURED) { complete(&ha->disable_acb_comp); + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n", + ha->host_no, __func__); + } + break; + + case MBOX_ASTS_IPV6_LINK_MTU_CHANGE: + case MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED: + case MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED: + /* No action */ + DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x\n", + ha->host_no, mbox_status)); + break; + + case MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD: + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x, IPv6 ERROR, " + "mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", + ha->host_no, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], mbox_sts[4], + mbox_sts[5])); break; case MBOX_ASTS_MAC_ADDRESS_CHANGED: @@ -836,9 +908,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, break; case MBOX_ASTS_IDC_REQUEST_NOTIFICATION: - { - uint32_t opcode; - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n", ha->host_no, mbox_sts[0], @@ -857,10 +927,9 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, } } break; - } case MBOX_ASTS_IDC_COMPLETE: - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) {
DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n", ha->host_no, mbox_sts[0], @@ -870,13 +939,77 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, "scsi:%ld: AEN %04x IDC Complete notification\n", ha->host_no, mbox_sts[0])); - if (qla4_83xx_loopback_in_progress(ha)) + opcode = mbox_sts[1] >> 16; + if (ha->notify_idc_comp) + complete(&ha->idc_comp); + + if ((opcode == MBOX_CMD_SET_PORT_CONFIG) || + (opcode == MBOX_CMD_PORT_RESET)) + ha->idc_info.info2 = mbox_sts[3]; + + if (qla4_83xx_loopback_in_progress(ha)) { set_bit(AF_LOOPBACK, &ha->flags); - else + } else { clear_bit(AF_LOOPBACK, &ha->flags); + if (ha->saved_acb) + set_bit(DPC_RESTORE_ACB, + &ha->dpc_flags); + } + qla4xxx_wake_dpc(ha); } break; + case MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED: + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", + ha->host_no, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], mbox_sts[4], + mbox_sts[5])); + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x Received IPv6 default router changed notification\n", + ha->host_no, mbox_sts[0])); + qla4xxx_default_router_changed(ha, mbox_sts); + break; + + case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION: + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", + ha->host_no, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], mbox_sts[4], + mbox_sts[5])); + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n", + ha->host_no, mbox_sts[0])); + /* new IDC timeout */ + ha->idc_extend_tmo = mbox_sts[1]; + break; + + case MBOX_ASTS_INITIALIZATION_FAILED: + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n", + ha->host_no, mbox_sts[0], + mbox_sts[3])); + break; + + case MBOX_ASTS_SYSTEM_WARNING_EVENT: + DEBUG2(ql4_printk(KERN_WARNING, ha, + "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", + ha->host_no, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], mbox_sts[4], + mbox_sts[5])); + break; + + case MBOX_ASTS_DCBX_CONF_CHANGE: + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", + ha->host_no, mbox_sts[0], mbox_sts[1], + mbox_sts[2], mbox_sts[3], mbox_sts[4], + mbox_sts[5])); + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: AEN %04x Received DCBX configuration changed notification\n", + ha->host_no, mbox_sts[0])); + break; + default: DEBUG2(printk(KERN_WARNING "scsi%ld: AEN %04x UNKNOWN\n", @@ -919,7 +1052,8 @@ void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha, uint32_t intr_status) { /* Process response queue interrupt. 
*/ - if (intr_status & HSRX_RISC_IOCB_INT) + if ((intr_status & HSRX_RISC_IOCB_INT) && + test_bit(AF_INIT_DONE, &ha->flags)) qla4xxx_process_response_queue(ha); /* Process mailbox/asynch event interrupt.*/ @@ -1099,8 +1233,8 @@ irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id) status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG); if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) { - DEBUG2(ql4_printk(KERN_INFO, ha, - "%s legacy Int not triggered\n", __func__)); + DEBUG7(ql4_printk(KERN_INFO, ha, + "%s legacy Int not triggered\n", __func__)); return IRQ_NONE; } @@ -1158,7 +1292,7 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id) /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */ if (!(leg_int_ptr & LEG_INT_PTR_B31)) { - DEBUG2(ql4_printk(KERN_ERR, ha, + DEBUG7(ql4_printk(KERN_ERR, ha, "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n", __func__)); return IRQ_NONE; @@ -1166,7 +1300,7 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id) /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */ if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) { - DEBUG2(ql4_printk(KERN_ERR, ha, + DEBUG7(ql4_printk(KERN_ERR, ha, "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n", __func__, (leg_int_ptr & PF_BITS_MASK), ha->pf_bit)); @@ -1262,7 +1396,7 @@ qla4_8xxx_default_intr_handler(int irq, void *dev_id) uint32_t intr_status; uint8_t reqs_count = 0; - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { qla4_83xx_mailbox_intr_handler(irq, dev_id); } else { spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1296,10 +1430,11 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id) { struct scsi_qla_host *ha = dev_id; unsigned long flags; + int intr_status; uint32_t ival = 0; spin_lock_irqsave(&ha->hardware_lock, flags); - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { ival = readl(&ha->qla4_83xx_reg->iocb_int_mask); if (ival == 0) { ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n", @@ -1309,8 +1444,15 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id) qla4xxx_process_response_queue(ha); writel(0, &ha->qla4_83xx_reg->iocb_int_mask); } else { - qla4xxx_process_response_queue(ha); - writel(0, &ha->qla4_82xx_reg->host_int); + intr_status = readl(&ha->qla4_82xx_reg->host_status); + if (intr_status & HSRX_RISC_IOCB_INT) { + qla4xxx_process_response_queue(ha); + writel(0, &ha->qla4_82xx_reg->host_int); + } else { + ql4_printk(KERN_INFO, ha, "%s: spurious iocb interrupt...\n", + __func__); + goto exit_msix_rsp_q; + } } ha->isr_count++; exit_msix_rsp_q: @@ -1384,16 +1526,17 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen) int qla4xxx_request_irqs(struct scsi_qla_host *ha) { - int ret; + int ret = 0; + int rval = QLA_ERROR; if (is_qla40XX(ha)) goto try_intx; if (ql4xenablemsix == 2) { - /* Note: MSI Interrupts not supported for ISP8324 */ - if (is_qla8032(ha)) { - ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP8324, Falling back-to INTx mode\n", - __func__); + /* Note: MSI Interrupts not supported for ISP8324 and ISP8042 */ + if (is_qla8032(ha) || is_qla8042(ha)) { + ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP%04x, Falling back to INTx mode\n", + __func__, ha->pdev->device); goto try_intx; } goto try_msi; @@ -1409,9 +1552,9 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha) "MSI-X: Enabled (0x%X).\n", ha->revision_id)); goto irq_attached; } else { - if (is_qla8032(ha)) { - ql4_printk(KERN_INFO, ha, "%s: ISP8324: MSI-X: Falling back-to INTx
mode. ret = %d\n", - __func__, ret); + if (is_qla8032(ha) || is_qla8042(ha)) { + ql4_printk(KERN_INFO, ha, "%s: ISP%04x: MSI-X: Falling back to INTx mode. ret = %d\n", + __func__, ha->pdev->device, ret); goto try_intx; } } @@ -1437,15 +1580,13 @@ try_msi: } } - /* - * Prevent interrupts from falling back to INTx mode in cases where - * interrupts cannot get acquired through MSI-X or MSI mode. - */ +try_intx: if (is_qla8022(ha)) { - ql4_printk(KERN_WARNING, ha, "IRQ not attached -- %d.\n", ret); + ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n", + __func__); goto irq_not_attached; } -try_intx: + /* Trying INTx */ ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, IRQF_SHARED, DRIVER_NAME, ha); @@ -1465,9 +1606,10 @@ irq_attached: set_bit(AF_IRQ_ATTACHED, &ha->flags); ha->host->irq = ha->pdev->irq; ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n", - __func__, ha->pdev->irq); + __func__, ha->pdev->irq); + rval = QLA_SUCCESS; irq_not_attached: - return ret; + return rval; } void qla4xxx_free_irqs(struct scsi_qla_host *ha) diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 160d3369721..0a3312c6dd6 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -1,10 +1,11 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ +#include <linux/ctype.h> #include "ql4_def.h" #include "ql4_glbl.h" #include "ql4_dbg.h" @@ -52,7 +53,7 @@ static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha) { int rval = 1; - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { if (test_bit(AF_IRQ_ATTACHED, &ha->flags) && test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) rval = 0; @@ -211,9 +212,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, ha->host_no, __func__)); goto mbox_exit; } - DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...," - " Scheduling Adapter Reset\n", ha->host_no, - mbx_cmd[0])); + ql4_printk(KERN_WARNING, ha, "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n", + ha->host_no, mbx_cmd[0]); ha->mailbox_timeout_count++; mbx_sts[0] = (-1); set_bit(DPC_RESET_HA, &ha->dpc_flags); @@ -223,7 +223,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98, CRB_NIU_XG_PAUSE_CTL_P0 | CRB_NIU_XG_PAUSE_CTL_P1); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n", __func__); qla4_83xx_disable_pause(ha); @@ -250,15 +250,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, break; case MBOX_STS_BUSY: - DEBUG2( printk("scsi%ld: %s: Cmd = %08X, ISP BUSY\n", - ha->host_no, __func__, mbx_cmd[0])); + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cmd = %08X, ISP BUSY\n", + ha->host_no, __func__, mbx_cmd[0]); ha->mailbox_timeout_count++; break; default: - DEBUG2(printk("scsi%ld: %s: **** FAILED, cmd = %08X, " - "sts = %08X ****\n", ha->host_no, __func__, - mbx_cmd[0], mbx_sts[0])); + ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n", + ha->host_no, __func__, mbx_cmd[0], mbx_sts[0], + mbx_sts[1], mbx_sts[2], mbx_sts[3], mbx_sts[4], + mbx_sts[5], mbx_sts[6], mbx_sts[7]); break; } spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -382,7 +383,6 @@ qla4xxx_set_ifcb(struct scsi_qla_host
*ha, uint32_t *mbox_cmd, mbox_cmd[2] = LSDW(init_fw_cb_dma); mbox_cmd[3] = MSDW(init_fw_cb_dma); mbox_cmd[4] = sizeof(struct addr_ctrl_blk); - mbox_cmd[5] = (IFCB_VER_MAX << 8) | IFCB_VER_MIN; if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) != QLA_SUCCESS) { @@ -417,6 +417,38 @@ qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, return QLA_SUCCESS; } +uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state) +{ + uint8_t ipaddr_state; + + switch (fw_ipaddr_state) { + case IP_ADDRSTATE_UNCONFIGURED: + ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED; + break; + case IP_ADDRSTATE_INVALID: + ipaddr_state = ISCSI_IPDDRESS_STATE_INVALID; + break; + case IP_ADDRSTATE_ACQUIRING: + ipaddr_state = ISCSI_IPDDRESS_STATE_ACQUIRING; + break; + case IP_ADDRSTATE_TENTATIVE: + ipaddr_state = ISCSI_IPDDRESS_STATE_TENTATIVE; + break; + case IP_ADDRSTATE_DEPRICATED: + ipaddr_state = ISCSI_IPDDRESS_STATE_DEPRECATED; + break; + case IP_ADDRSTATE_PREFERRED: + ipaddr_state = ISCSI_IPDDRESS_STATE_VALID; + break; + case IP_ADDRSTATE_DISABLING: + ipaddr_state = ISCSI_IPDDRESS_STATE_DISABLING; + break; + default: + ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED; + } + return ipaddr_state; +} + static void qla4xxx_update_local_ip(struct scsi_qla_host *ha, struct addr_ctrl_blk *init_fw_cb) @@ -424,7 +456,7 @@ qla4xxx_update_local_ip(struct scsi_qla_host *ha, ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts); ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts); ha->ip_config.ipv4_addr_state = - le16_to_cpu(init_fw_cb->ipv4_addr_state); + qla4xxx_set_ipaddr_state(init_fw_cb->ipv4_addr_state); ha->ip_config.eth_mtu_size = le16_to_cpu(init_fw_cb->eth_mtu_size); ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port); @@ -433,6 +465,8 @@ qla4xxx_update_local_ip(struct scsi_qla_host *ha, ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts); ha->ip_config.ipv6_addl_options = le16_to_cpu(init_fw_cb->ipv6_addtl_opts); + ha->ip_config.ipv6_tcp_options = + le16_to_cpu(init_fw_cb->ipv6_tcp_opts); } /* Save IPv4 Address Info */ @@ -447,17 +481,65 @@ qla4xxx_update_local_ip(struct scsi_qla_host *ha, sizeof(init_fw_cb->ipv4_gw_addr))); ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag); + ha->ip_config.control = init_fw_cb->control; + ha->ip_config.tcp_wsf = init_fw_cb->ipv4_tcp_wsf; + ha->ip_config.ipv4_tos = init_fw_cb->ipv4_tos; + ha->ip_config.ipv4_cache_id = init_fw_cb->ipv4_cacheid; + ha->ip_config.ipv4_alt_cid_len = init_fw_cb->ipv4_dhcp_alt_cid_len; + memcpy(ha->ip_config.ipv4_alt_cid, init_fw_cb->ipv4_dhcp_alt_cid, + min(sizeof(ha->ip_config.ipv4_alt_cid), + sizeof(init_fw_cb->ipv4_dhcp_alt_cid))); + ha->ip_config.ipv4_vid_len = init_fw_cb->ipv4_dhcp_vid_len; + memcpy(ha->ip_config.ipv4_vid, init_fw_cb->ipv4_dhcp_vid, + min(sizeof(ha->ip_config.ipv4_vid), + sizeof(init_fw_cb->ipv4_dhcp_vid))); + ha->ip_config.ipv4_ttl = init_fw_cb->ipv4_ttl; + ha->ip_config.def_timeout = le16_to_cpu(init_fw_cb->def_timeout); + ha->ip_config.abort_timer = init_fw_cb->abort_timer; + ha->ip_config.iscsi_options = le16_to_cpu(init_fw_cb->iscsi_opts); + ha->ip_config.iscsi_max_pdu_size = + le16_to_cpu(init_fw_cb->iscsi_max_pdu_size); + ha->ip_config.iscsi_first_burst_len = + le16_to_cpu(init_fw_cb->iscsi_fburst_len); + ha->ip_config.iscsi_max_outstnd_r2t = + le16_to_cpu(init_fw_cb->iscsi_max_outstnd_r2t); + ha->ip_config.iscsi_max_burst_len = + le16_to_cpu(init_fw_cb->iscsi_max_burst_len); + memcpy(ha->ip_config.iscsi_name, init_fw_cb->iscsi_name, + 
min(sizeof(ha->ip_config.iscsi_name), + sizeof(init_fw_cb->iscsi_name))); if (is_ipv6_enabled(ha)) { /* Save IPv6 Address */ ha->ip_config.ipv6_link_local_state = - le16_to_cpu(init_fw_cb->ipv6_lnk_lcl_addr_state); + qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_lnk_lcl_addr_state); ha->ip_config.ipv6_addr0_state = - le16_to_cpu(init_fw_cb->ipv6_addr0_state); + qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr0_state); ha->ip_config.ipv6_addr1_state = - le16_to_cpu(init_fw_cb->ipv6_addr1_state); - ha->ip_config.ipv6_default_router_state = - le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state); + qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr1_state); + + switch (le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state)) { + case IPV6_RTRSTATE_UNKNOWN: + ha->ip_config.ipv6_default_router_state = + ISCSI_ROUTER_STATE_UNKNOWN; + break; + case IPV6_RTRSTATE_MANUAL: + ha->ip_config.ipv6_default_router_state = + ISCSI_ROUTER_STATE_MANUAL; + break; + case IPV6_RTRSTATE_ADVERTISED: + ha->ip_config.ipv6_default_router_state = + ISCSI_ROUTER_STATE_ADVERTISED; + break; + case IPV6_RTRSTATE_STALE: + ha->ip_config.ipv6_default_router_state = + ISCSI_ROUTER_STATE_STALE; + break; + default: + ha->ip_config.ipv6_default_router_state = + ISCSI_ROUTER_STATE_UNKNOWN; + } + ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE; ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80; @@ -478,6 +560,23 @@ qla4xxx_update_local_ip(struct scsi_qla_host *ha, ha->ip_config.ipv6_vlan_tag = be16_to_cpu(init_fw_cb->ipv6_vlan_tag); ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port); + ha->ip_config.ipv6_cache_id = init_fw_cb->ipv6_cache_id; + ha->ip_config.ipv6_flow_lbl = + le16_to_cpu(init_fw_cb->ipv6_flow_lbl); + ha->ip_config.ipv6_traffic_class = + init_fw_cb->ipv6_traffic_class; + ha->ip_config.ipv6_hop_limit = init_fw_cb->ipv6_hop_limit; + ha->ip_config.ipv6_nd_reach_time = + le32_to_cpu(init_fw_cb->ipv6_nd_reach_time); + ha->ip_config.ipv6_nd_rexmit_timer = + le32_to_cpu(init_fw_cb->ipv6_nd_rexmit_timer); + ha->ip_config.ipv6_nd_stale_timeout = + le32_to_cpu(init_fw_cb->ipv6_nd_stale_timeout); + ha->ip_config.ipv6_dup_addr_detect_count = + init_fw_cb->ipv6_dup_addr_detect_count; + ha->ip_config.ipv6_gw_advrt_mtu = + le32_to_cpu(init_fw_cb->ipv6_gw_advrt_mtu); + ha->ip_config.ipv6_tcp_wsf = init_fw_cb->ipv6_tcp_wsf; } } @@ -548,9 +647,6 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) goto exit_init_fw_cb; } - /* Initialize request and response queues. */ - qla4xxx_init_rings(ha); - /* Fill in the request and response queue information. 
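The IPv6 hunks above map the firmware's default-router state onto the ISCSI_ROUTER_STATE_* values and then seed the link-local address with the fixed fe80::/64 prefix before the interface identifier is filled in. The prefix construction is the standard one; a self-contained userspace sketch:

#include <stdint.h>
#include <string.h>

/* Build an IPv6 link-local address: fe80::/64 prefix in the top
 * 8 bytes, 64-bit interface identifier in the bottom 8 bytes.
 */
static void make_link_local(uint8_t addr[16], const uint8_t iface_id[8])
{
	memset(addr, 0, 16);
	addr[0] = 0xfe;			/* fe80::/10 link-local prefix */
	addr[1] = 0x80;
	memcpy(&addr[8], iface_id, 8);
}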
*/ init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out); init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in); @@ -902,6 +998,10 @@ int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha, "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT " "failed sts %04X %04X", __func__, mbox_sts[0], mbox_sts[1])); + if ((mbox_sts[0] == MBOX_STS_COMMAND_ERROR) && + (mbox_sts[1] == DDB_NOT_LOGGED_IN)) { + set_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags); + } } return status; @@ -1129,6 +1229,7 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry, { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; + uint32_t scsi_lun[2]; int status = QLA_SUCCESS; DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no, @@ -1140,10 +1241,16 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry, */ memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_sts)); + int_to_scsilun(lun, (struct scsi_lun *) scsi_lun); mbox_cmd[0] = MBOX_CMD_LUN_RESET; mbox_cmd[1] = ddb_entry->fw_ddb_index; - mbox_cmd[2] = lun << 8; + /* FW expects LUN bytes 0-3 in Incoming Mailbox 2 + * (LUN byte 0 is LSByte, byte 3 is MSByte) */ + mbox_cmd[2] = cpu_to_le32(scsi_lun[0]); + /* FW expects LUN bytes 4-7 in Incoming Mailbox 3 + * (LUN byte 4 is LSByte, byte 7 is MSByte) */ + mbox_cmd[3] = cpu_to_le32(scsi_lun[1]); mbox_cmd[5] = 0x01; /* Immediate Command Enable */ qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]); @@ -1263,16 +1370,28 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha) } /* Save version information. */ - ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major); - ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor); - ha->patch_number = le16_to_cpu(about_fw->fw_patch); - ha->build_number = le16_to_cpu(about_fw->fw_build); - ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major); - ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor); - ha->bootload_major = le16_to_cpu(about_fw->bootload_major); - ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor); - ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch); - ha->bootload_build = le16_to_cpu(about_fw->bootload_build); + ha->fw_info.fw_major = le16_to_cpu(about_fw->fw_major); + ha->fw_info.fw_minor = le16_to_cpu(about_fw->fw_minor); + ha->fw_info.fw_patch = le16_to_cpu(about_fw->fw_patch); + ha->fw_info.fw_build = le16_to_cpu(about_fw->fw_build); + memcpy(ha->fw_info.fw_build_date, about_fw->fw_build_date, + sizeof(about_fw->fw_build_date)); + memcpy(ha->fw_info.fw_build_time, about_fw->fw_build_time, + sizeof(about_fw->fw_build_time)); + strcpy((char *)ha->fw_info.fw_build_user, + skip_spaces((char *)about_fw->fw_build_user)); + ha->fw_info.fw_load_source = le16_to_cpu(about_fw->fw_load_source); + ha->fw_info.iscsi_major = le16_to_cpu(about_fw->iscsi_major); + ha->fw_info.iscsi_minor = le16_to_cpu(about_fw->iscsi_minor); + ha->fw_info.bootload_major = le16_to_cpu(about_fw->bootload_major); + ha->fw_info.bootload_minor = le16_to_cpu(about_fw->bootload_minor); + ha->fw_info.bootload_patch = le16_to_cpu(about_fw->bootload_patch); + ha->fw_info.bootload_build = le16_to_cpu(about_fw->bootload_build); + strcpy((char *)ha->fw_info.extended_timestamp, + skip_spaces((char *)about_fw->extended_timestamp)); + + ha->fw_uptime_secs = le32_to_cpu(mbox_sts[5]); + ha->fw_uptime_msecs = le32_to_cpu(mbox_sts[6]); status = QLA_SUCCESS; exit_about_fw: @@ -1281,8 +1400,8 @@ exit_about_fw: return status; } -static int qla4xxx_get_default_ddb(struct 
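The LUN-reset hunk above is a real fix, not a cleanup: the old mbox_cmd[2] = lun << 8 only encoded single-level LUNs, while the replacement runs the LUN through int_to_scsilun() so all eight bytes of the SAM LUN structure reach the firmware, bytes 0-3 in mailbox 2 and bytes 4-7 in mailbox 3. The packing step in isolation (int_to_scsilun() is the stock SCSI-midlayer helper):

	uint32_t scsi_lun[2];

	int_to_scsilun(lun, (struct scsi_lun *)scsi_lun);
	mbox_cmd[2] = cpu_to_le32(scsi_lun[0]);	/* LUN bytes 0-3, byte 0 = LSByte */
	mbox_cmd[3] = cpu_to_le32(scsi_lun[1]);	/* LUN bytes 4-7, byte 4 = LSByte */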
scsi_qla_host *ha, uint32_t options, - dma_addr_t dma_addr) +int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options, + dma_addr_t dma_addr) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; @@ -1410,6 +1529,55 @@ exit_bootdb_failed: return status; } +int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index) +{ + uint32_t dev_db_start_offset; + uint32_t dev_db_end_offset; + int status = QLA_ERROR; + + memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); + + if (is_qla40XX(ha)) { + dev_db_start_offset = FLASH_OFFSET_DB_INFO; + dev_db_end_offset = FLASH_OFFSET_DB_END; + } else { + dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + + (ha->hw.flt_region_ddb << 2); + /* flt_ddb_size is DDB table size for both ports + * so divide it by 2 to calculate the offset for second port + */ + if (ha->port_num == 1) + dev_db_start_offset += (ha->hw.flt_ddb_size / 2); + + dev_db_end_offset = dev_db_start_offset + + (ha->hw.flt_ddb_size / 2); + } + + dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry)); + + if (dev_db_start_offset > dev_db_end_offset) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s:Invalid DDB index %d", __func__, + ddb_index)); + goto exit_fdb_failed; + } + + if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, + sizeof(*fw_ddb_entry)) != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n", + ha->host_no, __func__); + goto exit_fdb_failed; + } + + if (fw_ddb_entry->cookie == DDB_VALID_COOKIE) + status = QLA_SUCCESS; + +exit_fdb_failed: + return status; +} + int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password, uint16_t idx) { @@ -1461,13 +1629,26 @@ exit_get_chap: return ret; } -static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, - char *password, uint16_t idx, int bidi) +/** + * qla4xxx_set_chap - Make a chap entry at the given index + * @ha: pointer to adapter structure + * @username: CHAP username to set + * @password: CHAP password to set + * @idx: CHAP index at which to make the entry + * @bidi: type of chap entry (chap_in or chap_out) + * + * Create chap entry at the given index with the information provided. + * + * Note: Caller should acquire the chap lock before getting here. + **/ +int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, + uint16_t idx, int bidi) { int ret = 0; int rval = QLA_ERROR; uint32_t offset = 0; struct ql4_chap_table *chap_table; + uint32_t chap_size = 0; dma_addr_t chap_dma; chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); @@ -1485,7 +1666,20 @@ static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN); strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN); chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); - offset = FLASH_CHAP_OFFSET | (idx * sizeof(struct ql4_chap_table)); + + if (is_qla40XX(ha)) { + chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table); + offset = FLASH_CHAP_OFFSET; + } else { /* Single region contains CHAP info for both ports which is + * divided into half for each port. 
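qla4xxx_flashdb_by_index(), added above, locates one DDB entry in a flash region that stores both ports' tables back to back, so port 1 starts half-way in; FLT offsets are kept as 32-bit word addresses, hence the << 2. The offset arithmetic on its own, with the driver's constants reduced to parameters and the names placeholders:

#include <stdint.h>
#include <stddef.h>

/* Returns the byte offset of entry ddb_index, or -1 if it falls
 * outside this port's half of the region.
 */
static int64_t ddb_flash_offset(uint32_t raw_base, uint32_t flt_region_ddb,
				uint32_t flt_ddb_size, int port_num,
				uint16_t ddb_index, size_t entry_size)
{
	uint32_t start = raw_base + (flt_region_ddb << 2);	/* words to bytes */
	uint32_t half = flt_ddb_size / 2;	/* region covers two ports */
	uint32_t end;

	if (port_num == 1)
		start += half;
	end = start + half;

	start += ddb_index * entry_size;
	return (start > end) ? -1 : (int64_t)start;
}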
+ */ + chap_size = ha->hw.flt_chap_size / 2; + offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); + if (ha->port_num == 1) + offset += chap_size; + } + + offset += (idx * sizeof(struct ql4_chap_table)); rval = qla4xxx_set_flash(ha, chap_dma, offset, sizeof(struct ql4_chap_table), FLASH_OPT_RMW_COMMIT); @@ -1503,6 +1697,62 @@ exit_set_chap: return ret; } + +int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username, + char *password, uint16_t chap_index) +{ + int rval = QLA_ERROR; + struct ql4_chap_table *chap_table = NULL; + int max_chap_entries; + + if (!ha->chap_list) { + ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); + rval = QLA_ERROR; + goto exit_uni_chap; + } + + if (!username || !password) { + ql4_printk(KERN_ERR, ha, "No memory for username & secret\n"); + rval = QLA_ERROR; + goto exit_uni_chap; + } + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + if (chap_index > max_chap_entries) { + ql4_printk(KERN_ERR, ha, "Invalid Chap index\n"); + rval = QLA_ERROR; + goto exit_uni_chap; + } + + mutex_lock(&ha->chap_sem); + chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index; + if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { + rval = QLA_ERROR; + goto exit_unlock_uni_chap; + } + + if (!(chap_table->flags & BIT_7)) { + ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n"); + rval = QLA_ERROR; + goto exit_unlock_uni_chap; + } + + strncpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN); + strncpy(username, chap_table->name, MAX_CHAP_NAME_LEN); + + rval = QLA_SUCCESS; + +exit_unlock_uni_chap: + mutex_unlock(&ha->chap_sem); +exit_uni_chap: + return rval; +} + /** * qla4xxx_get_chap_index - Get chap index given username and secret * @ha: pointer to adapter structure @@ -1524,7 +1774,7 @@ int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username, int max_chap_entries = 0; struct ql4_chap_table *chap_table; - if (is_qla8022(ha)) + if (is_qla80XX(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct ql4_chap_table); else @@ -1611,6 +1861,45 @@ int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha, return status; } +/** + * qla4_84xx_extend_idc_tmo - Extend IDC Timeout. + * @ha: Pointer to host adapter structure. + * @ext_tmo: idc timeout value + * + * Requests firmware to extend the idc timeout value. 
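qla4xxx_get_uni_chap_at_index(), added above, validates a cached entry in two steps: the cookie must equal CHAP_VALID_COOKIE, and flag BIT_7 must be set to mark the entry as unidirectional. One observation: the bounds test uses chap_index > max_chap_entries, but valid indices run 0 to max_chap_entries - 1, so >= would be the tighter guard. The validation shape, sketched with that stricter check:

	if (chap_index >= max_chap_entries)	/* tighter than the patch's '>' */
		return QLA_ERROR;

	chap = (struct ql4_chap_table *)ha->chap_list + chap_index;
	if (chap->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE))
		return QLA_ERROR;		/* slot empty or stale */
	if (!(chap->flags & BIT_7))
		return QLA_ERROR;		/* not a unidirectional entry */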
+ **/ +static int qla4_84xx_extend_idc_tmo(struct scsi_qla_host *ha, uint32_t ext_tmo) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + ext_tmo &= 0xf; + + mbox_cmd[0] = MBOX_CMD_IDC_TIME_EXTEND; + mbox_cmd[1] = ((ha->idc_info.request_desc & 0xfffff0ff) | + (ext_tmo << 8)); /* new timeout */ + mbox_cmd[2] = ha->idc_info.info1; + mbox_cmd[3] = ha->idc_info.info2; + mbox_cmd[4] = ha->idc_info.info3; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, + mbox_cmd, mbox_sts); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: failed status %04X\n", + ha->host_no, __func__, mbox_sts[0])); + return QLA_ERROR; + } else { + ql4_printk(KERN_INFO, ha, "%s: IDC timeout extended by %d secs\n", + __func__, ext_tmo); + } + + return QLA_SUCCESS; +} + int qla4xxx_disable_acb(struct scsi_qla_host *ha) { uint32_t mbox_cmd[MBOX_REG_COUNT]; @@ -1627,6 +1916,24 @@ int qla4xxx_disable_acb(struct scsi_qla_host *ha) DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB " "failed w/ status %04X %04X %04X", __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2])); + } else { + if (is_qla8042(ha) && + test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) && + (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) { + /* + * Disable ACB mailbox command takes time to complete + * based on the total number of targets connected. + * For 512 targets, it took approximately 5 secs to + * complete. Setting the timeout value to 8, with the 3 + * secs buffer. + */ + qla4_84xx_extend_idc_tmo(ha, IDC_EXTEND_TOV); + if (!wait_for_completion_timeout(&ha->disable_acb_comp, + IDC_EXTEND_TOV * HZ)) { + ql4_printk(KERN_WARNING, ha, "%s: Disable ACB Completion not received\n", + __func__); + } + } } return status; } @@ -2033,8 +2340,125 @@ int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha) ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, mbox_sts[0]); else - DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", - __func__)); + ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", __func__); + + return status; +} + +int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + struct addr_ctrl_blk *acb = NULL; + uint32_t acb_len = sizeof(struct addr_ctrl_blk); + int rval = QLA_SUCCESS; + dma_addr_t acb_dma; + + acb = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct addr_ctrl_blk), + &acb_dma, GFP_KERNEL); + if (!acb) { + ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__); + rval = QLA_ERROR; + goto exit_config_acb; + } + memset(acb, 0, acb_len); + + switch (acb_config) { + case ACB_CONFIG_DISABLE: + rval = qla4xxx_get_acb(ha, acb_dma, 0, acb_len); + if (rval != QLA_SUCCESS) + goto exit_free_acb; + + rval = qla4xxx_disable_acb(ha); + if (rval != QLA_SUCCESS) + goto exit_free_acb; + + if (!ha->saved_acb) + ha->saved_acb = kzalloc(acb_len, GFP_KERNEL); + + if (!ha->saved_acb) { + ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", + __func__); + rval = QLA_ERROR; + goto exit_free_acb; + } + memcpy(ha->saved_acb, acb, acb_len); + break; + case ACB_CONFIG_SET: + + if (!ha->saved_acb) { + ql4_printk(KERN_ERR, ha, "%s: Can't set ACB, Saved ACB not available\n", + __func__); + rval = QLA_ERROR; + goto exit_free_acb; + } + + memcpy(acb, ha->saved_acb, acb_len); + + rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); + if (rval != QLA_SUCCESS) + goto 
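qla4_84xx_extend_idc_tmo(), added above, rewrites only bits 8-11 of the saved IDC request descriptor, the 4-bit timeout nibble, which is what the 0xfffff0ff mask preserves; qla4xxx_disable_acb() needs it because an ACB disable with many sessions (about 5 s at 512 targets, per the comment below) can outlive the default IDC window. The nibble splice in isolation:

	uint32_t ext_tmo = new_tmo & 0xf;	/* timeout field is 4 bits wide */

	mbox_cmd[1] = (ha->idc_info.request_desc & 0xfffff0ff) |
		      (ext_tmo << 8);		/* new value into bits 8-11 */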
exit_free_acb; + + break; + default: + ql4_printk(KERN_ERR, ha, "%s: Invalid ACB Configuration\n", + __func__); + } + +exit_free_acb: + dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb, + acb_dma); +exit_config_acb: + if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) { + kfree(ha->saved_acb); + ha->saved_acb = NULL; + } + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s %s\n", __func__, + rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED")); + return rval; +} + +int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_GET_PORT_CONFIG; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, + mbox_cmd, mbox_sts); + if (status == QLA_SUCCESS) + *config = mbox_sts[1]; + else + ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, + mbox_sts[0]); + + return status; +} + +int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_SET_PORT_CONFIG; + mbox_cmd[1] = *config; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, + mbox_cmd, mbox_sts); + if (status != QLA_SUCCESS) + ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, + mbox_sts[0]); return status; } diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c index 325db1f2c09..3bf418fbd43 100644 --- a/drivers/scsi/qla4xxx/ql4_nvram.c +++ b/drivers/scsi/qla4xxx/ql4_nvram.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h index dba0514d1c7..e97d79ff16f 100644 --- a/drivers/scsi/qla4xxx/ql4_nvram.h +++ b/drivers/scsi/qla4xxx/ql4_nvram.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 71d3d234f52..9dbdb4be2d8 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. 
*/ @@ -14,6 +14,7 @@ #include <asm-generic/io-64-nonatomic-lo-hi.h> +#define TIMEOUT_100_MS 100 #define MASK(n) DMA_BIT_MASK(n) #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) @@ -1176,6 +1177,112 @@ qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose) return 0; } +/** + * qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory + * @ha: Pointer to adapter structure + * @addr: Flash address to write to + * @data: Data to be written + * @count: word_count to be written + * + * Return: On success return QLA_SUCCESS + * On error return QLA_ERROR + **/ +int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, + uint32_t *data, uint32_t count) +{ + int i, j; + uint32_t agt_ctrl; + unsigned long flags; + int ret_val = QLA_SUCCESS; + + /* Only 128-bit aligned access */ + if (addr & 0xF) { + ret_val = QLA_ERROR; + goto exit_ms_mem_write; + } + + write_lock_irqsave(&ha->hw_lock, flags); + + /* Write address */ + ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n", + __func__); + goto exit_ms_mem_write_unlock; + } + + for (i = 0; i < count; i++, addr += 16) { + if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, + QLA8XXX_ADDR_QDR_NET_MAX)) || + (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, + QLA8XXX_ADDR_DDR_NET_MAX)))) { + ret_val = QLA_ERROR; + goto exit_ms_mem_write_unlock; + } + + ret_val = ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_ADDR_LO, + addr); + /* Write data */ + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_WRDATA_LO, + *data++); + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_WRDATA_HI, + *data++); + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_WRDATA_ULO, + *data++); + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_WRDATA_UHI, + *data++); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n", + __func__); + goto exit_ms_mem_write_unlock; + } + + /* Check write status */ + ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, + MIU_TA_CTL_WRITE_ENABLE); + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_CTRL, + MIU_TA_CTL_WRITE_START); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n", + __func__); + goto exit_ms_mem_write_unlock; + } + + for (j = 0; j < MAX_CTL_CHECK; j++) { + ret_val = ha->isp_ops->rd_reg_indirect(ha, + MD_MIU_TEST_AGT_CTRL, + &agt_ctrl); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n", + __func__); + goto exit_ms_mem_write_unlock; + } + if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) + break; + } + + /* Status check failed */ + if (j >= MAX_CTL_CHECK) { + printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n", + __func__); + ret_val = QLA_ERROR; + goto exit_ms_mem_write_unlock; + } + } + +exit_ms_mem_write_unlock: + write_unlock_irqrestore(&ha->hw_lock, flags); + +exit_ms_mem_write: + return ret_val; +} + static int qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) { @@ -1514,11 +1621,11 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha) drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); /* - * For ISP8324, drv_active register has 1 bit per function, + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit 
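qla4_8xxx_ms_mem_write_128b(), added above, accepts only 16-byte-aligned destinations (the addr & 0xF check) and writes each 128-bit datum as four 32-bit register writes, then kicks the MIU test agent and polls its busy bit. The per-chunk shape, with the driver's indirect register accessors reduced to hypothetical wr()/rd() helpers:

	if (addr & 0xF)
		return -EINVAL;			/* 128-bit aligned access only */

	for (i = 0; i < count; i++, addr += 16, data += 4) {
		wr(MD_MIU_TEST_AGT_ADDR_LO, addr);
		wr(MD_MIU_TEST_AGT_WRDATA_LO,  data[0]);
		wr(MD_MIU_TEST_AGT_WRDATA_HI,  data[1]);
		wr(MD_MIU_TEST_AGT_WRDATA_ULO, data[2]);
		wr(MD_MIU_TEST_AGT_WRDATA_UHI, data[3]);
		wr(MD_MIU_TEST_AGT_CTRL, MIU_TA_CTL_WRITE_ENABLE);
		wr(MD_MIU_TEST_AGT_CTRL, MIU_TA_CTL_WRITE_START);

		for (j = 0; j < MAX_CTL_CHECK; j++)
			if (!(rd(MD_MIU_TEST_AGT_CTRL) & MIU_TA_CTL_BUSY))
				break;		/* write accepted */
		if (j >= MAX_CTL_CHECK)
			return -EIO;		/* agent stuck busy */
	}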
for the function. * For ISP8022, drv_active has 4 bits per function */ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) drv_active |= (1 << ha->func_num); else drv_active |= (1 << (ha->func_num * 4)); @@ -1536,11 +1643,11 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha) drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); /* - * For ISP8324, drv_active register has 1 bit per function, + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function */ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) drv_active &= ~(1 << (ha->func_num)); else drv_active &= ~(1 << (ha->func_num * 4)); @@ -1559,11 +1666,11 @@ inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha) drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); /* - * For ISP8324, drv_active register has 1 bit per function, + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function */ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) rval = drv_state & (1 << ha->func_num); else rval = drv_state & (1 << (ha->func_num * 4)); @@ -1581,11 +1688,11 @@ void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha) drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); /* - * For ISP8324, drv_active register has 1 bit per function, + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function */ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) drv_state |= (1 << ha->func_num); else drv_state |= (1 << (ha->func_num * 4)); @@ -1602,11 +1709,11 @@ void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha) drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); /* - * For ISP8324, drv_active register has 1 bit per function, + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function */ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) drv_state &= ~(1 << ha->func_num); else drv_state &= ~(1 << (ha->func_num * 4)); @@ -1624,11 +1731,11 @@ qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha) qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE); /* - * For ISP8324, drv_active register has 1 bit per function, + * For ISP8324 and ISP8042, drv_active register has 1 bit per function, * shift 1 by func_num to set a bit for the function. * For ISP8022, drv_active has 4 bits per function. 
*/ - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) qsnt_state |= (1 << ha->func_num); else qsnt_state |= (2 << (ha->func_num * 4)); @@ -1714,6 +1821,101 @@ void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha) qla4_82xx_rom_unlock(ha); } +static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha, + uint32_t addr1, uint32_t mask) +{ + unsigned long timeout; + uint32_t rval = QLA_SUCCESS; + uint32_t temp; + + timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); + do { + ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); + if ((temp & mask) != 0) + break; + + if (time_after_eq(jiffies, timeout)) { + ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n"); + return QLA_ERROR; + } + } while (1); + + return rval; +} + +uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1, + uint32_t addr3, uint32_t mask, uint32_t addr, + uint32_t *data_ptr) +{ + int rval = QLA_SUCCESS; + uint32_t temp; + uint32_t data; + + rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); + if (rval) + goto exit_ipmdio_rd_reg; + + temp = (0x40000000 | addr); + ha->isp_ops->wr_reg_indirect(ha, addr1, temp); + + rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); + if (rval) + goto exit_ipmdio_rd_reg; + + ha->isp_ops->rd_reg_indirect(ha, addr3, &data); + *data_ptr = data; + +exit_ipmdio_rd_reg: + return rval; +} + + +static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha, + uint32_t addr1, + uint32_t addr2, + uint32_t addr3, + uint32_t mask) +{ + unsigned long timeout; + uint32_t temp; + uint32_t rval = QLA_SUCCESS; + + timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); + do { + ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp); + if ((temp & 0x1) != 1) + break; + if (time_after_eq(jiffies, timeout)) { + ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n"); + return QLA_ERROR; + } + } while (1); + + return rval; +} + +static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha, + uint32_t addr1, uint32_t addr3, + uint32_t mask, uint32_t addr, + uint32_t value) +{ + int rval = QLA_SUCCESS; + + rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); + if (rval) + goto exit_ipmdio_wr_reg; + + ha->isp_ops->wr_reg_indirect(ha, addr3, value); + ha->isp_ops->wr_reg_indirect(ha, addr1, addr); + + rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); + if (rval) + goto exit_ipmdio_wr_reg; + +exit_ipmdio_wr_reg: + return rval; +} + static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) @@ -1737,6 +1939,208 @@ static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha, *d_ptr = data_ptr; } +static int qla4_83xx_check_dma_engine_state(struct scsi_qla_host *ha) +{ + int rval = QLA_SUCCESS; + uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; + uint64_t dma_base_addr = 0; + struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL; + + tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) + ha->fw_dump_tmplt_hdr; + dma_eng_num = + tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX]; + dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS + + (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET); + + /* Read the pex-dma's command-status-and-control register. */ + rval = ha->isp_ops->rd_reg_indirect(ha, + (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL), + &cmd_sts_and_cntrl); + + if (rval) + return QLA_ERROR; + + /* Check if requested pex-dma engine is available. 
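ql4_84xx_poll_wait_for_ready(), added above, is the stock jiffies-bounded poll: compute a deadline with msecs_to_jiffies(), spin on the register, and give up once time_after_eq() fires. The generic form, again with a hypothetical rd() accessor:

	unsigned long deadline = jiffies + msecs_to_jiffies(100);
	uint32_t v;

	for (;;) {
		rd(addr, &v);
		if (v & mask)
			return 0;		/* ready bit came up */
		if (time_after_eq(jiffies, deadline))
			return -ETIMEDOUT;	/* 100 ms budget exhausted */
	}

Note the driver spins without cpu_relax() or sleeping; in non-atomic context a usleep_range() inside the loop would be the friendlier variant.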
*/ + if (cmd_sts_and_cntrl & BIT_31) + return QLA_SUCCESS; + else + return QLA_ERROR; +} + +static int qla4_83xx_start_pex_dma(struct scsi_qla_host *ha, + struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr) +{ + int rval = QLA_SUCCESS, wait = 0; + uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0; + uint64_t dma_base_addr = 0; + struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL; + + tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *) + ha->fw_dump_tmplt_hdr; + dma_eng_num = + tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX]; + dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS + + (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET); + + rval = ha->isp_ops->wr_reg_indirect(ha, + dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_LOW, + m_hdr->desc_card_addr); + if (rval) + goto error_exit; + + rval = ha->isp_ops->wr_reg_indirect(ha, + dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_HIGH, 0); + if (rval) + goto error_exit; + + rval = ha->isp_ops->wr_reg_indirect(ha, + dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL, + m_hdr->start_dma_cmd); + if (rval) + goto error_exit; + + /* Wait for dma operation to complete. */ + for (wait = 0; wait < QLA83XX_PEX_DMA_MAX_WAIT; wait++) { + rval = ha->isp_ops->rd_reg_indirect(ha, + (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL), + &cmd_sts_and_cntrl); + if (rval) + goto error_exit; + + if ((cmd_sts_and_cntrl & BIT_1) == 0) + break; + else + udelay(10); + } + + /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */ + if (wait >= QLA83XX_PEX_DMA_MAX_WAIT) { + rval = QLA_ERROR; + goto error_exit; + } + +error_exit: + return rval; +} + +static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + int rval = QLA_SUCCESS; + struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr = NULL; + uint32_t size, read_size; + uint8_t *data_ptr = (uint8_t *)*d_ptr; + void *rdmem_buffer = NULL; + dma_addr_t rdmem_dma; + struct qla4_83xx_pex_dma_descriptor dma_desc; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__)); + + rval = qla4_83xx_check_dma_engine_state(ha); + if (rval != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: DMA engine not available. Fallback to rdmem-read.\n", + __func__)); + return QLA_ERROR; + } + + m_hdr = (struct qla4_83xx_minidump_entry_rdmem_pex_dma *)entry_hdr; + rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, + QLA83XX_PEX_DMA_READ_SIZE, + &rdmem_dma, GFP_KERNEL); + if (!rdmem_buffer) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Unable to allocate rdmem dma buffer\n", + __func__)); + return QLA_ERROR; + } + + /* Prepare pex-dma descriptor to be written to MS memory. */ + /* dma-desc-cmd layout: + * 0-3: dma-desc-cmd 0-3 + * 4-7: pcid function number + * 8-15: dma-desc-cmd 8-15 + */ + dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f); + dma_desc.cmd.dma_desc_cmd |= ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4); + dma_desc.dma_bus_addr = rdmem_dma; + + size = 0; + read_size = 0; + /* + * Perform rdmem operation using pex-dma. + * Prepare dma in chunks of QLA83XX_PEX_DMA_READ_SIZE. 
+ */ + while (read_size < m_hdr->read_data_size) { + if (m_hdr->read_data_size - read_size >= + QLA83XX_PEX_DMA_READ_SIZE) + size = QLA83XX_PEX_DMA_READ_SIZE; + else { + size = (m_hdr->read_data_size - read_size); + + if (rdmem_buffer) + dma_free_coherent(&ha->pdev->dev, + QLA83XX_PEX_DMA_READ_SIZE, + rdmem_buffer, rdmem_dma); + + rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, size, + &rdmem_dma, + GFP_KERNEL); + if (!rdmem_buffer) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Unable to allocate rdmem dma buffer\n", + __func__)); + return QLA_ERROR; + } + dma_desc.dma_bus_addr = rdmem_dma; + } + + dma_desc.src_addr = m_hdr->read_addr + read_size; + dma_desc.cmd.read_data_size = size; + + /* Prepare: Write pex-dma descriptor to MS memory. */ + rval = qla4_8xxx_ms_mem_write_128b(ha, + (uint64_t)m_hdr->desc_card_addr, + (uint32_t *)&dma_desc, + (sizeof(struct qla4_83xx_pex_dma_descriptor)/16)); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, + "%s: Error writing rdmem-dma-init to MS !!!\n", + __func__); + goto error_exit; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Dma-desc: Instruct for rdmem dma (size 0x%x).\n", + __func__, size)); + /* Execute: Start pex-dma operation. */ + rval = qla4_83xx_start_pex_dma(ha, m_hdr); + if (rval != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi(%ld): start-pex-dma failed rval=0x%x\n", + ha->host_no, rval)); + goto error_exit; + } + + memcpy(data_ptr, rdmem_buffer, size); + data_ptr += size; + read_size += size; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__)); + + *d_ptr = (uint32_t *)data_ptr; + +error_exit: + if (rdmem_buffer) + dma_free_coherent(&ha->pdev->dev, size, rdmem_buffer, + rdmem_dma); + + return rval; +} + static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) @@ -2068,7 +2472,7 @@ static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha, #define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 #define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 -static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, +static int __qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { @@ -2089,7 +2493,7 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, if (r_addr & 0xf) { DEBUG2(ql4_printk(KERN_INFO, ha, - "[%s]: Read addr 0x%x not 16 bytes alligned\n", + "[%s]: Read addr 0x%x not 16 bytes aligned\n", __func__, r_addr)); return QLA_ERROR; } @@ -2150,6 +2554,21 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, return QLA_SUCCESS; } +static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t *data_ptr = *d_ptr; + int rval = QLA_SUCCESS; + + rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) + rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, + &data_ptr); + *d_ptr = data_ptr; + return rval; +} + static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, int index) @@ -2159,6 +2578,11 @@ static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha, "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", ha->host_no, index, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask)); + /* If driver encounters a new entry type that it cannot process, + * it should just skip the entry and adjust the total buffer size by 
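The read loop above transfers the rdmem region in QLA83XX_PEX_DMA_READ_SIZE chunks, reallocating an exact-size bounce buffer only for the final partial chunk, and the new qla4_8xxx_minidump_process_rdmem() wrapper tries this pex-dma fast path first, falling back to the original register-window read when the engine is busy or absent. The chunking skeleton, with write_descriptor_to_ms() and start_pex_dma_and_wait() as hypothetical stand-ins for the steps shown above:

	while (read_size < total_size) {
		size = min(total_size - read_size, (uint32_t)CHUNK_SIZE);
		dma_desc.src_addr = src_addr + read_size;	/* card-side source */
		dma_desc.cmd.read_data_size = size;
		write_descriptor_to_ms(&dma_desc);	/* 128-bit MS writes, above */
		start_pex_dma_and_wait();		/* kick engine, poll done bit */
		memcpy(dst + read_size, bounce_buf, size);	/* host-side copy-out */
		read_size += size;
	}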
+ * from subtracting the skipped bytes from it + */ + ha->fw_dump_skip_size += entry_hdr->entry_capture_size; } /* ISP83xx functions to process new minidump entries... */ @@ -2211,6 +2635,227 @@ exit_process_pollrd: return rval; } +static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + int loop_cnt; + uint32_t addr1, addr2, value, data, temp, wrval; + uint8_t stride, stride2; + uint16_t count; + uint32_t poll, mask, data_size, modify_mask; + uint32_t wait_count = 0; + uint32_t *data_ptr = *d_ptr; + struct qla8044_minidump_entry_rddfe *rddfe; + uint32_t rval = QLA_SUCCESS; + + rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr; + addr1 = le32_to_cpu(rddfe->addr_1); + value = le32_to_cpu(rddfe->value); + stride = le32_to_cpu(rddfe->stride); + stride2 = le32_to_cpu(rddfe->stride2); + count = le32_to_cpu(rddfe->count); + + poll = le32_to_cpu(rddfe->poll); + mask = le32_to_cpu(rddfe->mask); + modify_mask = le32_to_cpu(rddfe->modify_mask); + data_size = le32_to_cpu(rddfe->data_size); + + addr2 = addr1 + stride; + + for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) { + ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value)); + + wait_count = 0; + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); + rval = QLA_ERROR; + goto exit_process_rddfe; + } else { + ha->isp_ops->rd_reg_indirect(ha, addr2, &temp); + temp = temp & modify_mask; + temp = (temp | ((loop_cnt << 16) | loop_cnt)); + wrval = ((temp << 16) | temp); + + ha->isp_ops->wr_reg_indirect(ha, addr2, wrval); + ha->isp_ops->wr_reg_indirect(ha, addr1, value); + + wait_count = 0; + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + if (wait_count == poll) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", + __func__); + rval = QLA_ERROR; + goto exit_process_rddfe; + } + + ha->isp_ops->wr_reg_indirect(ha, addr1, + ((0x40000000 | value) + + stride2)); + wait_count = 0; + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", + __func__); + rval = QLA_ERROR; + goto exit_process_rddfe; + } + + ha->isp_ops->rd_reg_indirect(ha, addr2, &data); + + *data_ptr++ = cpu_to_le32(wrval); + *data_ptr++ = cpu_to_le32(data); + } + } + + *d_ptr = data_ptr; +exit_process_rddfe: + return rval; +} + +static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + int rval = QLA_SUCCESS; + uint32_t addr1, addr2, value1, value2, data, selval; + uint8_t stride1, stride2; + uint32_t addr3, addr4, addr5, addr6, addr7; + uint16_t count, loop_cnt; + uint32_t poll, mask; + uint32_t *data_ptr = *d_ptr; + struct qla8044_minidump_entry_rdmdio *rdmdio; + + rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr; + addr1 = le32_to_cpu(rdmdio->addr_1); + addr2 = le32_to_cpu(rdmdio->addr_2); + value1 = le32_to_cpu(rdmdio->value_1); + stride1 = le32_to_cpu(rdmdio->stride_1); + stride2 = le32_to_cpu(rdmdio->stride_2); + count = le32_to_cpu(rdmdio->count); + + poll = le32_to_cpu(rdmdio->poll); + mask = le32_to_cpu(rdmdio->mask); + value2 = le32_to_cpu(rdmdio->value_2); + + addr3 = addr1 + stride1; + + 
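/* Overview of the loop below: each iteration waits for the MDIO bus
 * to go idle, programs the register selector through writes at
 * addr2 - stride1 and addr2 - 2*stride1, issues the 0x2 read command
 * at addr2 - 3*stride1, waits for idle again, then fetches the result
 * from addr2 - 4*stride1 and emits a (selval, data) pair into the
 * dump buffer.
 */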
for (loop_cnt = 0; loop_cnt < count; loop_cnt++) { + rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, + addr3, mask); + if (rval) + goto exit_process_rdmdio; + + addr4 = addr2 - stride1; + rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4, + value2); + if (rval) + goto exit_process_rdmdio; + + addr5 = addr2 - (2 * stride1); + rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5, + value1); + if (rval) + goto exit_process_rdmdio; + + addr6 = addr2 - (3 * stride1); + rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, + addr6, 0x2); + if (rval) + goto exit_process_rdmdio; + + rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, + addr3, mask); + if (rval) + goto exit_process_rdmdio; + + addr7 = addr2 - (4 * stride1); + rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, + mask, addr7, &data); + if (rval) + goto exit_process_rdmdio; + + selval = (value2 << 18) | (value1 << 2) | 2; + + stride2 = le32_to_cpu(rdmdio->stride_2); + *data_ptr++ = cpu_to_le32(selval); + *data_ptr++ = cpu_to_le32(data); + + value1 = value1 + stride2; + *d_ptr = data_ptr; + } + +exit_process_rdmdio: + return rval; +} + +static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t addr1, addr2, value1, value2, poll, mask, r_value; + struct qla8044_minidump_entry_pollwr *pollwr_hdr; + uint32_t wait_count = 0; + uint32_t rval = QLA_SUCCESS; + + pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; + addr1 = le32_to_cpu(pollwr_hdr->addr_1); + addr2 = le32_to_cpu(pollwr_hdr->addr_2); + value1 = le32_to_cpu(pollwr_hdr->value_1); + value2 = le32_to_cpu(pollwr_hdr->value_2); + + poll = le32_to_cpu(pollwr_hdr->poll); + mask = le32_to_cpu(pollwr_hdr->mask); + + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value); + + if ((r_value & poll) != 0) + break; + + wait_count++; + } + + if (wait_count == poll) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); + rval = QLA_ERROR; + goto exit_process_pollwr; + } + + ha->isp_ops->wr_reg_indirect(ha, addr2, value2); + ha->isp_ops->wr_reg_indirect(ha, addr1, value1); + + wait_count = 0; + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value); + + if ((r_value & poll) != 0) + break; + wait_count++; + } + +exit_process_pollwr: + return rval; +} + static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) @@ -2366,6 +3011,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) uint64_t now; uint32_t timestamp; + ha->fw_dump_skip_size = 0; if (!ha->fw_dump) { ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n", __func__, ha->host_no); @@ -2398,13 +3044,13 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) (((uint8_t *)ha->fw_dump_tmplt_hdr) + tmplt_hdr->first_entry_offset); - if (is_qla8032(ha)) + if (is_qla8032(ha) || is_qla8042(ha)) tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] = tmplt_hdr->ocm_window_reg[ha->func_num]; /* Walk through the entry headers - validate/perform required action */ for (i = 0; i < num_entry_hdr; i++) { - if (data_collected >= ha->fw_dump_size) { + if (data_collected > ha->fw_dump_size) { ql4_printk(KERN_INFO, ha, "Data collected: [0x%x], Total Dump size: [0x%x]\n", data_collected, ha->fw_dump_size); @@ -2455,7 +3101,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) if (is_qla8022(ha)) { qla4_82xx_minidump_process_rdrom(ha, entry_hdr, 
&data_ptr); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { rval = qla4_83xx_minidump_process_rdrom(ha, entry_hdr, &data_ptr); @@ -2496,7 +3142,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) &data_ptr); break; case QLA83XX_POLLRD: - if (!is_qla8032(ha)) { + if (is_qla8022(ha)) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; } @@ -2506,7 +3152,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; case QLA83XX_RDMUX2: - if (!is_qla8032(ha)) { + if (is_qla8022(ha)) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; } @@ -2514,7 +3160,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) &data_ptr); break; case QLA83XX_POLLRDMWR: - if (!is_qla8032(ha)) { + if (is_qla8022(ha)) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; } @@ -2523,15 +3169,31 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) if (rval != QLA_SUCCESS) qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; + case QLA8044_RDDFE: + rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + case QLA8044_RDMDIO: + rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + case QLA8044_POLLWR: + rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; case QLA8XXX_RDNOP: default: qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; } - data_collected = (uint8_t *)data_ptr - - ((uint8_t *)((uint8_t *)ha->fw_dump + - ha->fw_dump_tmplt_size)); + data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->fw_dump; skip_nxt_entry: /* next entry in the template */ entry_hdr = (struct qla8xxx_minidump_entry_hdr *) @@ -2539,10 +3201,11 @@ skip_nxt_entry: entry_hdr->entry_size); } - if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) { + if ((data_collected + ha->fw_dump_skip_size) != ha->fw_dump_size) { ql4_printk(KERN_INFO, ha, "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n", data_collected, ha->fw_dump_size); + rval = QLA_ERROR; goto md_failed; } @@ -2597,63 +3260,35 @@ void qla4_8xxx_get_minidump(struct scsi_qla_host *ha) int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha) { int rval = QLA_ERROR; - int i, timeout; - uint32_t old_count, count, idc_ctrl; - int need_reset = 0, peg_stuck = 1; + int i; + uint32_t old_count, count; + int need_reset = 0; need_reset = ha->isp_ops->need_reset(ha); - old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); - - for (i = 0; i < 10; i++) { - timeout = msleep_interruptible(200); - if (timeout) { - qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, - QLA8XXX_DEV_FAILED); - return rval; - } - - count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); - if (count != old_count) - peg_stuck = 0; - } if (need_reset) { /* We are trying to perform a recovery here. */ - if (peg_stuck) + if (test_bit(AF_FW_RECOVERY, &ha->flags)) ha->isp_ops->rom_lock_recovery(ha); - goto dev_initialize; } else { - /* Start of day for this ha context. */ - if (peg_stuck) { - /* Either we are the first or recovery in progress. */ - ha->isp_ops->rom_lock_recovery(ha); - goto dev_initialize; - } else { - /* Firmware already running. 
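Two accounting fixes thread through the dump-collection hunks above: data_collected is now measured from the start of the fw_dump buffer rather than past the template header, the overflow test became strictly greater-than, and the final consistency check admits skipped entries, since qla4_8xxx_mark_entry_skipped() now accumulates each skipped entry's capture size into fw_dump_skip_size. The resulting invariant:

	/* Every byte of the dump buffer is either captured data
	 * (template header included) or accounted-for skipped size.
	 */
	data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->fw_dump;
	if (data_collected + ha->fw_dump_skip_size != ha->fw_dump_size)
		rval = QLA_ERROR;	/* template and buffer disagree */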
*/ - rval = QLA_SUCCESS; - goto dev_ready; + old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); + for (i = 0; i < 10; i++) { + msleep(200); + count = qla4_8xxx_rd_direct(ha, + QLA8XXX_PEG_ALIVE_COUNTER); + if (count != old_count) { + rval = QLA_SUCCESS; + goto dev_ready; + } } + ha->isp_ops->rom_lock_recovery(ha); } -dev_initialize: /* set to DEV_INITIALIZING */ ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n"); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING); - /* - * For ISP8324, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, reset it after - * device goes to INIT state. - */ - if (is_qla8032(ha)) { - idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); - if (idc_ctrl & GRACEFUL_RESET_BIT1) { - qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, - (idc_ctrl & ~GRACEFUL_RESET_BIT1)); - set_bit(AF_83XX_NO_FW_DUMP, &ha->flags); - } - } - ha->isp_ops->idc_unlock(ha); if (is_qla8022(ha)) @@ -2846,7 +3481,7 @@ int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha) * If we are the first driver to load and * ql4xdontresethba is not set, clear IDC_CTRL BIT0. */ - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba) qla4_83xx_clear_idc_dontreset(ha); @@ -2854,7 +3489,7 @@ int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha) if (is_qla8022(ha)) { qla4_82xx_set_idc_ver(ha); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { rval = qla4_83xx_set_idc_ver(ha); if (rval == QLA_ERROR) qla4_8xxx_clear_drv_active(ha); @@ -2922,11 +3557,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha) break; case QLA8XXX_DEV_NEED_RESET: /* - * For ISP8324, if NEED_RESET is set by any driver, - * it should be honored, irrespective of IDC_CTRL - * DONTRESET_BIT0 + * For ISP8324 and ISP8042, if NEED_RESET is set by any + * driver, it should be honored, irrespective of + * IDC_CTRL DONTRESET_BIT0 */ - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { qla4_83xx_need_reset_handler(ha); } else if (is_qla8022(ha)) { if (!ql4xdontresethba) { @@ -2976,7 +3611,7 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha) int retval; /* clear the interrupt */ - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { writel(0, &ha->qla4_83xx_reg->risc_intr); readl(&ha->qla4_83xx_reg->risc_intr); } else if (is_qla8022(ha)) { @@ -2986,6 +3621,10 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha) retval = qla4_8xxx_device_state_handler(ha); + /* Initialize request and response queues. 
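qla4_8xxx_device_bootstrap(), reworked above, no longer probes the PEG alive counter on every path: the reset path keys off AF_FW_RECOVERY, and only the start-of-day path samples the counter, ten reads 200 ms apart, treating any movement as live firmware. The liveness probe, with rd_direct() standing in for qla4_8xxx_rd_direct():

	uint32_t old_count = rd_direct(QLA8XXX_PEG_ALIVE_COUNTER);
	int i;

	for (i = 0; i < 10; i++) {
		msleep(200);			/* give the PEG time to tick */
		if (rd_direct(QLA8XXX_PEG_ALIVE_COUNTER) != old_count)
			return 0;		/* counter moved: firmware alive */
	}
	/* Counter frozen for 2 s: firmware is stuck, take over init. */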
*/ + if (retval == QLA_SUCCESS) + qla4xxx_init_rings(ha); + if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) retval = qla4xxx_request_irqs(ha); @@ -3094,7 +3733,7 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr) if (is_qla8022(ha)) { qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, flt_addr << 2, OPTROM_BURST_SIZE); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { status = qla4_83xx_flash_read_u32(ha, flt_addr << 2, (uint8_t *)ha->request_ring, 0x400); @@ -3154,6 +3793,10 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr) hw->flt_region_chap = start; hw->flt_chap_size = le32_to_cpu(region->size); break; + case FLT_REG_ISCSI_DDB: + hw->flt_region_ddb = start; + hw->flt_ddb_size = le32_to_cpu(region->size); + break; } } goto done; @@ -3166,14 +3809,19 @@ no_flash_data: hw->flt_region_boot = FA_BOOT_CODE_ADDR_82; hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82; hw->flt_region_fw = FA_RISC_CODE_ADDR_82; - hw->flt_region_chap = FA_FLASH_ISCSI_CHAP; + hw->flt_region_chap = FA_FLASH_ISCSI_CHAP >> 2; hw->flt_chap_size = FA_FLASH_CHAP_SIZE; + hw->flt_region_ddb = FA_FLASH_ISCSI_DDB >> 2; + hw->flt_ddb_size = FA_FLASH_DDB_SIZE; done: - DEBUG2(ql4_printk(KERN_INFO, ha, "FLT[%s]: flt=0x%x fdt=0x%x " - "boot=0x%x bootload=0x%x fw=0x%x\n", loc, hw->flt_region_flt, - hw->flt_region_fdt, hw->flt_region_boot, hw->flt_region_bootload, - hw->flt_region_fw)); + DEBUG2(ql4_printk(KERN_INFO, ha, + "FLT[%s]: flt=0x%x fdt=0x%x boot=0x%x bootload=0x%x fw=0x%x chap=0x%x chap_size=0x%x ddb=0x%x ddb_size=0x%x\n", + loc, hw->flt_region_flt, hw->flt_region_fdt, + hw->flt_region_boot, hw->flt_region_bootload, + hw->flt_region_fw, hw->flt_region_chap, + hw->flt_chap_size, hw->flt_region_ddb, + hw->flt_ddb_size)); } static void @@ -3317,7 +3965,7 @@ qla4_8xxx_get_flash_info(struct scsi_qla_host *ha) if (is_qla8022(ha)) { qla4_82xx_get_fdt_info(ha); qla4_82xx_get_idc_param(ha); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { qla4_83xx_get_idc_param(ha); } @@ -3427,7 +4075,7 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha) } /* Make sure we receive the minimum required data to cache internally */ - if ((is_qla8032(ha) ? mbox_sts[3] : mbox_sts[4]) < + if (((is_qla8032(ha) || is_qla8042(ha)) ? 
mbox_sts[3] : mbox_sts[4]) < offsetof(struct mbx_sys_info, reserved)) { DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive" " error (%x)\n", ha->host_no, __func__, mbox_sts[4])); @@ -3604,3 +4252,24 @@ qla4_8xxx_enable_msix(struct scsi_qla_host *ha) msix_out: return ret; } + +int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha) +{ + int status = QLA_SUCCESS; + + /* Dont retry adapter initialization if IRQ allocation failed */ + if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) { + ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization as IRQs are not attached\n", + __func__); + status = QLA_ERROR; + goto exit_init_adapter_failure; + } + + /* Since interrupts are registered in start_firmware for + * 8xxx, release them here if initialize_adapter fails + * and retry adapter initialization */ + qla4xxx_free_irqs(ha); + +exit_init_adapter_failure: + return status; +} diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h index 9dc0bbfe50d..337d9fcf641 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.h +++ b/drivers/scsi/qla4xxx/ql4_nx.h @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ @@ -858,6 +858,9 @@ struct crb_addr_pair { #define QLA83XX_POLLRD 35 #define QLA83XX_RDMUX2 36 #define QLA83XX_POLLRDMWR 37 +#define QLA8044_RDDFE 38 +#define QLA8044_RDMDIO 39 +#define QLA8044_POLLWR 40 #define QLA8XXX_RDROM 71 #define QLA8XXX_RDMEM 72 #define QLA8XXX_CNTRL 98 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 6142729167f..32020637620 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -1,6 +1,6 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. 
*/ @@ -83,12 +83,12 @@ MODULE_PARM_DESC(ql4xsess_recovery_tmo, " Target Session Recovery Timeout.\n" "\t\t Default: 120 sec."); -int ql4xmdcapmask = 0x1F; +int ql4xmdcapmask = 0; module_param(ql4xmdcapmask, int, S_IRUGO); MODULE_PARM_DESC(ql4xmdcapmask, " Set the Minidump driver capture mask level.\n" - "\t\t Default is 0x1F.\n" - "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F"); + "\t\t Default is 0 (firmware default capture mask)\n" + "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF"); int ql4xenablemd = 1; module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); @@ -149,6 +149,9 @@ static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, uint32_t *num_entries, char *buf); static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); +static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, + int len); +static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len); /* * SCSI host template entry points @@ -166,6 +169,26 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason); +/* + * iSCSI Flash DDB sysfs entry points + */ +static int +qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn, + void *data, int len); +static int +qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, + int param, char *buf); +static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, + int len); +static int +qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess); +static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn); +static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn); +static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess); + static struct qla4_8xxx_legacy_intr_set legacy_intr[] = QLA82XX_LEGACY_INTR_CONFIG; @@ -232,6 +255,15 @@ static struct iscsi_transport qla4xxx_iscsi_transport = { .send_ping = qla4xxx_send_ping, .get_chap = qla4xxx_get_chap_list, .delete_chap = qla4xxx_delete_chap, + .set_chap = qla4xxx_set_chap_entry, + .get_flashnode_param = qla4xxx_sysfs_ddb_get_param, + .set_flashnode_param = qla4xxx_sysfs_ddb_set_param, + .new_flashnode = qla4xxx_sysfs_ddb_add, + .del_flashnode = qla4xxx_sysfs_ddb_delete, + .login_flashnode = qla4xxx_sysfs_ddb_login, + .logout_flashnode = qla4xxx_sysfs_ddb_logout, + .logout_flashnode_sid = qla4xxx_sysfs_ddb_logout_sid, + .get_host_stats = qla4xxx_get_host_stats, }; static struct scsi_transport_template *qla4xxx_scsi_transport; @@ -351,6 +383,45 @@ static umode_t qla4_attr_is_visible(int param_type, int param) case ISCSI_PARAM_PASSWORD: case ISCSI_PARAM_USERNAME_IN: case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_AUTO_SND_TGT_DISABLE: + case ISCSI_PARAM_DISCOVERY_SESS: + case ISCSI_PARAM_PORTAL_TYPE: + case ISCSI_PARAM_CHAP_AUTH_EN: + case ISCSI_PARAM_DISCOVERY_LOGOUT_EN: + case ISCSI_PARAM_BIDI_CHAP_EN: + case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL: + case ISCSI_PARAM_DEF_TIME2WAIT: + case ISCSI_PARAM_DEF_TIME2RETAIN: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case 
ISCSI_PARAM_MAX_SEGMENT_SIZE: + case ISCSI_PARAM_TCP_TIMESTAMP_STAT: + case ISCSI_PARAM_TCP_WSF_DISABLE: + case ISCSI_PARAM_TCP_NAGLE_DISABLE: + case ISCSI_PARAM_TCP_TIMER_SCALE: + case ISCSI_PARAM_TCP_TIMESTAMP_EN: + case ISCSI_PARAM_TCP_XMIT_WSF: + case ISCSI_PARAM_TCP_RECV_WSF: + case ISCSI_PARAM_IP_FRAGMENT_DISABLE: + case ISCSI_PARAM_IPV4_TOS: + case ISCSI_PARAM_IPV6_TC: + case ISCSI_PARAM_IPV6_FLOW_LABEL: + case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6: + case ISCSI_PARAM_KEEPALIVE_TMO: + case ISCSI_PARAM_LOCAL_PORT: + case ISCSI_PARAM_ISID: + case ISCSI_PARAM_TSID: + case ISCSI_PARAM_DEF_TASKMGMT_TMO: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_STATSN: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_DISCOVERY_PARENT_IDX: + case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: + case ISCSI_PARAM_LOCAL_IPADDR: return S_IRUGO; default: return 0; @@ -372,6 +443,127 @@ static umode_t qla4_attr_is_visible(int param_type, int param) case ISCSI_NET_PARAM_VLAN_ENABLED: case ISCSI_NET_PARAM_MTU: case ISCSI_NET_PARAM_PORT: + case ISCSI_NET_PARAM_IPADDR_STATE: + case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: + case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: + case ISCSI_NET_PARAM_DELAYED_ACK_EN: + case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: + case ISCSI_NET_PARAM_TCP_WSF_DISABLE: + case ISCSI_NET_PARAM_TCP_WSF: + case ISCSI_NET_PARAM_TCP_TIMER_SCALE: + case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: + case ISCSI_NET_PARAM_CACHE_ID: + case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: + case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: + case ISCSI_NET_PARAM_IPV4_TOS_EN: + case ISCSI_NET_PARAM_IPV4_TOS: + case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: + case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: + case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: + case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: + case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: + case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: + case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: + case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: + case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: + case ISCSI_NET_PARAM_REDIRECT_EN: + case ISCSI_NET_PARAM_IPV4_TTL: + case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: + case ISCSI_NET_PARAM_IPV6_MLD_EN: + case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: + case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: + case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: + case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: + case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: + case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: + case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: + case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: + return S_IRUGO; + default: + return 0; + } + case ISCSI_IFACE_PARAM: + switch (param) { + case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: + case ISCSI_IFACE_PARAM_HDRDGST_EN: + case ISCSI_IFACE_PARAM_DATADGST_EN: + case ISCSI_IFACE_PARAM_IMM_DATA_EN: + case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: + case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: + case ISCSI_IFACE_PARAM_PDU_INORDER_EN: + case ISCSI_IFACE_PARAM_ERL: + case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: + case ISCSI_IFACE_PARAM_FIRST_BURST: + case ISCSI_IFACE_PARAM_MAX_R2T: + case ISCSI_IFACE_PARAM_MAX_BURST: + case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: + case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: + case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: + case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: + case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: + case ISCSI_IFACE_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + case ISCSI_FLASHNODE_PARAM: + switch (param) { + case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: + case ISCSI_FLASHNODE_PORTAL_TYPE: + case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: + case 
ISCSI_FLASHNODE_DISCOVERY_SESS: + case ISCSI_FLASHNODE_ENTRY_EN: + case ISCSI_FLASHNODE_HDR_DGST_EN: + case ISCSI_FLASHNODE_DATA_DGST_EN: + case ISCSI_FLASHNODE_IMM_DATA_EN: + case ISCSI_FLASHNODE_INITIAL_R2T_EN: + case ISCSI_FLASHNODE_DATASEQ_INORDER: + case ISCSI_FLASHNODE_PDU_INORDER: + case ISCSI_FLASHNODE_CHAP_AUTH_EN: + case ISCSI_FLASHNODE_SNACK_REQ_EN: + case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: + case ISCSI_FLASHNODE_BIDI_CHAP_EN: + case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: + case ISCSI_FLASHNODE_ERL: + case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: + case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: + case ISCSI_FLASHNODE_TCP_WSF_DISABLE: + case ISCSI_FLASHNODE_TCP_TIMER_SCALE: + case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: + case ISCSI_FLASHNODE_IP_FRAG_DISABLE: + case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: + case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: + case ISCSI_FLASHNODE_FIRST_BURST: + case ISCSI_FLASHNODE_DEF_TIME2WAIT: + case ISCSI_FLASHNODE_DEF_TIME2RETAIN: + case ISCSI_FLASHNODE_MAX_R2T: + case ISCSI_FLASHNODE_KEEPALIVE_TMO: + case ISCSI_FLASHNODE_ISID: + case ISCSI_FLASHNODE_TSID: + case ISCSI_FLASHNODE_PORT: + case ISCSI_FLASHNODE_MAX_BURST: + case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: + case ISCSI_FLASHNODE_IPADDR: + case ISCSI_FLASHNODE_ALIAS: + case ISCSI_FLASHNODE_REDIRECT_IPADDR: + case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: + case ISCSI_FLASHNODE_LOCAL_PORT: + case ISCSI_FLASHNODE_IPV4_TOS: + case ISCSI_FLASHNODE_IPV6_TC: + case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: + case ISCSI_FLASHNODE_NAME: + case ISCSI_FLASHNODE_TPGT: + case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: + case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: + case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: + case ISCSI_FLASHNODE_TCP_XMIT_WSF: + case ISCSI_FLASHNODE_TCP_RECV_WSF: + case ISCSI_FLASHNODE_CHAP_OUT_IDX: + case ISCSI_FLASHNODE_USERNAME: + case ISCSI_FLASHNODE_PASSWORD: + case ISCSI_FLASHNODE_STATSN: + case ISCSI_FLASHNODE_EXP_STATSN: + case ISCSI_FLASHNODE_IS_BOOT_TGT: return S_IRUGO; default: return 0; @@ -381,6 +573,154 @@ static umode_t qla4_attr_is_visible(int param_type, int param) return 0; } +/** + * qla4xxx_create_chap_list - Create CHAP list from FLASH + * @ha: pointer to adapter structure + * + * Read flash and make a list of CHAP entries. During login, when a CHAP entry + * is received, it is checked against this list. If the entry exists, its index + * is set in the DDB. If it does not exist, a new entry is added to the CHAP + * table in FLASH and the resulting index is used in the DDB. + **/ +static void qla4xxx_create_chap_list(struct scsi_qla_host *ha) +{ + int rval = 0; + uint8_t *chap_flash_data = NULL; + uint32_t offset; + dma_addr_t chap_dma; + uint32_t chap_size = 0; + + if (is_qla40XX(ha)) + chap_size = MAX_CHAP_ENTRIES_40XX * + sizeof(struct ql4_chap_table); + else /* Single region contains CHAP info for both + * ports, which is divided in half, one half per port.
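+ * (Worked example under a hypothetical layout: if flt_chap_size were + * 0x20000 bytes, each port would own a 0x10000-byte half of the region.)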
+ */ + chap_size = ha->hw.flt_chap_size / 2; + + chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size, + &chap_dma, GFP_KERNEL); + if (!chap_flash_data) { + ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n"); + return; + } + + if (is_qla40XX(ha)) { + offset = FLASH_CHAP_OFFSET; + } else { + offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); + if (ha->port_num == 1) + offset += chap_size; + } + + rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); + if (rval != QLA_SUCCESS) + goto exit_chap_list; + + if (ha->chap_list == NULL) + ha->chap_list = vmalloc(chap_size); + if (ha->chap_list == NULL) { + ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n"); + goto exit_chap_list; + } + + memset(ha->chap_list, 0, chap_size); + memcpy(ha->chap_list, chap_flash_data, chap_size); + +exit_chap_list: + dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma); +} + +static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha, + int16_t chap_index, + struct ql4_chap_table **chap_entry) +{ + int rval = QLA_ERROR; + int max_chap_entries; + + if (!ha->chap_list) { + ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); + rval = QLA_ERROR; + goto exit_get_chap; + } + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + if (chap_index >= max_chap_entries) { + ql4_printk(KERN_ERR, ha, "Invalid CHAP index\n"); + rval = QLA_ERROR; + goto exit_get_chap; + } + + *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index; + if ((*chap_entry)->cookie != + __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { + rval = QLA_ERROR; + *chap_entry = NULL; + } else { + rval = QLA_SUCCESS; + } + +exit_get_chap: + return rval; +} + +/** + * qla4xxx_find_free_chap_index - Find the first free chap index + * @ha: pointer to adapter structure + * @chap_index: CHAP index to be returned + * + * Find the first free chap index available in the chap table + * + * Note: Caller should acquire the chap lock before getting here.
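+ * + * A minimal sketch of the intended call pattern (hypothetical caller code, + * not part of this patch; qla4xxx_set_chap() is the helper actually used + * below in qla4xxx_set_chap_entry()): + * + *	mutex_lock(&ha->chap_sem); + *	if (qla4xxx_find_free_chap_index(ha, &idx) == QLA_SUCCESS) + *		rc = qla4xxx_set_chap(ha, user, pass, idx, type); + *	mutex_unlock(&ha->chap_sem);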
+ **/ +static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha, + uint16_t *chap_index) +{ + int i, rval; + int free_index = -1; + int max_chap_entries = 0; + struct ql4_chap_table *chap_table; + + if (is_qla80XX(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + if (!ha->chap_list) { + ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); + rval = QLA_ERROR; + goto exit_find_chap; + } + + for (i = 0; i < max_chap_entries; i++) { + chap_table = (struct ql4_chap_table *)ha->chap_list + i; + + if ((chap_table->cookie != + __constant_cpu_to_le16(CHAP_VALID_COOKIE)) && + (i > MAX_RESRV_CHAP_IDX)) { + free_index = i; + break; + } + } + + if (free_index != -1) { + *chap_index = free_index; + rval = QLA_SUCCESS; + } else { + rval = QLA_ERROR; + } + +exit_find_chap: + return rval; +} + static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, uint32_t *num_entries, char *buf) { @@ -391,7 +731,7 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, int valid_chap_entries = 0; int ret = 0, i; - if (is_qla8022(ha)) + if (is_qla80XX(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct ql4_chap_table); else @@ -405,6 +745,8 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, goto exit_get_chap_list; } + qla4xxx_create_chap_list(ha); + chap_rec = (struct iscsi_chap_rec *) buf; mutex_lock(&ha->chap_sem); for (i = chap_tbl_idx; i < max_chap_entries; i++) { @@ -495,7 +837,7 @@ static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx) memset(chap_table, 0, sizeof(struct ql4_chap_table)); - if (is_qla8022(ha)) + if (is_qla80XX(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct ql4_chap_table); else @@ -564,113 +906,754 @@ exit_delete_chap: return ret; } +/** + * qla4xxx_set_chap_entry - Make chap entry with given information + * @shost: pointer to host + * @data: chap info - credentials, index and type to make chap entry + * @len: length of data + * + * Add or update chap entry with the given information + **/ +static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + struct iscsi_chap_rec chap_rec; + struct ql4_chap_table *chap_entry = NULL; + struct iscsi_param_info *param_info; + struct nlattr *attr; + int max_chap_entries = 0; + int type; + int rem = len; + int rc = 0; + int size; + + memset(&chap_rec, 0, sizeof(chap_rec)); + + nla_for_each_attr(attr, data, len, rem) { + param_info = nla_data(attr); + + switch (param_info->param) { + case ISCSI_CHAP_PARAM_INDEX: + chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value; + break; + case ISCSI_CHAP_PARAM_CHAP_TYPE: + chap_rec.chap_type = param_info->value[0]; + break; + case ISCSI_CHAP_PARAM_USERNAME: + size = min_t(size_t, sizeof(chap_rec.username), + param_info->len); + memcpy(chap_rec.username, param_info->value, size); + break; + case ISCSI_CHAP_PARAM_PASSWORD: + size = min_t(size_t, sizeof(chap_rec.password), + param_info->len); + memcpy(chap_rec.password, param_info->value, size); + break; + case ISCSI_CHAP_PARAM_PASSWORD_LEN: + chap_rec.password_length = param_info->value[0]; + break; + default: + ql4_printk(KERN_ERR, ha, + "%s: No such sysfs attribute\n", __func__); + rc = -ENOSYS; + goto exit_set_chap; + } + } + + if (chap_rec.chap_type == CHAP_TYPE_IN) + type = BIDI_CHAP; + else + type = LOCAL_CHAP; + + if (is_qla80XX(ha)) + 
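/* on ISP8xxx parts the flash CHAP region is shared by the two ports, hence the halving below (see the qla4xxx_create_chap_list() comment above) */ +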
max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + mutex_lock(&ha->chap_sem); + if (chap_rec.chap_tbl_idx < max_chap_entries) { + rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx, + &chap_entry); + if (!rc) { + if (type != qla4xxx_get_chap_type(chap_entry)) { + ql4_printk(KERN_INFO, ha, + "Type mismatch for CHAP entry %d\n", + chap_rec.chap_tbl_idx); + rc = -EINVAL; + goto exit_unlock_chap; + } + + /* If chap index is in use then don't modify it */ + rc = qla4xxx_is_chap_active(shost, + chap_rec.chap_tbl_idx); + if (rc) { + ql4_printk(KERN_INFO, ha, + "CHAP entry %d is in use\n", + chap_rec.chap_tbl_idx); + rc = -EBUSY; + goto exit_unlock_chap; + } + } + } else { + rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx); + if (rc) { + ql4_printk(KERN_INFO, ha, "CHAP entry not available\n"); + rc = -EBUSY; + goto exit_unlock_chap; + } + } + + rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password, + chap_rec.chap_tbl_idx, type); + +exit_unlock_chap: + mutex_unlock(&ha->chap_sem); + +exit_set_chap: + return rc; +} + + +static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + struct iscsi_offload_host_stats *host_stats = NULL; + int host_stats_size; + int ret = 0; + int ddb_idx = 0; + struct ql_iscsi_stats *ql_iscsi_stats = NULL; + int stats_size; + dma_addr_t iscsi_stats_dma; + + DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__)); + + host_stats_size = sizeof(struct iscsi_offload_host_stats); + + if (host_stats_size != len) { + ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch, expected = %d, got = %d\n", + __func__, host_stats_size, len); + ret = -EINVAL; + goto exit_host_stats; + } + + if (!buf) { + ret = -EINVAL; + goto exit_host_stats; + } + host_stats = (struct iscsi_offload_host_stats *)buf; + + stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); + + ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, + &iscsi_stats_dma, GFP_KERNEL); + if (!ql_iscsi_stats) { + ql4_printk(KERN_ERR, ha, + "Unable to allocate memory for iscsi stats\n"); + ret = -ENOMEM; + goto exit_host_stats; + } + + ret = qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size, + iscsi_stats_dma); + if (ret != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, + "Unable to retrieve iscsi stats\n"); + ret = -EIO; + goto exit_host_stats; + } + host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames); + host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes); + host_stats->mactx_multicast_frames = + le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames); + host_stats->mactx_broadcast_frames = + le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames); + host_stats->mactx_pause_frames = + le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames); + host_stats->mactx_control_frames = + le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames); + host_stats->mactx_deferral = + le64_to_cpu(ql_iscsi_stats->mac_tx_deferral); + host_stats->mactx_excess_deferral = + le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral); + host_stats->mactx_late_collision = + le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision); + host_stats->mactx_abort = le64_to_cpu(ql_iscsi_stats->mac_tx_abort); + host_stats->mactx_single_collision = + le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision); + host_stats->mactx_multiple_collision = + le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision); + host_stats->mactx_collision = + le64_to_cpu(ql_iscsi_stats->mac_tx_collision); + 
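/* all remaining MAC/IP/TCP/iSCSI counters below are copied the same way; every ql_iscsi_stats field arrives little-endian from the firmware */ +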
host_stats->mactx_frames_dropped = + le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped); + host_stats->mactx_jumbo_frames = + le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames); + host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames); + host_stats->macrx_bytes = le64_to_cpu(ql_iscsi_stats->mac_rx_bytes); + host_stats->macrx_unknown_control_frames = + le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames); + host_stats->macrx_pause_frames = + le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames); + host_stats->macrx_control_frames = + le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames); + host_stats->macrx_dribble = + le64_to_cpu(ql_iscsi_stats->mac_rx_dribble); + host_stats->macrx_frame_length_error = + le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error); + host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber); + host_stats->macrx_carrier_sense_error = + le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error); + host_stats->macrx_frame_discarded = + le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded); + host_stats->macrx_frames_dropped = + le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped); + host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error); + host_stats->mac_encoding_error = + le64_to_cpu(ql_iscsi_stats->mac_encoding_error); + host_stats->macrx_length_error_large = + le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large); + host_stats->macrx_length_error_small = + le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small); + host_stats->macrx_multicast_frames = + le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames); + host_stats->macrx_broadcast_frames = + le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames); + host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets); + host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes); + host_stats->iptx_fragments = + le64_to_cpu(ql_iscsi_stats->ip_tx_fragments); + host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets); + host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes); + host_stats->iprx_fragments = + le64_to_cpu(ql_iscsi_stats->ip_rx_fragments); + host_stats->ip_datagram_reassembly = + le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly); + host_stats->ip_invalid_address_error = + le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error); + host_stats->ip_error_packets = + le64_to_cpu(ql_iscsi_stats->ip_error_packets); + host_stats->ip_fragrx_overlap = + le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap); + host_stats->ip_fragrx_outoforder = + le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder); + host_stats->ip_datagram_reassembly_timeout = + le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout); + host_stats->ipv6tx_packets = + le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets); + host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes); + host_stats->ipv6tx_fragments = + le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments); + host_stats->ipv6rx_packets = + le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets); + host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes); + host_stats->ipv6rx_fragments = + le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments); + host_stats->ipv6_datagram_reassembly = + le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly); + host_stats->ipv6_invalid_address_error = + le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error); + host_stats->ipv6_error_packets = + le64_to_cpu(ql_iscsi_stats->ipv6_error_packets); + host_stats->ipv6_fragrx_overlap = + le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap); + 
host_stats->ipv6_fragrx_outoforder = + le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder); + host_stats->ipv6_datagram_reassembly_timeout = + le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout); + host_stats->tcptx_segments = + le64_to_cpu(ql_iscsi_stats->tcp_tx_segments); + host_stats->tcptx_bytes = le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes); + host_stats->tcprx_segments = + le64_to_cpu(ql_iscsi_stats->tcp_rx_segments); + host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte); + host_stats->tcp_duplicate_ack_retx = + le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx); + host_stats->tcp_retx_timer_expired = + le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired); + host_stats->tcprx_duplicate_ack = + le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack); + host_stats->tcprx_pure_ackr = + le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr); + host_stats->tcptx_delayed_ack = + le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack); + host_stats->tcptx_pure_ack = + le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack); + host_stats->tcprx_segment_error = + le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error); + host_stats->tcprx_segment_outoforder = + le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder); + host_stats->tcprx_window_probe = + le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe); + host_stats->tcprx_window_update = + le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update); + host_stats->tcptx_window_probe_persist = + le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist); + host_stats->ecc_error_correction = + le64_to_cpu(ql_iscsi_stats->ecc_error_correction); + host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx); + host_stats->iscsi_data_bytes_tx = + le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx); + host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx); + host_stats->iscsi_data_bytes_rx = + le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx); + host_stats->iscsi_io_completed = + le64_to_cpu(ql_iscsi_stats->iscsi_io_completed); + host_stats->iscsi_unexpected_io_rx = + le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx); + host_stats->iscsi_format_error = + le64_to_cpu(ql_iscsi_stats->iscsi_format_error); + host_stats->iscsi_hdr_digest_error = + le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error); + host_stats->iscsi_data_digest_error = + le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error); + host_stats->iscsi_sequence_error = + le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error); +exit_host_stats: + if (ql_iscsi_stats) + dma_free_coherent(&ha->pdev->dev, stats_size, + ql_iscsi_stats, iscsi_stats_dma); + + ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", + __func__); + return ret; +} + static int qla4xxx_get_iface_param(struct iscsi_iface *iface, enum iscsi_param_type param_type, int param, char *buf) { struct Scsi_Host *shost = iscsi_iface_to_shost(iface); struct scsi_qla_host *ha = to_qla_host(shost); + int ival; + char *pval = NULL; int len = -ENOSYS; - if (param_type != ISCSI_NET_PARAM) - return -ENOSYS; + if (param_type == ISCSI_NET_PARAM) { + switch (param) { + case ISCSI_NET_PARAM_IPV4_ADDR: + len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); + break; + case ISCSI_NET_PARAM_IPV4_SUBNET: + len = sprintf(buf, "%pI4\n", + &ha->ip_config.subnet_mask); + break; + case ISCSI_NET_PARAM_IPV4_GW: + len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway); + break; + case ISCSI_NET_PARAM_IFACE_ENABLE: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_IPV4_PROTOCOL_ENABLE, pval); + } else { + 
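/* OP_STATE() is a driver helper macro (presumably defined in the qla4xxx headers of this series) that sets pval to "enabled" or "disabled" according to the option bit */ +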
OP_STATE(ha->ip_config.ipv6_options, + IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval); + } - switch (param) { - case ISCSI_NET_PARAM_IPV4_ADDR: - len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); - break; - case ISCSI_NET_PARAM_IPV4_SUBNET: - len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask); - break; - case ISCSI_NET_PARAM_IPV4_GW: - len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway); - break; - case ISCSI_NET_PARAM_IFACE_ENABLE: - if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) - len = sprintf(buf, "%s\n", - (ha->ip_config.ipv4_options & - IPOPT_IPV4_PROTOCOL_ENABLE) ? - "enabled" : "disabled"); - else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_BOOTPROTO: len = sprintf(buf, "%s\n", - (ha->ip_config.ipv6_options & - IPV6_OPT_IPV6_PROTOCOL_ENABLE) ? - "enabled" : "disabled"); - break; - case ISCSI_NET_PARAM_IPV4_BOOTPROTO: - len = sprintf(buf, "%s\n", - (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ? - "dhcp" : "static"); - break; - case ISCSI_NET_PARAM_IPV6_ADDR: - if (iface->iface_num == 0) - len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0); - if (iface->iface_num == 1) - len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1); - break; - case ISCSI_NET_PARAM_IPV6_LINKLOCAL: - len = sprintf(buf, "%pI6\n", - &ha->ip_config.ipv6_link_local_addr); - break; - case ISCSI_NET_PARAM_IPV6_ROUTER: - len = sprintf(buf, "%pI6\n", - &ha->ip_config.ipv6_default_router_addr); - break; - case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: - len = sprintf(buf, "%s\n", - (ha->ip_config.ipv6_addl_options & - IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ? - "nd" : "static"); - break; - case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: - len = sprintf(buf, "%s\n", - (ha->ip_config.ipv6_addl_options & - IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ? - "auto" : "static"); - break; - case ISCSI_NET_PARAM_VLAN_ID: - if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + (ha->ip_config.tcp_options & + TCPOPT_DHCP_ENABLE) ? + "dhcp" : "static"); + break; + case ISCSI_NET_PARAM_IPV6_ADDR: + if (iface->iface_num == 0) + len = sprintf(buf, "%pI6\n", + &ha->ip_config.ipv6_addr0); + if (iface->iface_num == 1) + len = sprintf(buf, "%pI6\n", + &ha->ip_config.ipv6_addr1); + break; + case ISCSI_NET_PARAM_IPV6_LINKLOCAL: + len = sprintf(buf, "%pI6\n", + &ha->ip_config.ipv6_link_local_addr); + break; + case ISCSI_NET_PARAM_IPV6_ROUTER: + len = sprintf(buf, "%pI6\n", + &ha->ip_config.ipv6_default_router_addr); + break; + case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: + pval = (ha->ip_config.ipv6_addl_options & + IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ? + "nd" : "static"; + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: + pval = (ha->ip_config.ipv6_addl_options & + IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ? 
+ "auto" : "static"; + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_VLAN_ID: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + ival = ha->ip_config.ipv4_vlan_tag & + ISCSI_MAX_VLAN_ID; + else + ival = ha->ip_config.ipv6_vlan_tag & + ISCSI_MAX_VLAN_ID; + + len = sprintf(buf, "%d\n", ival); + break; + case ISCSI_NET_PARAM_VLAN_PRIORITY: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + ival = (ha->ip_config.ipv4_vlan_tag >> 13) & + ISCSI_MAX_VLAN_PRIORITY; + else + ival = (ha->ip_config.ipv6_vlan_tag >> 13) & + ISCSI_MAX_VLAN_PRIORITY; + + len = sprintf(buf, "%d\n", ival); + break; + case ISCSI_NET_PARAM_VLAN_ENABLED: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_VLAN_TAGGING_ENABLE, pval); + } else { + OP_STATE(ha->ip_config.ipv6_options, + IPV6_OPT_VLAN_TAGGING_ENABLE, pval); + } + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_MTU: + len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); + break; + case ISCSI_NET_PARAM_PORT: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + len = sprintf(buf, "%d\n", + ha->ip_config.ipv4_port); + else + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_port); + break; + case ISCSI_NET_PARAM_IPADDR_STATE: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + pval = iscsi_get_ipaddress_state_name( + ha->ip_config.ipv4_addr_state); + } else { + if (iface->iface_num == 0) + pval = iscsi_get_ipaddress_state_name( + ha->ip_config.ipv6_addr0_state); + else if (iface->iface_num == 1) + pval = iscsi_get_ipaddress_state_name( + ha->ip_config.ipv6_addr1_state); + } + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: + pval = iscsi_get_ipaddress_state_name( + ha->ip_config.ipv6_link_local_state); + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: + pval = iscsi_get_router_state_name( + ha->ip_config.ipv6_default_router_state); + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_DELAYED_ACK_EN: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + OP_STATE(~ha->ip_config.tcp_options, + TCPOPT_DELAYED_ACK_DISABLE, pval); + } else { + OP_STATE(~ha->ip_config.ipv6_tcp_options, + IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval); + } + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + OP_STATE(~ha->ip_config.tcp_options, + TCPOPT_NAGLE_ALGO_DISABLE, pval); + } else { + OP_STATE(~ha->ip_config.ipv6_tcp_options, + IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval); + } + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_TCP_WSF_DISABLE: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + OP_STATE(~ha->ip_config.tcp_options, + TCPOPT_WINDOW_SCALE_DISABLE, pval); + } else { + OP_STATE(~ha->ip_config.ipv6_tcp_options, + IPV6_TCPOPT_WINDOW_SCALE_DISABLE, + pval); + } + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_TCP_WSF: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + len = sprintf(buf, "%d\n", + ha->ip_config.tcp_wsf); + else + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_tcp_wsf); + break; + case ISCSI_NET_PARAM_TCP_TIMER_SCALE: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + ival = (ha->ip_config.tcp_options & + TCPOPT_TIMER_SCALE) >> 1; + else + ival = (ha->ip_config.ipv6_tcp_options & + IPV6_TCPOPT_TIMER_SCALE) >> 1; + + len = sprintf(buf, "%d\n", ival); + break; + case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + 
OP_STATE(ha->ip_config.tcp_options, + TCPOPT_TIMESTAMP_ENABLE, pval); + } else { + OP_STATE(ha->ip_config.ipv6_tcp_options, + IPV6_TCPOPT_TIMESTAMP_EN, pval); + } + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_CACHE_ID: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + len = sprintf(buf, "%d\n", + ha->ip_config.ipv4_cache_id); + else + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_cache_id); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: + OP_STATE(ha->ip_config.tcp_options, + TCPOPT_DNS_SERVER_IP_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: + OP_STATE(ha->ip_config.tcp_options, + TCPOPT_SLP_DA_INFO_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_TOS_EN: + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_IPV4_TOS_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_TOS: + len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos); + break; + case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_GRAT_ARP_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: + OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN, + pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: + pval = (ha->ip_config.ipv4_alt_cid_len) ? + (char *)ha->ip_config.ipv4_alt_cid : ""; + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_REQ_VID_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_USE_VID_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: + pval = (ha->ip_config.ipv4_vid_len) ? 
+ (char *)ha->ip_config.ipv4_vid : ""; + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_LEARN_IQN_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: + OP_STATE(~ha->ip_config.ipv4_options, + IPOPT_FRAGMENTATION_DISABLE, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_IN_FORWARD_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_REDIRECT_EN: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + OP_STATE(ha->ip_config.ipv4_options, + IPOPT_ARP_REDIRECT_EN, pval); + } else { + OP_STATE(ha->ip_config.ipv6_options, + IPV6_OPT_REDIRECT_EN, pval); + } + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV4_TTL: + len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl); + break; + case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: + OP_STATE(ha->ip_config.ipv6_options, + IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV6_MLD_EN: + OP_STATE(ha->ip_config.ipv6_addl_options, + IPV6_ADDOPT_MLD_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: + len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl); + break; + case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: len = sprintf(buf, "%d\n", - (ha->ip_config.ipv4_vlan_tag & - ISCSI_MAX_VLAN_ID)); - else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) + ha->ip_config.ipv6_traffic_class); + break; + case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: len = sprintf(buf, "%d\n", - (ha->ip_config.ipv6_vlan_tag & - ISCSI_MAX_VLAN_ID)); - break; - case ISCSI_NET_PARAM_VLAN_PRIORITY: - if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + ha->ip_config.ipv6_hop_limit); + break; + case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: len = sprintf(buf, "%d\n", - ((ha->ip_config.ipv4_vlan_tag >> 13) & - ISCSI_MAX_VLAN_PRIORITY)); - else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) + ha->ip_config.ipv6_nd_reach_time); + break; + case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: len = sprintf(buf, "%d\n", - ((ha->ip_config.ipv6_vlan_tag >> 13) & - ISCSI_MAX_VLAN_PRIORITY)); - break; - case ISCSI_NET_PARAM_VLAN_ENABLED: - if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) - len = sprintf(buf, "%s\n", - (ha->ip_config.ipv4_options & - IPOPT_VLAN_TAGGING_ENABLE) ? - "enabled" : "disabled"); - else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) - len = sprintf(buf, "%s\n", - (ha->ip_config.ipv6_options & - IPV6_OPT_VLAN_TAGGING_ENABLE) ? 
- "enabled" : "disabled"); - break; - case ISCSI_NET_PARAM_MTU: - len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); - break; - case ISCSI_NET_PARAM_PORT: - if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) - len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port); - else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) - len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port); - break; - default: - len = -ENOSYS; + ha->ip_config.ipv6_nd_rexmit_timer); + break; + case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_nd_stale_timeout); + break; + case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_dup_addr_detect_count); + break; + case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: + len = sprintf(buf, "%d\n", + ha->ip_config.ipv6_gw_advrt_mtu); + break; + default: + len = -ENOSYS; + } + } else if (param_type == ISCSI_IFACE_PARAM) { + switch (param) { + case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: + len = sprintf(buf, "%d\n", ha->ip_config.def_timeout); + break; + case ISCSI_IFACE_PARAM_HDRDGST_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_HEADER_DIGEST_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_DATADGST_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_DATA_DIGEST_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_IMM_DATA_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_IMMEDIATE_DATA_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_INITIAL_R2T_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_DATA_SEQ_INORDER_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_PDU_INORDER_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_DATA_PDU_INORDER_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_ERL: + len = sprintf(buf, "%d\n", + (ha->ip_config.iscsi_options & + ISCSIOPTS_ERL)); + break; + case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: + len = sprintf(buf, "%u\n", + ha->ip_config.iscsi_max_pdu_size * + BYTE_UNITS); + break; + case ISCSI_IFACE_PARAM_FIRST_BURST: + len = sprintf(buf, "%u\n", + ha->ip_config.iscsi_first_burst_len * + BYTE_UNITS); + break; + case ISCSI_IFACE_PARAM_MAX_R2T: + len = sprintf(buf, "%d\n", + ha->ip_config.iscsi_max_outstnd_r2t); + break; + case ISCSI_IFACE_PARAM_MAX_BURST: + len = sprintf(buf, "%u\n", + ha->ip_config.iscsi_max_burst_len * + BYTE_UNITS); + break; + case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_CHAP_AUTH_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_BIDI_CHAP_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_DISCOVERY_AUTH_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: + OP_STATE(ha->ip_config.iscsi_options, + ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval); + + len = sprintf(buf, "%s\n", pval); + break; + case ISCSI_IFACE_PARAM_INITIATOR_NAME: + len = sprintf(buf, 
"%s\n", ha->ip_config.iscsi_name); + break; + default: + len = -ENOSYS; + } } return len; @@ -687,16 +1670,13 @@ qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, struct sockaddr_in *addr; struct sockaddr_in6 *addr6; - DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); if (!shost) { ret = -ENXIO; - printk(KERN_ERR "%s: shost is NULL\n", - __func__); + pr_err("%s: shost is NULL\n", __func__); return ERR_PTR(ret); } ha = iscsi_host_priv(shost); - ep = iscsi_create_endpoint(sizeof(struct qla_endpoint)); if (!ep) { ret = -ENOMEM; @@ -716,6 +1696,9 @@ qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__, (char *)&addr6->sin6_addr)); + } else { + ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n", + __func__); } qla_ep->host = shost; @@ -729,9 +1712,9 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) struct scsi_qla_host *ha; int ret = 0; - DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); qla_ep = ep->dd_data; ha = to_qla_host(qla_ep->host); + DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no)); if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags)) ret = 1; @@ -741,7 +1724,13 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep) { - DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); + struct qla_endpoint *qla_ep; + struct scsi_qla_host *ha; + + qla_ep = ep->dd_data; + ha = to_qla_host(qla_ep->host); + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, + ha->host_no)); iscsi_destroy_endpoint(ep); } @@ -751,15 +1740,18 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, { struct qla_endpoint *qla_ep = ep->dd_data; struct sockaddr *dst_addr; + struct scsi_qla_host *ha; - DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); + if (!qla_ep) + return -ENOTCONN; + + ha = to_qla_host(qla_ep->host); + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, + ha->host_no)); switch (param) { case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_CONN_ADDRESS: - if (!qla_ep) - return -ENOTCONN; - dst_addr = (struct sockaddr *)&qla_ep->dst_addr; if (!dst_addr) return -ENOTCONN; @@ -783,13 +1775,13 @@ static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, int ret; dma_addr_t iscsi_stats_dma; - DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); - cls_sess = iscsi_conn_to_session(cls_conn); sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, + ha->host_no)); stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); /* Allocate memory */ ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, @@ -1042,8 +2034,8 @@ static void qla4xxx_set_ipv6(struct scsi_qla_host *ha, cpu_to_le16( IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); else - ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for " - "IPv6 addr\n"); + ql4_printk(KERN_ERR, ha, + "Invalid autocfg setting for IPv6 addr\n"); break; case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: /* Autocfg applies to even interface */ @@ -1059,8 +2051,8 @@ static void qla4xxx_set_ipv6(struct scsi_qla_host *ha, init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); else - ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for " - "IPv6 linklocal addr\n"); + ql4_printk(KERN_ERR, ha, + "Invalid autocfg setting for IPv6 linklocal addr\n"); break; case 
ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG: /* Autocfg applies to even interface */ @@ -1109,6 +2101,136 @@ static void qla4xxx_set_ipv6(struct scsi_qla_host *ha, init_fw_cb->ipv6_port = cpu_to_le16(*(uint16_t *)iface_param->value); break; + case ISCSI_NET_PARAM_DELAYED_ACK_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv6_tcp_opts |= + cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE); + else + init_fw_cb->ipv6_tcp_opts &= + cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE & + 0xFFFF); + break; + case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv6_tcp_opts |= + cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE); + else + init_fw_cb->ipv6_tcp_opts &= + cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE); + break; + case ISCSI_NET_PARAM_TCP_WSF_DISABLE: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv6_tcp_opts |= + cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE); + else + init_fw_cb->ipv6_tcp_opts &= + cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE); + break; + case ISCSI_NET_PARAM_TCP_WSF: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_tcp_wsf = iface_param->value[0]; + break; + case ISCSI_NET_PARAM_TCP_TIMER_SCALE: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_tcp_opts &= + cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE); + init_fw_cb->ipv6_tcp_opts |= + cpu_to_le16((iface_param->value[0] << 1) & + IPV6_TCPOPT_TIMER_SCALE); + break; + case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv6_tcp_opts |= + cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN); + else + init_fw_cb->ipv6_tcp_opts &= + cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN); + break; + case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv6_opts |= + cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); + else + init_fw_cb->ipv6_opts &= + cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); + break; + case ISCSI_NET_PARAM_REDIRECT_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv6_opts |= + cpu_to_le16(IPV6_OPT_REDIRECT_EN); + else + init_fw_cb->ipv6_opts &= + cpu_to_le16(~IPV6_OPT_REDIRECT_EN); + break; + case ISCSI_NET_PARAM_IPV6_MLD_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv6_addtl_opts |= + cpu_to_le16(IPV6_ADDOPT_MLD_EN); + else + init_fw_cb->ipv6_addtl_opts &= + cpu_to_le16(~IPV6_ADDOPT_MLD_EN); + break; + case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_flow_lbl = + cpu_to_le16(*(uint16_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_traffic_class = iface_param->value[0]; + break; + case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_hop_limit = iface_param->value[0]; + break; + case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_nd_reach_time = + cpu_to_le32(*(uint32_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: + if (iface_param->iface_num & 0x1) + break; + 
init_fw_cb->ipv6_nd_rexmit_timer = + cpu_to_le32(*(uint32_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_nd_stale_timeout = + cpu_to_le32(*(uint32_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0]; + break; + case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv6_gw_advrt_mtu = + cpu_to_le32(*(uint32_t *)iface_param->value); + break; default: ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n", iface_param->param); @@ -1177,6 +2299,196 @@ static void qla4xxx_set_ipv4(struct scsi_qla_host *ha, init_fw_cb->ipv4_port = cpu_to_le16(*(uint16_t *)iface_param->value); break; + case ISCSI_NET_PARAM_DELAYED_ACK_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE); + else + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE & + 0xFFFF); + break; + case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE); + else + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE); + break; + case ISCSI_NET_PARAM_TCP_WSF_DISABLE: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE); + else + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE); + break; + case ISCSI_NET_PARAM_TCP_WSF: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv4_tcp_wsf = iface_param->value[0]; + break; + case ISCSI_NET_PARAM_TCP_TIMER_SCALE: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE); + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16((iface_param->value[0] << 1) & + TCPOPT_TIMER_SCALE); + break; + case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE); + else + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN); + else + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_SLP_DA_INFO_EN); + else + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN); + break; + case ISCSI_NET_PARAM_IPV4_TOS_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_IPV4_TOS_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_IPV4_TOS_EN); + break; + case ISCSI_NET_PARAM_IPV4_TOS: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv4_tos = iface_param->value[0]; + break; + case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: + 
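/* as with the other IPv4 options in this switch, only the even-numbered (first) interface is programmable; odd iface numbers are ignored */ +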
if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_GRAT_ARP_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_GRAT_ARP_EN); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_ALT_CID_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_ALT_CID_EN); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: + if (iface_param->iface_num & 0x1) + break; + memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value, + (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1)); + init_fw_cb->ipv4_dhcp_alt_cid_len = + strlen(init_fw_cb->ipv4_dhcp_alt_cid); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_REQ_VID_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_REQ_VID_EN); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_USE_VID_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_USE_VID_EN); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: + if (iface_param->iface_num & 0x1) + break; + memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value, + (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1)); + init_fw_cb->ipv4_dhcp_vid_len = + strlen(init_fw_cb->ipv4_dhcp_vid); + break; + case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_LEARN_IQN_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_LEARN_IQN_EN); + break; + case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE); + break; + case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_IN_FORWARD_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_IN_FORWARD_EN); + break; + case ISCSI_NET_PARAM_REDIRECT_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_ARP_REDIRECT_EN); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_ARP_REDIRECT_EN); + break; + case ISCSI_NET_PARAM_IPV4_TTL: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->ipv4_ttl = iface_param->value[0]; + break; default: ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n", iface_param->param); @@ -1184,6 +2496,168 @@ static void qla4xxx_set_ipv4(struct scsi_qla_host *ha, } } +static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha, + struct iscsi_iface_param_info *iface_param, + struct addr_ctrl_blk *init_fw_cb) +{ + switch (iface_param->param) { + case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->def_timeout = + cpu_to_le16(*(uint16_t *)iface_param->value); + break; + case 
ISCSI_IFACE_PARAM_HDRDGST_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN); + break; + case ISCSI_IFACE_PARAM_DATADGST_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN); + break; + case ISCSI_IFACE_PARAM_IMM_DATA_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN); + break; + case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN); + break; + case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN); + break; + case ISCSI_IFACE_PARAM_PDU_INORDER_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN); + break; + case ISCSI_IFACE_PARAM_ERL: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL); + init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] & + ISCSIOPTS_ERL); + break; + case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->iscsi_max_pdu_size = + cpu_to_le32(*(uint32_t *)iface_param->value) / + BYTE_UNITS; + break; + case ISCSI_IFACE_PARAM_FIRST_BURST: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->iscsi_fburst_len = + cpu_to_le32(*(uint32_t *)iface_param->value) / + BYTE_UNITS; + break; + case ISCSI_IFACE_PARAM_MAX_R2T: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->iscsi_max_outstnd_r2t = + cpu_to_le16(*(uint16_t *)iface_param->value); + break; + case ISCSI_IFACE_PARAM_MAX_BURST: + if (iface_param->iface_num & 0x1) + break; + init_fw_cb->iscsi_max_burst_len = + cpu_to_le32(*(uint32_t *)iface_param->value) / + BYTE_UNITS; + break; + case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN); + break; + case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN); + break; + case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + 
cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN); + break; + case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN); + break; + case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: + if (iface_param->iface_num & 0x1) + break; + if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) + init_fw_cb->iscsi_opts |= + cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN); + else + init_fw_cb->iscsi_opts &= + cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN); + break; + default: + ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n", + iface_param->param); + break; + } +} + static void qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb) { @@ -1241,40 +2715,47 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) nla_for_each_attr(attr, data, len, rem) { iface_param = nla_data(attr); - if (iface_param->param_type != ISCSI_NET_PARAM) - continue; - - switch (iface_param->iface_type) { - case ISCSI_IFACE_TYPE_IPV4: - switch (iface_param->iface_num) { - case 0: - qla4xxx_set_ipv4(ha, iface_param, init_fw_cb); - break; - default: + if (iface_param->param_type == ISCSI_NET_PARAM) { + switch (iface_param->iface_type) { + case ISCSI_IFACE_TYPE_IPV4: + switch (iface_param->iface_num) { + case 0: + qla4xxx_set_ipv4(ha, iface_param, + init_fw_cb); + break; + default: /* Cannot have more than one IPv4 interface */ - ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface " - "number = %d\n", - iface_param->iface_num); + ql4_printk(KERN_ERR, ha, + "Invalid IPv4 iface number = %d\n", + iface_param->iface_num); + break; + } break; - } - break; - case ISCSI_IFACE_TYPE_IPV6: - switch (iface_param->iface_num) { - case 0: - case 1: - qla4xxx_set_ipv6(ha, iface_param, init_fw_cb); + case ISCSI_IFACE_TYPE_IPV6: + switch (iface_param->iface_num) { + case 0: + case 1: + qla4xxx_set_ipv6(ha, iface_param, + init_fw_cb); + break; + default: + /* Cannot have more than two IPv6 interfaces */ + ql4_printk(KERN_ERR, ha, + "Invalid IPv6 iface number = %d\n", + iface_param->iface_num); + break; + } break; default: - /* Cannot have more than two IPv6 interface */ - ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface " - "number = %d\n", - iface_param->iface_num); + ql4_printk(KERN_ERR, ha, + "Invalid iface type\n"); break; } - break; - default: - ql4_printk(KERN_ERR, ha, "Invalid iface type\n"); - break; + } else if (iface_param->param_type == ISCSI_IFACE_PARAM) { + qla4xxx_set_iscsi_param(ha, iface_param, + init_fw_cb); + } else { + continue; } } @@ -1328,9 +2809,12 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, struct iscsi_session *sess = cls_sess->dd_data; struct ddb_entry *ddb_entry = sess->dd_data; struct scsi_qla_host *ha = ddb_entry->ha; + struct iscsi_cls_conn *cls_conn = ddb_entry->conn; + struct ql4_chap_table chap_tbl; int rval, len; uint16_t idx; + memset(&chap_tbl, 0, sizeof(chap_tbl)); switch (param) { case ISCSI_PARAM_CHAP_IN_IDX: rval = qla4xxx_get_chap_index(ha, sess->username_in, @@ -1342,14 +2826,46 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, len = sprintf(buf, "%hu\n", idx); break; case ISCSI_PARAM_CHAP_OUT_IDX: - rval = qla4xxx_get_chap_index(ha, sess->username, - sess->password, LOCAL_CHAP, - &idx); + if (ddb_entry->ddb_type == FLASH_DDB) { + if 
(ddb_entry->chap_tbl_idx != INVALID_ENTRY) { + idx = ddb_entry->chap_tbl_idx; + rval = QLA_SUCCESS; + } else { + rval = QLA_ERROR; + } + } else { + rval = qla4xxx_get_chap_index(ha, sess->username, + sess->password, + LOCAL_CHAP, &idx); + } if (rval) len = sprintf(buf, "\n"); else len = sprintf(buf, "%hu\n", idx); break; + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + /* First, populate session username and password for FLASH DDB, + * if not already done. This happens when session login fails + * for a FLASH DDB. + */ + if (ddb_entry->ddb_type == FLASH_DDB && + ddb_entry->chap_tbl_idx != INVALID_ENTRY && + !sess->username && !sess->password) { + idx = ddb_entry->chap_tbl_idx; + rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, + chap_tbl.secret, + idx); + if (!rval) { + iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, + (char *)chap_tbl.name, + strlen((char *)chap_tbl.name)); + iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, + (char *)chap_tbl.secret, + chap_tbl.secret_len); + } + } + /* allow fall-through */ default: return iscsi_session_get_param(cls_sess, param, buf); } @@ -1363,7 +2879,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, struct iscsi_conn *conn; struct qla_conn *qla_conn; struct sockaddr *dst_addr; - int len = 0; conn = cls_conn->dd_data; qla_conn = conn->dd_data; @@ -1377,9 +2892,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, default: return iscsi_conn_get_param(cls_conn, param, buf); } - - return len; - } int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) @@ -1540,7 +3052,6 @@ qla4xxx_session_create(struct iscsi_endpoint *ep, struct sockaddr *dst_addr; int ret; - DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); if (!ep) { printk(KERN_ERR "qla4xxx: missing ep.\n"); return NULL; @@ -1549,6 +3060,8 @@ qla4xxx_session_create(struct iscsi_endpoint *ep, qla_ep = ep->dd_data; dst_addr = (struct sockaddr *)&qla_ep->dst_addr; ha = to_qla_host(qla_ep->host); + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, + ha->host_no)); ret = qla4xxx_get_ddb_index(ha, &ddb_index); if (ret == QLA_ERROR) @@ -1569,6 +3082,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep, ddb_entry->sess = cls_sess; ddb_entry->unblock_sess = qla4xxx_unblock_ddb; ddb_entry->ddb_change = qla4xxx_ddb_change; + clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags); cls_sess->recovery_tmo = ql4xsess_recovery_tmo; ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; ha->tot_ddbs++; @@ -1587,10 +3101,11 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess) uint32_t ddb_state; int ret; - DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, + ha->host_no)); fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); @@ -1618,7 +3133,8 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess) destroy_session: qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); - + if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags)) + clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); spin_lock_irqsave(&ha->hardware_lock, flags); qla4xxx_free_ddb(ha, ddb_entry); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -1636,17 +3152,23 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx) struct iscsi_cls_conn *cls_conn; struct iscsi_session *sess; struct ddb_entry *ddb_entry; + 
struct scsi_qla_host *ha; - DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_idx); - if (!cls_conn) + if (!cls_conn) { + pr_info("%s: Can not create connection for conn_idx = %u\n", + __func__, conn_idx); return NULL; + } sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ddb_entry->conn = cls_conn; + ha = ddb_entry->ha; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__, + conn_idx)); return cls_conn; } @@ -1657,8 +3179,16 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_conn *conn; struct qla_conn *qla_conn; struct iscsi_endpoint *ep; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + struct iscsi_session *sess; - DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); + sess = cls_session->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, + cls_session->sid, cls_conn->cid)); if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) return -EINVAL; @@ -1681,10 +3211,11 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn) int ret = 0; int status = QLA_SUCCESS; - DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, + cls_sess->sid, cls_conn->cid)); /* Check if we have matching FW DDB, if yes then do not * login to this target. This could cause target to logout previous @@ -1758,10 +3289,11 @@ static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn) struct ddb_entry *ddb_entry; int options; - DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); sess = cls_sess->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__, + cls_conn->cid)); options = LOGOUT_OPTION_CLOSE_SESSION; if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) @@ -1922,6 +3454,354 @@ static int qla4xxx_task_xmit(struct iscsi_task *task) return -ENOSYS; } +static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess, + struct iscsi_bus_flash_conn *conn, + struct dev_db_entry *fw_ddb_entry) +{ + unsigned long options = 0; + int rc = 0; + + options = le16_to_cpu(fw_ddb_entry->options); + conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); + if (test_bit(OPT_IPV6_DEVICE, &options)) { + rc = iscsi_switch_str_param(&sess->portal_type, + PORTAL_TYPE_IPV6); + if (rc) + goto exit_copy; + } else { + rc = iscsi_switch_str_param(&sess->portal_type, + PORTAL_TYPE_IPV4); + if (rc) + goto exit_copy; + } + + sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, + &options); + sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); + sess->entry_state = test_bit(OPT_ENTRY_STATE, &options); + + options = le16_to_cpu(fw_ddb_entry->iscsi_options); + conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); + conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); + sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); + sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); + sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, + &options); + sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); + sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); + conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options); + sess->discovery_logout_en = 
test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, + &options); + sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); + sess->discovery_auth_optional = + test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); + if (test_bit(ISCSIOPT_ERL1, &options)) + sess->erl |= BIT_1; + if (test_bit(ISCSIOPT_ERL0, &options)) + sess->erl |= BIT_0; + + options = le16_to_cpu(fw_ddb_entry->tcp_options); + conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); + conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); + conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); + if (test_bit(TCPOPT_TIMER_SCALE3, &options)) + conn->tcp_timer_scale |= BIT_3; + if (test_bit(TCPOPT_TIMER_SCALE2, &options)) + conn->tcp_timer_scale |= BIT_2; + if (test_bit(TCPOPT_TIMER_SCALE1, &options)) + conn->tcp_timer_scale |= BIT_1; + + conn->tcp_timer_scale >>= 1; + conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); + + options = le16_to_cpu(fw_ddb_entry->ip_options); + conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); + + conn->max_recv_dlength = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); + conn->max_xmit_dlength = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); + sess->first_burst = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); + sess->max_burst = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); + sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); + sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); + sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); + sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); + conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); + conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; + conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; + conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl); + conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout); + conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); + conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); + conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); + sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link); + sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link); + sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); + sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); + + sess->default_taskmgmt_timeout = + le16_to_cpu(fw_ddb_entry->def_timeout); + conn->port = le16_to_cpu(fw_ddb_entry->port); + + options = le16_to_cpu(fw_ddb_entry->options); + conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); + if (!conn->ipaddress) { + rc = -ENOMEM; + goto exit_copy; + } + + conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); + if (!conn->redirect_ipaddr) { + rc = -ENOMEM; + goto exit_copy; + } + + memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN); + memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN); + + if (test_bit(OPT_IPV6_DEVICE, &options)) { + conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; + + conn->link_local_ipv6_addr = kmemdup( + fw_ddb_entry->link_local_ipv6_addr, + IPv6_ADDR_LEN, GFP_KERNEL); + if (!conn->link_local_ipv6_addr) { + rc = -ENOMEM; + goto exit_copy; + } + } else { + conn->ipv4_tos = fw_ddb_entry->ipv4_tos; + } + + if (fw_ddb_entry->iscsi_name[0]) { + rc = iscsi_switch_str_param(&sess->targetname, + (char *)fw_ddb_entry->iscsi_name); + if (rc) + goto exit_copy; + } + + if (fw_ddb_entry->iscsi_alias[0]) { + rc = 
iscsi_switch_str_param(&sess->targetalias, + (char *)fw_ddb_entry->iscsi_alias); + if (rc) + goto exit_copy; + } + + COPY_ISID(sess->isid, fw_ddb_entry->isid); + +exit_copy: + return rc; +} + +static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, + struct iscsi_bus_flash_conn *conn, + struct dev_db_entry *fw_ddb_entry) +{ + uint16_t options; + int rc = 0; + + options = le16_to_cpu(fw_ddb_entry->options); + SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11); + if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) + options |= BIT_8; + else + options &= ~BIT_8; + + SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6); + SET_BITVAL(sess->discovery_sess, options, BIT_4); + SET_BITVAL(sess->entry_state, options, BIT_3); + fw_ddb_entry->options = cpu_to_le16(options); + + options = le16_to_cpu(fw_ddb_entry->iscsi_options); + SET_BITVAL(conn->hdrdgst_en, options, BIT_13); + SET_BITVAL(conn->datadgst_en, options, BIT_12); + SET_BITVAL(sess->imm_data_en, options, BIT_11); + SET_BITVAL(sess->initial_r2t_en, options, BIT_10); + SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9); + SET_BITVAL(sess->pdu_inorder_en, options, BIT_8); + SET_BITVAL(sess->chap_auth_en, options, BIT_7); + SET_BITVAL(conn->snack_req_en, options, BIT_6); + SET_BITVAL(sess->discovery_logout_en, options, BIT_5); + SET_BITVAL(sess->bidi_chap_en, options, BIT_4); + SET_BITVAL(sess->discovery_auth_optional, options, BIT_3); + SET_BITVAL(sess->erl & BIT_1, options, BIT_1); + SET_BITVAL(sess->erl & BIT_0, options, BIT_0); + fw_ddb_entry->iscsi_options = cpu_to_le16(options); + + options = le16_to_cpu(fw_ddb_entry->tcp_options); + SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6); + SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5); + SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4); + SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3); + SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2); + SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1); + SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0); + fw_ddb_entry->tcp_options = cpu_to_le16(options); + + options = le16_to_cpu(fw_ddb_entry->ip_options); + SET_BITVAL(conn->fragment_disable, options, BIT_4); + fw_ddb_entry->ip_options = cpu_to_le16(options); + + fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); + fw_ddb_entry->iscsi_max_rcv_data_seg_len = + cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS); + fw_ddb_entry->iscsi_max_snd_data_seg_len = + cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS); + fw_ddb_entry->iscsi_first_burst_len = + cpu_to_le16(sess->first_burst / BYTE_UNITS); + fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst / + BYTE_UNITS); + fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait); + fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); + fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); + fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); + fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); + fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); + fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); + fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); + fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); + fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); + fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); + fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx); + fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); + fw_ddb_entry->tsid = 
cpu_to_le16(sess->tsid); + fw_ddb_entry->port = cpu_to_le16(conn->port); + fw_ddb_entry->def_timeout = + cpu_to_le16(sess->default_taskmgmt_timeout); + + if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4)) + fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class; + else + fw_ddb_entry->ipv4_tos = conn->ipv4_tos; + + if (conn->ipaddress) + memcpy(fw_ddb_entry->ip_addr, conn->ipaddress, + sizeof(fw_ddb_entry->ip_addr)); + + if (conn->redirect_ipaddr) + memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr, + sizeof(fw_ddb_entry->tgt_addr)); + + if (conn->link_local_ipv6_addr) + memcpy(fw_ddb_entry->link_local_ipv6_addr, + conn->link_local_ipv6_addr, + sizeof(fw_ddb_entry->link_local_ipv6_addr)); + + if (sess->targetname) + memcpy(fw_ddb_entry->iscsi_name, sess->targetname, + sizeof(fw_ddb_entry->iscsi_name)); + + if (sess->targetalias) + memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias, + sizeof(fw_ddb_entry->iscsi_alias)); + + COPY_ISID(fw_ddb_entry->isid, sess->isid); + + return rc; +} + +static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn, + struct iscsi_session *sess, + struct dev_db_entry *fw_ddb_entry) +{ + unsigned long options = 0; + uint16_t ddb_link; + uint16_t disc_parent; + char ip_addr[DDB_IPADDR_LEN]; + + options = le16_to_cpu(fw_ddb_entry->options); + conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); + sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE, + &options); + sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options); + + options = le16_to_cpu(fw_ddb_entry->iscsi_options); + conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options); + conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options); + sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options); + sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options); + sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER, + &options); + sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options); + sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options); + sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN, + &options); + sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options); + sess->discovery_auth_optional = + test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options); + if (test_bit(ISCSIOPT_ERL1, &options)) + sess->erl |= BIT_1; + if (test_bit(ISCSIOPT_ERL0, &options)) + sess->erl |= BIT_0; + + options = le16_to_cpu(fw_ddb_entry->tcp_options); + conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options); + conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options); + conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options); + if (test_bit(TCPOPT_TIMER_SCALE3, &options)) + conn->tcp_timer_scale |= BIT_3; + if (test_bit(TCPOPT_TIMER_SCALE2, &options)) + conn->tcp_timer_scale |= BIT_2; + if (test_bit(TCPOPT_TIMER_SCALE1, &options)) + conn->tcp_timer_scale |= BIT_1; + + conn->tcp_timer_scale >>= 1; + conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options); + + options = le16_to_cpu(fw_ddb_entry->ip_options); + conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options); + + conn->max_recv_dlength = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); + conn->max_xmit_dlength = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); + sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); + sess->first_burst = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); + sess->max_burst = BYTE_UNITS * + 
le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); + sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); + sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); + sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); + conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss); + conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf; + conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf; + conn->ipv4_tos = fw_ddb_entry->ipv4_tos; + conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout); + conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port); + conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn); + conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn); + sess->tsid = le16_to_cpu(fw_ddb_entry->tsid); + COPY_ISID(sess->isid, fw_ddb_entry->isid); + + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); + if (ddb_link == DDB_ISNS) + disc_parent = ISCSI_DISC_PARENT_ISNS; + else if (ddb_link == DDB_NO_LINK) + disc_parent = ISCSI_DISC_PARENT_UNKNOWN; + else if (ddb_link < MAX_DDB_ENTRIES) + disc_parent = ISCSI_DISC_PARENT_SENDTGT; + else + disc_parent = ISCSI_DISC_PARENT_UNKNOWN; + + iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, + iscsi_get_discovery_parent_name(disc_parent), 0); + + iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS, + (char *)fw_ddb_entry->iscsi_alias, 0); + + options = le16_to_cpu(fw_ddb_entry->options); + if (options & DDB_OPT_IPV6_DEVICE) { + memset(ip_addr, 0, sizeof(ip_addr)); + sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr); + iscsi_set_param(conn->cls_conn, ISCSI_PARAM_LOCAL_IPADDR, + (char *)ip_addr, 0); + } +} + static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, struct iscsi_cls_session *cls_sess, @@ -1930,6 +3810,7 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, int buflen = 0; struct iscsi_session *sess; struct ddb_entry *ddb_entry; + struct ql4_chap_table chap_tbl; struct iscsi_conn *conn; char ip_addr[DDB_IPADDR_LEN]; uint16_t options = 0; @@ -1937,50 +3818,46 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, sess = cls_sess->dd_data; ddb_entry = sess->dd_data; conn = cls_conn->dd_data; + memset(&chap_tbl, 0, sizeof(chap_tbl)); ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); - conn->max_recv_dlength = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); - - conn->max_xmit_dlength = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); - - sess->initial_r2t_en = - (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options)); - - sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); - - sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options)); - - sess->first_burst = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); - - sess->max_burst = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); - - sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); - - sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); + qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); + sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout); conn->persistent_port = le16_to_cpu(fw_ddb_entry->port); - sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); - + memset(ip_addr, 0, sizeof(ip_addr)); options = le16_to_cpu(fw_ddb_entry->options); - if (options & DDB_OPT_IPV6_DEVICE) + if (options & DDB_OPT_IPV6_DEVICE) { + iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4); + + memset(ip_addr, 0, sizeof(ip_addr)); 
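/*
 * %pI6 and %pI4 are kernel printf extensions that format raw address
 * bytes as printable IPv6/IPv4 strings. A minimal sketch (addr4 and
 * addr6 are hypothetical byte arrays, buf a local scratch buffer):
 *
 *	u8 addr4[4] = { 192, 168, 0, 1 };
 *	sprintf(buf, "%pI4", addr4);	// -> "192.168.0.1"
 *	sprintf(buf, "%pI6", addr6);	// -> "0001:0002:...:0008"
 *
 * The string built in ip_addr below is what gets handed to
 * iscsi_set_param(..., ISCSI_PARAM_PERSISTENT_ADDRESS, ...).
 */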
sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr); - else + } else { + iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4); sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr); + } + iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, + (char *)ip_addr, buflen); iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME, (char *)fw_ddb_entry->iscsi_name, buflen); iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME, (char *)ha->name_string, buflen); - iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, - (char *)ip_addr, buflen); - iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS, - (char *)fw_ddb_entry->iscsi_alias, buflen); + + if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { + if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, + chap_tbl.secret, + ddb_entry->chap_tbl_idx)) { + iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, + (char *)chap_tbl.name, + strlen((char *)chap_tbl.name)); + iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, + (char *)chap_tbl.secret, + chap_tbl.secret_len); + } + } } void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, @@ -2068,37 +3945,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, /* Update params */ ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); - conn->max_recv_dlength = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); - - conn->max_xmit_dlength = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); - - sess->initial_r2t_en = - (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options)); - - sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); - - sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options)); - - sess->first_burst = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); - - sess->max_burst = BYTE_UNITS * - le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); - - sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); - - sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); - - sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); + qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); memcpy(sess->initiatorname, ha->name_string, min(sizeof(ha->name_string), sizeof(sess->initiatorname))); - iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS, - (char *)fw_ddb_entry->iscsi_alias, 0); - exit_session_conn_param: if (fw_ddb_entry) dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), @@ -2243,6 +4094,8 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) !test_bit(AF_ONLINE, &ha->flags) || !test_bit(AF_LINK_UP, &ha->flags) || test_bit(AF_LOOPBACK, &ha->flags) || + test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) || + test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) || test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) goto qc_host_busy; @@ -2317,7 +4170,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha) if (ha->nx_pcibase) iounmap( (struct device_reg_82xx __iomem *)ha->nx_pcibase); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { if (ha->nx_pcibase) iounmap( (struct device_reg_83xx __iomem *)ha->nx_pcibase); @@ -2511,7 +4364,7 @@ static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha) __func__); if (halt_status & HALT_STATUS_UNRECOVERABLE) halt_status_unrecoverable = 1; - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { if (halt_status & QLA83XX_HALT_STATUS_FW_RESET) ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n", __func__); @@ -2543,6 +4396,12 @@ static void 
qla4_8xxx_process_fw_error(struct scsi_qla_host *ha) void qla4_8xxx_watchdog(struct scsi_qla_host *ha) { uint32_t dev_state; + uint32_t idc_ctrl; + + if (is_qla8032(ha) && + (qla4_83xx_is_detached(ha) == QLA_SUCCESS)) + WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n", + __func__, ha->func_num); /* don't poll if reset is going on */ if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || @@ -2561,10 +4420,23 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha) qla4xxx_wake_dpc(ha); } else if (dev_state == QLA8XXX_DEV_NEED_RESET && !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { - if (is_qla8032(ha) || + + ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n", + __func__); + + if (is_qla8032(ha) || is_qla8042(ha)) { + idc_ctrl = qla4_83xx_rd_reg(ha, + QLA83XX_IDC_DRV_CTRL); + if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) { + ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n", + __func__); + qla4xxx_mailbox_premature_completion( + ha); + } + } + + if ((is_qla8032(ha) || is_qla8042(ha)) || (is_qla8022(ha) && !ql4xdontresethba)) { - ql4_printk(KERN_INFO, ha, "%s: HW State: " - "NEED RESET!\n", __func__); set_bit(DPC_RESET_HA, &ha->dpc_flags); qla4xxx_wake_dpc(ha); } @@ -2688,6 +4560,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha) test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || + test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) || test_bit(DPC_AEN, &ha->dpc_flags)) { DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" " - dpc flags = 0x%lx\n", @@ -2713,11 +4586,19 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha) uint32_t index = 0; unsigned long flags; struct scsi_cmnd *cmd; + unsigned long wtime; + uint32_t wtmo; - unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ); + if (is_qla40XX(ha)) + wtmo = WAIT_CMD_TOV; + else + wtmo = ha->nx_reset_timeout / 2; - DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to " - "complete\n", WAIT_CMD_TOV)); + wtime = jiffies + (wtmo * HZ); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Wait up to %u seconds for cmds to complete\n", + wtmo)); while (!time_after_eq(jiffies, wtime)) { spin_lock_irqsave(&ha->hardware_lock, flags); @@ -2947,7 +4828,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); - if (is_qla8032(ha) && + if ((is_qla8032(ha) || is_qla8042(ha)) && !test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) { ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", __func__); @@ -2977,9 +4858,6 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) ha->host_no, __func__)); status = ha->isp_ops->reset_firmware(ha); if (status == QLA_SUCCESS) { - if (!test_bit(AF_FW_RECOVERY, &ha->flags)) - qla4xxx_cmd_wait(ha); - ha->isp_ops->disable_intrs(ha); qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); qla4xxx_abort_active_cmds(ha, DID_RESET << 16); @@ -3020,11 +4898,11 @@ chip_reset: qla4xxx_cmd_wait(ha); qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); - qla4xxx_abort_active_cmds(ha, DID_RESET << 16); DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s - Performing chip reset..\n", ha->host_no, __func__)); status = ha->isp_ops->reset_chip(ha); + qla4xxx_abort_active_cmds(ha, DID_RESET << 16); } /* Flush any pending ddb changed AENs */ @@ -3040,8 +4918,21 @@ recover_ha_init_adapter: ssleep(6); /* NOTE: AF_ONLINE flag set upon successful completion of - * qla4xxx_initialize_adapter */ + * qla4xxx_initialize_adapter */ status = qla4xxx_initialize_adapter(ha, 
RESET_ADAPTER); + if (is_qla80XX(ha) && (status == QLA_ERROR)) { + status = qla4_8xxx_check_init_adapter_retry(ha); + if (status == QLA_ERROR) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n", + ha->host_no, __func__); + qla4xxx_dead_adapter_cleanup(ha); + clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); + clear_bit(DPC_RESET_HA, &ha->dpc_flags); + clear_bit(DPC_RESET_HA_FW_CONTEXT, + &ha->dpc_flags); + goto exit_recover; + } + } } /* Retry failed adapter initialization, if necessary @@ -3145,7 +5036,9 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session) } else { /* Trigger relogin */ if (ddb_entry->ddb_type == FLASH_DDB) { - if (!test_bit(DF_RELOGIN, &ddb_entry->flags)) + if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) || + test_bit(DF_DISABLE_RELOGIN, + &ddb_entry->flags))) qla4xxx_arm_relogin_timer(ddb_entry); } else iscsi_session_failure(cls_session->dd_data, @@ -3248,6 +5141,9 @@ static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess) if (!(ddb_entry->ddb_type == FLASH_DDB)) return; + if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) + return; + if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && !iscsi_is_session_online(cls_sess)) { DEBUG2(ql4_printk(KERN_INFO, ha, @@ -3382,9 +5278,9 @@ static void qla4xxx_do_dpc(struct work_struct *work) container_of(work, struct scsi_qla_host, dpc_work); int status = QLA_ERROR; - DEBUG2(printk("scsi%ld: %s: DPC handler waking up." - "flags = 0x%08lx, dpc_flags = 0x%08lx\n", - ha->host_no, __func__, ha->flags, ha->dpc_flags)) + DEBUG2(ql4_printk(KERN_INFO, ha, + "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n", + ha->host_no, __func__, ha->flags, ha->dpc_flags)); /* Initialization not yet finished. Don't do anything yet. 
*/ if (!test_bit(AF_INIT_DONE, &ha->flags)) @@ -3401,7 +5297,7 @@ static void qla4xxx_do_dpc(struct work_struct *work) if (is_qla80XX(ha)) { if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n", __func__); /* disable pause frame for ISP83xx */ @@ -3416,8 +5312,35 @@ static void qla4xxx_do_dpc(struct work_struct *work) qla4_8xxx_device_state_handler(ha); } - if (test_and_clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) + if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) { + if (is_qla8042(ha)) { + if (ha->idc_info.info2 & + ENABLE_INTERNAL_LOOPBACK) { + ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n", + __func__); + status = qla4_84xx_config_acb(ha, + ACB_CONFIG_DISABLE); + if (status != QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n", + __func__); + } + } + } qla4_83xx_post_idc_ack(ha); + clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags); + } + + if (is_qla8042(ha) && + test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) { + ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n", + __func__); + if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) != + QLA_SUCCESS) { + ql4_printk(KERN_INFO, ha, "%s: ACB config failed ", + __func__); + } + clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags); + } if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { qla4_8xxx_need_qsnt_handler(ha); @@ -3429,7 +5352,8 @@ static void qla4xxx_do_dpc(struct work_struct *work) test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) { if ((is_qla8022(ha) && ql4xdontresethba) || - (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) { + ((is_qla8032(ha) || is_qla8042(ha)) && + qla4_83xx_idc_dontreset(ha))) { DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", ha->host_no, __func__)); clear_bit(DPC_RESET_HA, &ha->dpc_flags); @@ -3501,6 +5425,11 @@ dpc_post_reset_ha: qla4xxx_relogin_all_devices(ha); } } + if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) { + if (qla4xxx_sysfs_ddb_export(ha)) + ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n", + __func__); + } } /** @@ -3521,7 +5450,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha) } else if (is_qla8022(ha)) { writel(0, &ha->qla4_82xx_reg->host_int); readl(&ha->qla4_82xx_reg->host_int); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { writel(0, &ha->qla4_83xx_reg->risc_intr); readl(&ha->qla4_83xx_reg->risc_intr); } @@ -3596,7 +5525,7 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) (ha->pdev->devfn << 11)); ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? 
QLA82XX_CAM_RAM_DB1 : QLA82XX_CAM_RAM_DB2); - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *) ((uint8_t *)ha->nx_pcibase); } @@ -3737,8 +5666,8 @@ static struct isp_operations qla4_83xx_isp_ops = { .reset_firmware = qla4_8xxx_stop_firmware, .queue_iocb = qla4_83xx_queue_iocb, .complete_iocb = qla4_83xx_complete_iocb, - .rd_shdw_req_q_out = qla4_83xx_rd_shdw_req_q_out, - .rd_shdw_rsp_q_in = qla4_83xx_rd_shdw_rsp_q_in, + .rd_shdw_req_q_out = qla4xxx_rd_shdw_req_q_out, + .rd_shdw_rsp_q_in = qla4xxx_rd_shdw_rsp_q_in, .get_sys_info = qla4_8xxx_get_sys_info, .rd_reg_direct = qla4_83xx_rd_reg, .wr_reg_direct = qla4_83xx_wr_reg, @@ -3761,11 +5690,6 @@ uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha) return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out)); } -uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha) -{ - return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->req_q_out)); -} - uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) { return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in); @@ -3776,11 +5700,6 @@ uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in)); } -uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) -{ - return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->rsp_q_in)); -} - static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) { struct scsi_qla_host *ha = data; @@ -4005,7 +5924,7 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) if (val & BIT_7) ddb_index[1] = (val & 0x7f); - } else if (is_qla8022(ha)) { + } else if (is_qla80XX(ha)) { buf = dma_alloc_coherent(&ha->pdev->dev, size, &buf_dma, GFP_KERNEL); if (!buf) { @@ -4083,7 +6002,7 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, int max_chap_entries = 0; struct ql4_chap_table *chap_table; - if (is_qla8022(ha)) + if (is_qla80XX(ha)) max_chap_entries = (ha->hw.flt_chap_size / 2) / sizeof(struct ql4_chap_table); else @@ -4321,64 +6240,6 @@ kset_free: } -/** - * qla4xxx_create chap_list - Create CHAP list from FLASH - * @ha: pointer to adapter structure - * - * Read flash and make a list of CHAP entries, during login when a CHAP entry - * is received, it will be checked in this list. If entry exist then the CHAP - * entry index is set in the DDB. If CHAP entry does not exist in this list - * then a new entry is added in FLASH in CHAP table and the index obtained is - * used in the DDB. - **/ -static void qla4xxx_create_chap_list(struct scsi_qla_host *ha) -{ - int rval = 0; - uint8_t *chap_flash_data = NULL; - uint32_t offset; - dma_addr_t chap_dma; - uint32_t chap_size = 0; - - if (is_qla40XX(ha)) - chap_size = MAX_CHAP_ENTRIES_40XX * - sizeof(struct ql4_chap_table); - else /* Single region contains CHAP info for both - * ports which is divided into half for each port. 
- */ - chap_size = ha->hw.flt_chap_size / 2; - - chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size, - &chap_dma, GFP_KERNEL); - if (!chap_flash_data) { - ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n"); - return; - } - if (is_qla40XX(ha)) - offset = FLASH_CHAP_OFFSET; - else { - offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); - if (ha->port_num == 1) - offset += chap_size; - } - - rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); - if (rval != QLA_SUCCESS) - goto exit_chap_list; - - if (ha->chap_list == NULL) - ha->chap_list = vmalloc(chap_size); - if (ha->chap_list == NULL) { - ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n"); - goto exit_chap_list; - } - - memcpy(ha->chap_list, chap_flash_data, chap_size); - -exit_chap_list: - dma_free_coherent(&ha->pdev->dev, chap_size, - chap_flash_data, chap_dma); -} - static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, struct ql4_tuple_ddb *tddb) { @@ -4470,7 +6331,8 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, } static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, - struct dev_db_entry *fw_ddb_entry) + struct dev_db_entry *fw_ddb_entry, + uint32_t *index) { struct ddb_entry *ddb_entry; struct ql4_tuple_ddb *fw_tddb = NULL; @@ -4504,6 +6366,8 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) { ret = QLA_SUCCESS; /* found */ + if (index != NULL) + *index = idx; goto exit_check; } } @@ -4739,6 +6603,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha, ddb_entry->ha = ha; ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb; ddb_entry->ddb_change = qla4xxx_flash_ddb_change; + ddb_entry->chap_tbl_idx = INVALID_ENTRY; atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); atomic_set(&ddb_entry->relogin_timer, 0); @@ -4800,6 +6665,87 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha) } while (time_after(wtime, jiffies)); } +static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, + struct dev_db_entry *flash_ddb_entry) +{ + uint16_t options = 0; + size_t ip_len = IP_ADDR_LEN; + + options = le16_to_cpu(fw_ddb_entry->options); + if (options & DDB_OPT_IPV6_DEVICE) + ip_len = IPv6_ADDR_LEN; + + if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) + return QLA_ERROR; + + if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], + sizeof(fw_ddb_entry->isid))) + return QLA_ERROR; + + if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port, + sizeof(fw_ddb_entry->port))) + return QLA_ERROR; + + return QLA_SUCCESS; +} + +static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint32_t fw_idx, uint32_t *flash_index) +{ + struct dev_db_entry *flash_ddb_entry; + dma_addr_t flash_ddb_entry_dma; + uint32_t idx = 0; + int max_ddbs; + int ret = QLA_ERROR, status; + + max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + + flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, + &flash_ddb_entry_dma); + if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { + ql4_printk(KERN_ERR, ha, "Out of memory\n"); + goto exit_find_st_idx; + } + + status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, + flash_ddb_entry_dma, fw_idx); + if (status == QLA_SUCCESS) { + status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); + if (status == QLA_SUCCESS) { + *flash_index = fw_idx; + ret = QLA_SUCCESS; + goto exit_find_st_idx; + } + } + + for (idx = 0; idx < max_ddbs; idx++) { + status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, + flash_ddb_entry_dma, idx); + if (status == QLA_ERROR) + continue; + + status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); + if (status == QLA_SUCCESS) { + *flash_index = idx; + ret = QLA_SUCCESS; + goto exit_find_st_idx; + } + } + + if (idx == max_ddbs) + ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", + fw_idx); + +exit_find_st_idx: + if (flash_ddb_entry) + dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, + flash_ddb_entry_dma); + + return ret; +} + static void qla4xxx_build_st_list(struct scsi_qla_host *ha, struct list_head *list_st) { @@ -4811,6 +6757,7 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha, int ret; uint32_t idx = 0, next_idx = 0; uint32_t state = 0, conn_err = 0; + uint32_t flash_index = -1; uint16_t conn_id = 0; fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, @@ -4843,6 +6790,19 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha, if (!st_ddb_idx) break; + ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, + &flash_index); + if (ret == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, + "No flash entry for ST at idx [%d]\n", idx); + st_ddb_idx->flash_ddb_idx = idx; + } else { + ql4_printk(KERN_INFO, ha, + "ST at idx [%d] is stored at flash [%d]\n", + idx, flash_index); + st_ddb_idx->flash_ddb_idx = flash_index; + } + st_ddb_idx->fw_ddb_idx = idx; list_add_tail(&st_ddb_idx->list, list_st); @@ -4887,6 +6847,28 @@ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha, } } +static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, + struct dev_db_entry *fw_ddb_entry) +{ + struct iscsi_cls_session *cls_sess; + struct iscsi_session *sess; + uint32_t max_ddbs = 0; + uint16_t ddb_link = -1; + + max_ddbs = is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + + cls_sess = ddb_entry->sess; + sess = cls_sess->dd_data; + + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); + if (ddb_link < max_ddbs) + sess->discovery_parent_idx = ddb_link; + else + sess->discovery_parent_idx = DDB_NO_LINK; +} + static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, struct dev_db_entry *fw_ddb_entry, int is_reset, uint16_t idx) @@ -4951,6 +6933,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha, /* Update sess/conn params */ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); + qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); if (is_reset == RESET_ADAPTER) { iscsi_block_session(cls_sess); @@ -4967,17 +6950,43 @@ exit_setup: return ret; } +static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, + struct list_head *list_ddb, + struct dev_db_entry *fw_ddb_entry) +{ + struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; + uint16_t ddb_link; + + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); + + list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { + if (ddb_idx->fw_ddb_idx == ddb_link) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "Updating NT parent idx from [%d] to [%d]\n", + ddb_link, ddb_idx->flash_ddb_idx)); + fw_ddb_entry->ddb_link = + cpu_to_le16(ddb_idx->flash_ddb_idx); + return; + } + } +} + static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, - struct list_head *list_nt, int is_reset) + struct list_head *list_nt, + struct list_head *list_st, + int is_reset) { struct dev_db_entry *fw_ddb_entry; + struct ddb_entry *ddb_entry = NULL; dma_addr_t fw_ddb_dma; int max_ddbs; int fw_idx_size; int ret; uint32_t idx = 0, next_idx = 0; uint32_t state = 0, conn_err = 0; + uint32_t ddb_idx = -1; uint16_t conn_id = 0; + uint16_t ddb_link = -1; struct qla_ddb_index *nt_ddb_idx; fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, @@ -5004,12 +7013,18 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, if (strlen((char *) fw_ddb_entry->iscsi_name) == 0) goto continue_next_nt; + ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); + if (ddb_link < max_ddbs) + qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); + if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || - state == DDB_DS_SESSION_FAILED)) + state == DDB_DS_SESSION_FAILED) && + (is_reset == INIT_ADAPTER)) goto continue_next_nt; DEBUG2(ql4_printk(KERN_INFO, ha, "Adding DDB to session = 0x%x\n", idx)); + if (is_reset == INIT_ADAPTER) { nt_ddb_idx = vmalloc(fw_idx_size); if (!nt_ddb_idx) @@ -5039,9 +7054,17 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, list_add_tail(&nt_ddb_idx->list, list_nt); } else if (is_reset == RESET_ADAPTER) { - if (qla4xxx_is_session_exists(ha, fw_ddb_entry) == - QLA_SUCCESS) + ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, + &ddb_idx); + if (ret == QLA_SUCCESS) { + ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, + ddb_idx); + if (ddb_entry != NULL) + qla4xxx_update_sess_disc_idx(ha, + ddb_entry, + fw_ddb_entry); goto continue_next_nt; + } } ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); @@ -5058,6 +7081,1376 @@ exit_nt_list: dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); } +static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, + struct list_head *list_nt, + uint16_t target_id) +{ + struct dev_db_entry *fw_ddb_entry; + dma_addr_t fw_ddb_dma; + int max_ddbs; + int fw_idx_size; + int ret; + uint32_t idx = 0, next_idx = 0; + uint32_t state = 0, conn_err = 0; + uint16_t conn_id = 0; + struct qla_ddb_index *nt_ddb_idx; + + 
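/*
 * The scratch DDB buffer below comes from a dma_pool rather than a
 * one-off dma_alloc_coherent(): these lookup loops all need a coherent
 * buffer of the same fixed sizeof(struct dev_db_entry), so a pool
 * (created once at adapter init) amortizes the allocations. A minimal
 * sketch of the alloc/free pairing used here:
 *
 *	entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, &dma);
 *	if (!entry)
 *		return -ENOMEM;
 *	...use entry (CPU address) and dma (bus address)...
 *	dma_pool_free(ha->fw_ddb_dma_pool, entry, dma);
 */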
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, + &fw_ddb_dma); + if (fw_ddb_entry == NULL) { + DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); + goto exit_new_nt_list; + } + max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : + MAX_DEV_DB_ENTRIES; + fw_idx_size = sizeof(struct qla_ddb_index); + + for (idx = 0; idx < max_ddbs; idx = next_idx) { + ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma, + NULL, &next_idx, &state, + &conn_err, NULL, &conn_id); + if (ret == QLA_ERROR) + break; + + /* Check if NT, then add it to list */ + if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) + goto continue_next_new_nt; + + if (!(state == DDB_DS_NO_CONNECTION_ACTIVE)) + goto continue_next_new_nt; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Adding DDB to session = 0x%x\n", idx)); + + nt_ddb_idx = vmalloc(fw_idx_size); + if (!nt_ddb_idx) + break; + + nt_ddb_idx->fw_ddb_idx = idx; + + ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); + if (ret == QLA_SUCCESS) { + /* free nt_ddb_idx and do not add to list_nt */ + vfree(nt_ddb_idx); + goto continue_next_new_nt; + } + + if (target_id < max_ddbs) + fw_ddb_entry->ddb_link = cpu_to_le16(target_id); + + list_add_tail(&nt_ddb_idx->list, list_nt); + + ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, + idx); + if (ret == QLA_ERROR) + goto exit_new_nt_list; + +continue_next_new_nt: + if (next_idx == 0) + break; + } + +exit_new_nt_list: + if (fw_ddb_entry) + dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); +} + +/** + * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry + * @dev: dev associated with the sysfs entry + * @data: pointer to flashnode session object + * + * Returns: + * 1: if flashnode entry is non-persistent + * 0: if flashnode entry is persistent + **/ +static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) +{ + struct iscsi_bus_flash_session *fnode_sess; + + if (!iscsi_flashnode_bus_match(dev, NULL)) + return 0; + + fnode_sess = iscsi_dev_to_flash_session(dev); + + return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT); +} + +/** + * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target + * @ha: pointer to host + * @fw_ddb_entry: flash ddb data + * @idx: target index + * @user: if set then this call is made from userland else from kernel + * + * Returns: + * On success: QLA_SUCCESS + * On failure: QLA_ERROR + * + * This creates separate sysfs entries for session and connection attributes of + * the given fw ddb entry. + * If this is invoked as a result of a userspace call then the entry is marked + * as non-persistent using the flash_state field.
+ **/ +static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint16_t *idx, int user) +{ + struct iscsi_bus_flash_session *fnode_sess = NULL; + struct iscsi_bus_flash_conn *fnode_conn = NULL; + int rc = QLA_ERROR; + + fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx, + &qla4xxx_iscsi_transport, 0); + if (!fnode_sess) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n", + __func__, *idx, ha->host_no); + goto exit_tgt_create; + } + + fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess, + &qla4xxx_iscsi_transport, 0); + if (!fnode_conn) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n", + __func__, *idx, ha->host_no); + goto free_sess; + } + + if (user) { + fnode_sess->flash_state = DEV_DB_NON_PERSISTENT; + } else { + fnode_sess->flash_state = DEV_DB_PERSISTENT; + + if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx) + fnode_sess->is_boot_target = 1; + else + fnode_sess->is_boot_target = 0; + } + + rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, + fw_ddb_entry); + + ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", + __func__, fnode_sess->dev.kobj.name); + + ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", + __func__, fnode_conn->dev.kobj.name); + + return QLA_SUCCESS; + +free_sess: + iscsi_destroy_flashnode_sess(fnode_sess); + +exit_tgt_create: + return QLA_ERROR; +} + +/** + * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash + * @shost: pointer to host + * @buf: type of ddb entry (ipv4/ipv6) + * @len: length of buf + * + * This creates a new ddb entry in the flash by finding the first free index, + * storing the default ddb there, and then creating a sysfs entry for the new + * ddb entry. + **/ +static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, + int len) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + struct device *dev; + uint16_t idx = 0; + uint16_t max_ddbs = 0; + uint32_t options = 0; + uint32_t rval = QLA_ERROR; + + if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) && + strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) { + DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n", + __func__)); + goto exit_ddb_add; + } + + max_ddbs = is_qla40XX(ha) ? 
MAX_PRST_DEV_DB_ENTRIES : + MAX_DEV_DB_ENTRIES; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + goto exit_ddb_add; + } + + dev = iscsi_find_flashnode_sess(ha->host, NULL, + qla4xxx_sysfs_ddb_is_non_persistent); + if (dev) { + ql4_printk(KERN_ERR, ha, + "%s: A non-persistent entry %s found\n", + __func__, dev->kobj.name); + put_device(dev); + goto exit_ddb_add; + } + + /* Index 0 and 1 are reserved for boot target entries */ + for (idx = 2; idx < max_ddbs; idx++) { + if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, + fw_ddb_entry_dma, idx)) + break; + } + + if (idx == max_ddbs) + goto exit_ddb_add; + + if (!strncasecmp("ipv6", buf, 4)) + options |= IPV6_DEFAULT_DDB_ENTRY; + + rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); + if (rval == QLA_ERROR) + goto exit_ddb_add; + + rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1); + +exit_ddb_add: + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); + if (rval == QLA_SUCCESS) + return idx; + else + return -EIO; +} + +/** + * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash + * @fnode_sess: pointer to session attrs of flash ddb entry + * @fnode_conn: pointer to connection attrs of flash ddb entry + * + * This writes the contents of target ddb buffer to Flash with a valid cookie + * value in order to make the ddb entry persistent. + **/ +static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn) +{ + struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); + struct scsi_qla_host *ha = to_qla_host(shost); + uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + uint32_t options = 0; + int rval = 0; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + rval = -ENOMEM; + goto exit_ddb_apply; + } + + if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + options |= IPV6_DEFAULT_DDB_ENTRY; + + rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); + if (rval == QLA_ERROR) + goto exit_ddb_apply; + + dev_db_start_offset += (fnode_sess->target_id * + sizeof(*fw_ddb_entry)); + + qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); + fw_ddb_entry->cookie = DDB_VALID_COOKIE; + + rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, + sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT); + + if (rval == QLA_SUCCESS) { + fnode_sess->flash_state = DEV_DB_PERSISTENT; + ql4_printk(KERN_INFO, ha, + "%s: flash node %u of host %lu written to flash\n", + __func__, fnode_sess->target_id, ha->host_no); + } else { + rval = -EIO; + ql4_printk(KERN_ERR, ha, + "%s: Error while writing flash node %u of host %lu to flash\n", + __func__, fnode_sess->target_id, ha->host_no); + } + +exit_ddb_apply: + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); + return rval; +} + +static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint16_t idx) +{ + struct dev_db_entry *ddb_entry = NULL; + dma_addr_t ddb_entry_dma; + unsigned long wtime; + uint32_t 
mbx_sts = 0; + uint32_t state = 0, conn_err = 0; + uint16_t tmo = 0; + int ret = 0; + + ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry), + &ddb_entry_dma, GFP_KERNEL); + if (!ddb_entry) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + return QLA_ERROR; + } + + memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry)); + + ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts); + if (ret != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to set ddb entry for index %d\n", + __func__, idx)); + goto exit_ddb_conn_open; + } + + qla4xxx_conn_open(ha, idx); + + /* To ensure that sendtargets is done, wait for at least 12 secs */ + tmo = ((ha->def_timeout > LOGIN_TOV) && + (ha->def_timeout < LOGIN_TOV * 10) ? + ha->def_timeout : LOGIN_TOV); + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Default time to wait for login to ddb %d\n", tmo)); + + wtime = jiffies + (HZ * tmo); + do { + ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, + NULL, &state, &conn_err, NULL, + NULL); + if (ret == QLA_ERROR) + continue; + + if (state == DDB_DS_NO_CONNECTION_ACTIVE || + state == DDB_DS_SESSION_FAILED) + break; + + schedule_timeout_uninterruptible(HZ / 10); + } while (time_after(wtime, jiffies)); + +exit_ddb_conn_open: + if (ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry), + ddb_entry, ddb_entry_dma); + return ret; +} + +static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint16_t target_id) +{ + struct qla_ddb_index *ddb_idx, *ddb_idx_tmp; + struct list_head list_nt; + uint16_t ddb_index; + int ret = 0; + + if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) { + ql4_printk(KERN_WARNING, ha, + "%s: A discovery already in progress!\n", __func__); + return QLA_ERROR; + } + + INIT_LIST_HEAD(&list_nt); + + set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); + + ret = qla4xxx_get_ddb_index(ha, &ddb_index); + if (ret == QLA_ERROR) + goto exit_login_st_clr_bit; + + ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index); + if (ret == QLA_ERROR) + goto exit_login_st; + + qla4xxx_build_new_nt_list(ha, &list_nt, target_id); + + list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) { + list_del_init(&ddb_idx->list); + qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx); + vfree(ddb_idx); + } + +exit_login_st: + if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, + "Unable to clear DDB index = 0x%x\n", ddb_index); + } + + clear_bit(ddb_index, ha->ddb_idx_map); + +exit_login_st_clr_bit: + clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags); + return ret; +} + +static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint16_t idx) +{ + int ret = QLA_ERROR; + + ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL); + if (ret != QLA_SUCCESS) + ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, + idx); + else + ret = -EPERM; + + return ret; +} + +/** + * qla4xxx_sysfs_ddb_login - Login to the specified target + * @fnode_sess: pointer to session attrs of flash ddb entry + * @fnode_conn: pointer to connection attrs of flash ddb entry + * + * This logs in to the specified target + **/ +static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn) +{ + struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); + struct scsi_qla_host *ha = to_qla_host(shost); + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + 
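/*
 * qla4xxx_sysfs_ddb_conn_open() above waits for the login attempt with
 * the standard jiffies-based polling idiom. A minimal sketch, with
 * 'tmo' in seconds and read_state() standing in for the
 * qla4xxx_get_fwddb_entry() state query:
 *
 *	unsigned long wtime = jiffies + (HZ * tmo);
 *	do {
 *		state = read_state();
 *		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
 *		    state == DDB_DS_SESSION_FAILED)
 *			break;
 *		schedule_timeout_uninterruptible(HZ / 10);
 *	} while (time_after(wtime, jiffies));
 *
 * time_after() handles jiffies wrap-around, which a plain
 * 'jiffies < wtime' comparison would not.
 */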
uint32_t options = 0; + int ret = 0; + + if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) { + ql4_printk(KERN_ERR, ha, + "%s: Target info is not persistent\n", __func__); + ret = -EIO; + goto exit_ddb_login; + } + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + ret = -ENOMEM; + goto exit_ddb_login; + } + + if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + options |= IPV6_DEFAULT_DDB_ENTRY; + + ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); + if (ret == QLA_ERROR) + goto exit_ddb_login; + + qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); + fw_ddb_entry->cookie = DDB_VALID_COOKIE; + + if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) + ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, + fnode_sess->target_id); + else + ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, + fnode_sess->target_id); + + if (ret > 0) + ret = -EIO; + +exit_ddb_login: + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); + return ret; +} + +/** + * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target + * @cls_sess: pointer to session to be logged out + * + * This performs session log out from the specified target + **/ +static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess) +{ + struct iscsi_session *sess; + struct ddb_entry *ddb_entry = NULL; + struct scsi_qla_host *ha; + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + unsigned long flags; + unsigned long wtime; + uint32_t ddb_state; + int options; + int ret = 0; + + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + + if (ddb_entry->ddb_type != FLASH_DDB) { + ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n", + __func__); + ret = -ENXIO; + goto exit_ddb_logout; + } + + if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) { + ql4_printk(KERN_ERR, ha, + "%s: Logout from boot target entry is not permitted.\n", + __func__); + ret = -EPERM; + goto exit_ddb_logout; + } + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", __func__); + ret = -ENOMEM; + goto exit_ddb_logout; + } + + if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags)) + goto ddb_logout_init; + + ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, + fw_ddb_entry, fw_ddb_entry_dma, + NULL, NULL, &ddb_state, NULL, + NULL, NULL); + if (ret == QLA_ERROR) + goto ddb_logout_init; + + if (ddb_state == DDB_DS_SESSION_ACTIVE) + goto ddb_logout_init; + + /* wait until next relogin is triggered using DF_RELOGIN and + * clear DF_RELOGIN to avoid invocation of further relogin + */ + wtime = jiffies + (HZ * RELOGIN_TOV); + do { + if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags)) + goto ddb_logout_init; + + schedule_timeout_uninterruptible(HZ); + } while ((time_after(wtime, jiffies))); + +ddb_logout_init: + atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); + atomic_set(&ddb_entry->relogin_timer, 0); + + options = LOGOUT_OPTION_CLOSE_SESSION; + qla4xxx_session_logout_ddb(ha, ddb_entry, options); + + memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); + wtime = jiffies + (HZ * LOGOUT_TOV); + do { + ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, + fw_ddb_entry, 
fw_ddb_entry_dma, + NULL, NULL, &ddb_state, NULL, + NULL, NULL); + if (ret == QLA_ERROR) + goto ddb_logout_clr_sess; + + if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || + (ddb_state == DDB_DS_SESSION_FAILED)) + goto ddb_logout_clr_sess; + + schedule_timeout_uninterruptible(HZ); + } while ((time_after(wtime, jiffies))); + +ddb_logout_clr_sess: + qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); + /* + * we have decremented the reference count of the driver + * when we setup the session to have the driver unload + * to be seamless without actually destroying the + * session + **/ + try_module_get(qla4xxx_iscsi_transport.owner); + iscsi_destroy_endpoint(ddb_entry->conn->ep); + + spin_lock_irqsave(&ha->hardware_lock, flags); + qla4xxx_free_ddb(ha, ddb_entry); + clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + iscsi_session_teardown(ddb_entry->sess); + + clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags); + ret = QLA_SUCCESS; + +exit_ddb_logout: + if (fw_ddb_entry) + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); + return ret; +} + +/** + * qla4xxx_sysfs_ddb_logout - Logout from the specified target + * @fnode_sess: pointer to session attrs of flash ddb entry + * @fnode_conn: pointer to connection attrs of flash ddb entry + * + * This performs log out from the specified target + **/ +static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn) +{ + struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); + struct scsi_qla_host *ha = to_qla_host(shost); + struct ql4_tuple_ddb *flash_tddb = NULL; + struct ql4_tuple_ddb *tmp_tddb = NULL; + struct dev_db_entry *fw_ddb_entry = NULL; + struct ddb_entry *ddb_entry = NULL; + dma_addr_t fw_ddb_dma; + uint32_t next_idx = 0; + uint32_t state = 0, conn_err = 0; + uint16_t conn_id = 0; + int idx, index; + int status, ret = 0; + + fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, + &fw_ddb_dma); + if (fw_ddb_entry == NULL) { + ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__); + ret = -ENOMEM; + goto exit_ddb_logout; + } + + flash_tddb = vzalloc(sizeof(*flash_tddb)); + if (!flash_tddb) { + ql4_printk(KERN_WARNING, ha, + "%s:Memory Allocation failed.\n", __func__); + ret = -ENOMEM; + goto exit_ddb_logout; + } + + tmp_tddb = vzalloc(sizeof(*tmp_tddb)); + if (!tmp_tddb) { + ql4_printk(KERN_WARNING, ha, + "%s:Memory Allocation failed.\n", __func__); + ret = -ENOMEM; + goto exit_ddb_logout; + } + + if (!fnode_sess->targetname) { + ql4_printk(KERN_ERR, ha, + "%s:Cannot logout from SendTarget entry\n", + __func__); + ret = -EPERM; + goto exit_ddb_logout; + } + + if (fnode_sess->is_boot_target) { + ql4_printk(KERN_ERR, ha, + "%s: Logout from boot target entry is not permitted.\n", + __func__); + ret = -EPERM; + goto exit_ddb_logout; + } + + strncpy(flash_tddb->iscsi_name, fnode_sess->targetname, + ISCSI_NAME_SIZE); + + if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress); + else + sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress); + + flash_tddb->tpgt = fnode_sess->tpgt; + flash_tddb->port = fnode_conn->port; + + COPY_ISID(flash_tddb->isid, fnode_sess->isid); + + for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { + ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx); + if (ddb_entry == NULL) + continue; + + if (ddb_entry->ddb_type != FLASH_DDB) + continue; + + index = 
ddb_entry->sess->target_id; + status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry, + fw_ddb_dma, NULL, &next_idx, + &state, &conn_err, NULL, + &conn_id); + if (status == QLA_ERROR) { + ret = -ENOMEM; + break; + } + + qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL); + + status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb, + true); + if (status == QLA_SUCCESS) { + ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess); + break; + } + } + + if (idx == MAX_DDB_ENTRIES) + ret = -ESRCH; + +exit_ddb_logout: + if (flash_tddb) + vfree(flash_tddb); + if (tmp_tddb) + vfree(tmp_tddb); + if (fw_ddb_entry) + dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); + + return ret; +} + +static int +qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, + int param, char *buf) +{ + struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); + struct scsi_qla_host *ha = to_qla_host(shost); + struct iscsi_bus_flash_conn *fnode_conn; + struct ql4_chap_table chap_tbl; + struct device *dev; + int parent_type; + int rc = 0; + + dev = iscsi_find_flashnode_conn(fnode_sess); + if (!dev) + return -EIO; + + fnode_conn = iscsi_dev_to_flash_conn(dev); + + switch (param) { + case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: + rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6); + break; + case ISCSI_FLASHNODE_PORTAL_TYPE: + rc = sprintf(buf, "%s\n", fnode_sess->portal_type); + break; + case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: + rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable); + break; + case ISCSI_FLASHNODE_DISCOVERY_SESS: + rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess); + break; + case ISCSI_FLASHNODE_ENTRY_EN: + rc = sprintf(buf, "%u\n", fnode_sess->entry_state); + break; + case ISCSI_FLASHNODE_HDR_DGST_EN: + rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en); + break; + case ISCSI_FLASHNODE_DATA_DGST_EN: + rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en); + break; + case ISCSI_FLASHNODE_IMM_DATA_EN: + rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en); + break; + case ISCSI_FLASHNODE_INITIAL_R2T_EN: + rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en); + break; + case ISCSI_FLASHNODE_DATASEQ_INORDER: + rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en); + break; + case ISCSI_FLASHNODE_PDU_INORDER: + rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en); + break; + case ISCSI_FLASHNODE_CHAP_AUTH_EN: + rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en); + break; + case ISCSI_FLASHNODE_SNACK_REQ_EN: + rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en); + break; + case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: + rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en); + break; + case ISCSI_FLASHNODE_BIDI_CHAP_EN: + rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en); + break; + case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: + rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional); + break; + case ISCSI_FLASHNODE_ERL: + rc = sprintf(buf, "%u\n", fnode_sess->erl); + break; + case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: + rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat); + break; + case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: + rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable); + break; + case ISCSI_FLASHNODE_TCP_WSF_DISABLE: + rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable); + break; + case ISCSI_FLASHNODE_TCP_TIMER_SCALE: + rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale); + break; + case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: + rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en); + break; + case 
ISCSI_FLASHNODE_IP_FRAG_DISABLE: + rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable); + break; + case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: + rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength); + break; + case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: + rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength); + break; + case ISCSI_FLASHNODE_FIRST_BURST: + rc = sprintf(buf, "%u\n", fnode_sess->first_burst); + break; + case ISCSI_FLASHNODE_DEF_TIME2WAIT: + rc = sprintf(buf, "%u\n", fnode_sess->time2wait); + break; + case ISCSI_FLASHNODE_DEF_TIME2RETAIN: + rc = sprintf(buf, "%u\n", fnode_sess->time2retain); + break; + case ISCSI_FLASHNODE_MAX_R2T: + rc = sprintf(buf, "%u\n", fnode_sess->max_r2t); + break; + case ISCSI_FLASHNODE_KEEPALIVE_TMO: + rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout); + break; + case ISCSI_FLASHNODE_ISID: + rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", + fnode_sess->isid[0], fnode_sess->isid[1], + fnode_sess->isid[2], fnode_sess->isid[3], + fnode_sess->isid[4], fnode_sess->isid[5]); + break; + case ISCSI_FLASHNODE_TSID: + rc = sprintf(buf, "%u\n", fnode_sess->tsid); + break; + case ISCSI_FLASHNODE_PORT: + rc = sprintf(buf, "%d\n", fnode_conn->port); + break; + case ISCSI_FLASHNODE_MAX_BURST: + rc = sprintf(buf, "%u\n", fnode_sess->max_burst); + break; + case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: + rc = sprintf(buf, "%u\n", + fnode_sess->default_taskmgmt_timeout); + break; + case ISCSI_FLASHNODE_IPADDR: + if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress); + else + rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress); + break; + case ISCSI_FLASHNODE_ALIAS: + if (fnode_sess->targetalias) + rc = sprintf(buf, "%s\n", fnode_sess->targetalias); + else + rc = sprintf(buf, "\n"); + break; + case ISCSI_FLASHNODE_REDIRECT_IPADDR: + if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + rc = sprintf(buf, "%pI6\n", + fnode_conn->redirect_ipaddr); + else + rc = sprintf(buf, "%pI4\n", + fnode_conn->redirect_ipaddr); + break; + case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: + rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size); + break; + case ISCSI_FLASHNODE_LOCAL_PORT: + rc = sprintf(buf, "%u\n", fnode_conn->local_port); + break; + case ISCSI_FLASHNODE_IPV4_TOS: + rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos); + break; + case ISCSI_FLASHNODE_IPV6_TC: + if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + rc = sprintf(buf, "%u\n", + fnode_conn->ipv6_traffic_class); + else + rc = sprintf(buf, "\n"); + break; + case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: + rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label); + break; + case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: + if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) + rc = sprintf(buf, "%pI6\n", + fnode_conn->link_local_ipv6_addr); + else + rc = sprintf(buf, "\n"); + break; + case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: + rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx); + break; + case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE: + if (fnode_sess->discovery_parent_type == DDB_ISNS) + parent_type = ISCSI_DISC_PARENT_ISNS; + else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) + parent_type = ISCSI_DISC_PARENT_UNKNOWN; + else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) + parent_type = ISCSI_DISC_PARENT_SENDTGT; + else + parent_type = ISCSI_DISC_PARENT_UNKNOWN; + + rc = sprintf(buf, "%s\n", + iscsi_get_discovery_parent_name(parent_type)); + break; + case ISCSI_FLASHNODE_NAME: + if (fnode_sess->targetname) + rc = sprintf(buf, "%s\n", 
fnode_sess->targetname); + else + rc = sprintf(buf, "\n"); + break; + case ISCSI_FLASHNODE_TPGT: + rc = sprintf(buf, "%u\n", fnode_sess->tpgt); + break; + case ISCSI_FLASHNODE_TCP_XMIT_WSF: + rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf); + break; + case ISCSI_FLASHNODE_TCP_RECV_WSF: + rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf); + break; + case ISCSI_FLASHNODE_CHAP_OUT_IDX: + rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx); + break; + case ISCSI_FLASHNODE_USERNAME: + if (fnode_sess->chap_auth_en) { + qla4xxx_get_uni_chap_at_index(ha, + chap_tbl.name, + chap_tbl.secret, + fnode_sess->chap_out_idx); + rc = sprintf(buf, "%s\n", chap_tbl.name); + } else { + rc = sprintf(buf, "\n"); + } + break; + case ISCSI_FLASHNODE_PASSWORD: + if (fnode_sess->chap_auth_en) { + qla4xxx_get_uni_chap_at_index(ha, + chap_tbl.name, + chap_tbl.secret, + fnode_sess->chap_out_idx); + rc = sprintf(buf, "%s\n", chap_tbl.secret); + } else { + rc = sprintf(buf, "\n"); + } + break; + case ISCSI_FLASHNODE_STATSN: + rc = sprintf(buf, "%u\n", fnode_conn->statsn); + break; + case ISCSI_FLASHNODE_EXP_STATSN: + rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn); + break; + case ISCSI_FLASHNODE_IS_BOOT_TGT: + rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target); + break; + default: + rc = -ENOSYS; + break; + } + + put_device(dev); + return rc; +} + +/** + * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry + * @fnode_sess: pointer to session attrs of flash ddb entry + * @fnode_conn: pointer to connection attrs of flash ddb entry + * @data: Parameters and their values to update + * @len: len of data + * + * This sets the parameter of flash ddb entry and writes them to flash + **/ +static int +qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_bus_flash_conn *fnode_conn, + void *data, int len) +{ + struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); + struct scsi_qla_host *ha = to_qla_host(shost); + struct iscsi_flashnode_param_info *fnode_param; + struct ql4_chap_table chap_tbl; + struct nlattr *attr; + uint16_t chap_out_idx = INVALID_ENTRY; + int rc = QLA_ERROR; + uint32_t rem = len; + + memset((void *)&chap_tbl, 0, sizeof(chap_tbl)); + nla_for_each_attr(attr, data, len, rem) { + fnode_param = nla_data(attr); + + switch (fnode_param->param) { + case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6: + fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_PORTAL_TYPE: + memcpy(fnode_sess->portal_type, fnode_param->value, + strlen(fnode_sess->portal_type)); + break; + case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE: + fnode_sess->auto_snd_tgt_disable = + fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_DISCOVERY_SESS: + fnode_sess->discovery_sess = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_ENTRY_EN: + fnode_sess->entry_state = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_HDR_DGST_EN: + fnode_conn->hdrdgst_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_DATA_DGST_EN: + fnode_conn->datadgst_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_IMM_DATA_EN: + fnode_sess->imm_data_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_INITIAL_R2T_EN: + fnode_sess->initial_r2t_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_DATASEQ_INORDER: + fnode_sess->dataseq_inorder_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_PDU_INORDER: + fnode_sess->pdu_inorder_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_CHAP_AUTH_EN: + fnode_sess->chap_auth_en = 
fnode_param->value[0]; + /* Invalidate chap index if chap auth is disabled */ + if (!fnode_sess->chap_auth_en) + fnode_sess->chap_out_idx = INVALID_ENTRY; + + break; + case ISCSI_FLASHNODE_SNACK_REQ_EN: + fnode_conn->snack_req_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN: + fnode_sess->discovery_logout_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_BIDI_CHAP_EN: + fnode_sess->bidi_chap_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL: + fnode_sess->discovery_auth_optional = + fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_ERL: + fnode_sess->erl = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT: + fnode_conn->tcp_timestamp_stat = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE: + fnode_conn->tcp_nagle_disable = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_TCP_WSF_DISABLE: + fnode_conn->tcp_wsf_disable = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_TCP_TIMER_SCALE: + fnode_conn->tcp_timer_scale = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN: + fnode_conn->tcp_timestamp_en = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_IP_FRAG_DISABLE: + fnode_conn->fragment_disable = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_MAX_RECV_DLENGTH: + fnode_conn->max_recv_dlength = + *(unsigned *)fnode_param->value; + break; + case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH: + fnode_conn->max_xmit_dlength = + *(unsigned *)fnode_param->value; + break; + case ISCSI_FLASHNODE_FIRST_BURST: + fnode_sess->first_burst = + *(unsigned *)fnode_param->value; + break; + case ISCSI_FLASHNODE_DEF_TIME2WAIT: + fnode_sess->time2wait = *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_DEF_TIME2RETAIN: + fnode_sess->time2retain = + *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_MAX_R2T: + fnode_sess->max_r2t = + *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_KEEPALIVE_TMO: + fnode_conn->keepalive_timeout = + *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_ISID: + memcpy(fnode_sess->isid, fnode_param->value, + sizeof(fnode_sess->isid)); + break; + case ISCSI_FLASHNODE_TSID: + fnode_sess->tsid = *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_PORT: + fnode_conn->port = *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_MAX_BURST: + fnode_sess->max_burst = *(unsigned *)fnode_param->value; + break; + case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO: + fnode_sess->default_taskmgmt_timeout = + *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_IPADDR: + memcpy(fnode_conn->ipaddress, fnode_param->value, + IPv6_ADDR_LEN); + break; + case ISCSI_FLASHNODE_ALIAS: + rc = iscsi_switch_str_param(&fnode_sess->targetalias, + (char *)fnode_param->value); + break; + case ISCSI_FLASHNODE_REDIRECT_IPADDR: + memcpy(fnode_conn->redirect_ipaddr, fnode_param->value, + IPv6_ADDR_LEN); + break; + case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE: + fnode_conn->max_segment_size = + *(unsigned *)fnode_param->value; + break; + case ISCSI_FLASHNODE_LOCAL_PORT: + fnode_conn->local_port = + *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_IPV4_TOS: + fnode_conn->ipv4_tos = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_IPV6_TC: + fnode_conn->ipv6_traffic_class = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_IPV6_FLOW_LABEL: + fnode_conn->ipv6_flow_label = fnode_param->value[0]; + break; + case ISCSI_FLASHNODE_NAME: + rc = 
iscsi_switch_str_param(&fnode_sess->targetname, + (char *)fnode_param->value); + break; + case ISCSI_FLASHNODE_TPGT: + fnode_sess->tpgt = *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_LINK_LOCAL_IPV6: + memcpy(fnode_conn->link_local_ipv6_addr, + fnode_param->value, IPv6_ADDR_LEN); + break; + case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: + fnode_sess->discovery_parent_idx = + *(uint16_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_TCP_XMIT_WSF: + fnode_conn->tcp_xmit_wsf = + *(uint8_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_TCP_RECV_WSF: + fnode_conn->tcp_recv_wsf = + *(uint8_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_STATSN: + fnode_conn->statsn = *(uint32_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_EXP_STATSN: + fnode_conn->exp_statsn = + *(uint32_t *)fnode_param->value; + break; + case ISCSI_FLASHNODE_CHAP_OUT_IDX: + chap_out_idx = *(uint16_t *)fnode_param->value; + if (!qla4xxx_get_uni_chap_at_index(ha, + chap_tbl.name, + chap_tbl.secret, + chap_out_idx)) { + fnode_sess->chap_out_idx = chap_out_idx; + /* Enable chap auth if chap index is valid */ + fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; + } + break; + default: + ql4_printk(KERN_ERR, ha, + "%s: No such sysfs attribute\n", __func__); + rc = -ENOSYS; + goto exit_set_param; + } + } + + rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn); + +exit_set_param: + return rc; +} + +/** + * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry + * @fnode_sess: pointer to session attrs of flash ddb entry + * + * This invalidates the flash ddb entry at the given index + **/ +static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) +{ + struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); + struct scsi_qla_host *ha = to_qla_host(shost); + uint32_t dev_db_start_offset; + uint32_t dev_db_end_offset; + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + uint16_t *ddb_cookie = NULL; + size_t ddb_size = 0; + void *pddb = NULL; + int target_id; + int rc = 0; + + if (fnode_sess->is_boot_target) { + rc = -EPERM; + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Deletion of boot target entry is not permitted.\n", + __func__)); + goto exit_ddb_del; + } + + if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) + goto sysfs_ddb_del; + + if (is_qla40XX(ha)) { + dev_db_start_offset = FLASH_OFFSET_DB_INFO; + dev_db_end_offset = FLASH_OFFSET_DB_END; + dev_db_start_offset += (fnode_sess->target_id * + sizeof(*fw_ddb_entry)); + ddb_size = sizeof(*fw_ddb_entry); + } else { + dev_db_start_offset = FLASH_RAW_ACCESS_ADDR + + (ha->hw.flt_region_ddb << 2); + /* flt_ddb_size is DDB table size for both ports + * so divide it by 2 to calculate the offset for second port + */ + if (ha->port_num == 1) + dev_db_start_offset += (ha->hw.flt_ddb_size / 2); + + dev_db_end_offset = dev_db_start_offset + + (ha->hw.flt_ddb_size / 2); + + dev_db_start_offset += (fnode_sess->target_id * + sizeof(*fw_ddb_entry)); + dev_db_start_offset += offsetof(struct dev_db_entry, cookie); + + ddb_size = sizeof(*ddb_cookie); + } + + DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n", + __func__, dev_db_start_offset, dev_db_end_offset)); + + if (dev_db_start_offset > dev_db_end_offset) { + rc = -EIO; + DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n", + __func__, fnode_sess->target_id)); + goto exit_ddb_del; + } + + pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size, + &fw_ddb_entry_dma, GFP_KERNEL); + if (!pddb) { + rc = -ENOMEM; + 
DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + goto exit_ddb_del; + } + + if (is_qla40XX(ha)) { + fw_ddb_entry = pddb; + memset(fw_ddb_entry, 0, ddb_size); + ddb_cookie = &fw_ddb_entry->cookie; + } else { + ddb_cookie = pddb; + } + + /* invalidate the cookie */ + *ddb_cookie = 0xFFEE; + qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, + ddb_size, FLASH_OPT_RMW_COMMIT); + +sysfs_ddb_del: + target_id = fnode_sess->target_id; + iscsi_destroy_flashnode_sess(fnode_sess); + ql4_printk(KERN_INFO, ha, + "%s: session and conn entries for flashnode %u of host %lu deleted\n", + __func__, target_id, ha->host_no); +exit_ddb_del: + if (pddb) + dma_free_coherent(&ha->pdev->dev, ddb_size, pddb, + fw_ddb_entry_dma); + return rc; +} + +/** + * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs + * @ha: pointer to adapter structure + * + * Export the firmware DDB for all send targets and normal targets to sysfs. + **/ +int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) +{ + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + uint16_t max_ddbs; + uint16_t idx = 0; + int ret = QLA_SUCCESS; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, + sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + return -ENOMEM; + } + + max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES : + MAX_DEV_DB_ENTRIES; + + for (idx = 0; idx < max_ddbs; idx++) { + if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, + idx)) + continue; + + ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0); + if (ret) { + ret = -EIO; + break; + } + } + + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, + fw_ddb_entry_dma); + + return ret; +} + +static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha) +{ + iscsi_destroy_all_flashnode(ha->host); +} + /** * qla4xxx_build_ddb_list - Build ddb list and setup sessions * @ha: pointer to adapter structure @@ -5113,11 +8506,10 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset) schedule_timeout_uninterruptible(HZ / 10); } while (time_after(wtime, jiffies)); - /* Free up the sendtargets list */ - qla4xxx_free_ddb_list(&list_st); - qla4xxx_build_nt_list(ha, &list_nt, is_reset); + qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset); + qla4xxx_free_ddb_list(&list_st); qla4xxx_free_ddb_list(&list_nt); qla4xxx_free_ddb_index(ha); @@ -5250,7 +8642,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev, nx_legacy_intr->tgt_status_reg; ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; - } else if (is_qla8032(ha)) { + } else if (is_qla8032(ha) || is_qla8042(ha)) { ha->isp_ops = &qla4_83xx_isp_ops; ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl; } else { @@ -5281,6 +8673,9 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev, mutex_init(&ha->chap_sem); init_completion(&ha->mbx_intr_comp); init_completion(&ha->disable_acb_comp); + init_completion(&ha->idc_comp); + init_completion(&ha->link_up_comp); + init_completion(&ha->disable_acb_comp); spin_lock_init(&ha->hardware_lock); spin_lock_init(&ha->work_lock); @@ -5321,7 +8716,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev, if (is_qla80XX(ha)) qla4_8xxx_get_flash_info(ha); - if (is_qla8032(ha)) { + if (is_qla8032(ha) || is_qla8042(ha)) { qla4_83xx_read_reset_template(ha); /* * NOTE: If 
ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0. @@ -5341,7 +8736,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev, status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); /* Dont retry adapter initialization if IRQ allocation failed */ - if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) + if (is_qla80XX(ha) && (status == QLA_ERROR)) goto skip_retry_init; while ((!test_bit(AF_ONLINE, &ha->flags)) && @@ -5366,6 +8761,10 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev, continue; status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); + if (is_qla80XX(ha) && (status == QLA_ERROR)) { + if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR) + goto skip_retry_init; + } } skip_retry_init: @@ -5373,7 +8772,8 @@ skip_retry_init: ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); if ((is_qla8022(ha) && ql4xdontresethba) || - (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) { + ((is_qla8032(ha) || is_qla8042(ha)) && + qla4_83xx_idc_dontreset(ha))) { /* Put the device in failed state. */ DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); ha->isp_ops->idc_lock(ha); @@ -5397,8 +8797,8 @@ skip_retry_init: } INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); - sprintf(buf, "qla4xxx_%lu_task", ha->host_no); - ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1); + ha->task_wq = alloc_workqueue("qla4xxx_%lu_task", WQ_MEM_RECLAIM, 1, + ha->host_no); if (!ha->task_wq) { ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n"); ret = -ENODEV; @@ -5434,8 +8834,8 @@ skip_retry_init: " QLogic iSCSI HBA Driver version: %s\n" " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), - ha->host_no, ha->firmware_version[0], ha->firmware_version[1], - ha->patch_number, ha->build_number); + ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor, + ha->fw_info.fw_patch, ha->fw_info.fw_build); /* Set the driver version */ if (is_qla80XX(ha)) @@ -5445,7 +8845,8 @@ skip_retry_init: ql4_printk(KERN_ERR, ha, "%s: No iSCSI boot target configured\n", __func__); - /* Perform the build ddb list and login to each */ + set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags); + /* Perform the build ddb list and login to each */ qla4xxx_build_ddb_list(ha, INIT_ADAPTER); iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); qla4xxx_wait_login_resp_boot_tgt(ha); @@ -5509,10 +8910,56 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha) } } +static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry) +{ + struct dev_db_entry *fw_ddb_entry = NULL; + dma_addr_t fw_ddb_entry_dma; + unsigned long wtime; + uint32_t ddb_state; + int options; + int status; + + options = LOGOUT_OPTION_CLOSE_SESSION; + if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); + goto clear_ddb; + } + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", __func__); + goto clear_ddb; + } + + wtime = jiffies + (HZ * LOGOUT_TOV); + do { + status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, + fw_ddb_entry, fw_ddb_entry_dma, + NULL, NULL, &ddb_state, NULL, + NULL, NULL); + if (status == QLA_ERROR) + goto free_ddb; + + if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || + (ddb_state == DDB_DS_SESSION_FAILED)) + goto free_ddb; + + schedule_timeout_uninterruptible(HZ); + } while ((time_after(wtime, jiffies))); + 
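The teardown in qla4xxx_destroy_ddb() above uses the driver's recurring bounded-poll idiom: fire an asynchronous firmware request (here the logout), then poll the DDB state against a jiffies deadline rather than blocking on a completion. A condensed kernel-style sketch, where read_state() and DONE_STATE are hypothetical stand-ins for qla4xxx_get_fwddb_entry() and the terminal DDB_DS_* states:

        #include <linux/jiffies.h>
        #include <linux/sched.h>
        #include <linux/errno.h>
        #include <linux/types.h>

        #define DONE_STATE 4                    /* hypothetical terminal state */

        static int read_state(u32 *state);      /* stand-in, prototype only */

        static int wait_for_state(unsigned int tmo_secs)
        {
                unsigned long deadline = jiffies + tmo_secs * HZ;
                u32 state;

                do {
                        if (read_state(&state) == 0 && state == DONE_STATE)
                                return 0;       /* reached terminal state */
                        schedule_timeout_uninterruptible(HZ);  /* sleep ~1s */
                } while (time_after(deadline, jiffies));

                return -ETIMEDOUT;              /* bounded: give up */
        }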
+free_ddb: + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); +clear_ddb: + qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); +} + static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha) { struct ddb_entry *ddb_entry; - int options; int idx; for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { @@ -5521,13 +8968,7 @@ static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha) if ((ddb_entry != NULL) && (ddb_entry->ddb_type == FLASH_DDB)) { - options = LOGOUT_OPTION_CLOSE_SESSION; - if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) - == QLA_ERROR) - ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", - __func__); - - qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); + qla4xxx_destroy_ddb(ha, ddb_entry); /* * we have decremented the reference count of the driver * when we setup the session to have the driver unload @@ -5570,6 +9011,7 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev) qla4xxx_destroy_fw_ddb_session(ha); qla4_8xxx_free_sysfs_attr(ha); + qla4xxx_sysfs_ddb_remove(ha); scsi_remove_host(ha->host); qla4xxx_free_adapter(ha); @@ -5578,7 +9020,6 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev) pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } /** @@ -5669,7 +9110,6 @@ struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha, /* update counters */ if (srb->flags & SRB_DMA_VALID) { - ha->req_q_count += srb->iocb_cnt; ha->iocb_cnt -= srb->iocb_cnt; if (srb->cmd) srb->cmd->host_scribble = @@ -5789,14 +9229,15 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) int ret = SUCCESS; int wait = 0; - ql4_printk(KERN_INFO, ha, - "scsi%ld:%d:%d: Abort command issued cmd=%p\n", - ha->host_no, id, lun, cmd); + ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Abort command issued cmd=%p, cdb=0x%x\n", + ha->host_no, id, lun, cmd, cmd->cmnd[0]); spin_lock_irqsave(&ha->hardware_lock, flags); srb = (struct srb *) CMD_SP(cmd); if (!srb) { spin_unlock_irqrestore(&ha->hardware_lock, flags); + ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Specified command has already completed.\n", + ha->host_no, id, lun); return SUCCESS; } kref_get(&srb->srb_ref); @@ -5978,16 +9419,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) ha = to_qla_host(cmd->device->host); - if (is_qla8032(ha) && ql4xdontresethba) + if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) qla4_83xx_set_idc_dontreset(ha); /* - * For ISP8324, if IDC_CTRL DONTRESET_BIT0 is set by other - * protocol drivers, we should not set device_state to - * NEED_RESET + * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other + * protocol drivers, we should not set device_state to NEED_RESET */ if (ql4xdontresethba || - (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) { + ((is_qla8032(ha) || is_qla8042(ha)) && + qla4_83xx_idc_dontreset(ha))) { DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", ha->host_no, __func__)); @@ -6081,6 +9522,7 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) { struct scsi_qla_host *ha = to_qla_host(shost); int rval = QLA_SUCCESS; + uint32_t idc_ctrl; if (ql4xdontresethba) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n", @@ -6111,6 +9553,15 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) } recover_adapter: + /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if + * reset is issued by application */ + if ((is_qla8032(ha) || is_qla8042(ha)) && + test_bit(DPC_RESET_HA, &ha->dpc_flags)) { + idc_ctrl = 
qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); + qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, + (idc_ctrl | GRACEFUL_RESET_BIT1)); + } + rval = qla4xxx_recover_adapter(ha); if (rval != QLA_SUCCESS) { DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n", @@ -6203,28 +9654,36 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) } fn = PCI_FUNC(ha->pdev->devfn); - while (fn > 0) { - fn--; - ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at " - "func %x\n", ha->host_no, __func__, fn); - /* Get the pci device given the domain, bus, - * slot/function number */ - other_pdev = - pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), - ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), - fn)); - - if (!other_pdev) - continue; + if (is_qla8022(ha)) { + while (fn > 0) { + fn--; + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n", + ha->host_no, __func__, fn); + /* Get the pci device given the domain, bus, + * slot/function number */ + other_pdev = pci_get_domain_bus_and_slot( + pci_domain_nr(ha->pdev->bus), + ha->pdev->bus->number, + PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), + fn)); + + if (!other_pdev) + continue; - if (atomic_read(&other_pdev->enable_cnt)) { - ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI " - "func in enabled state%x\n", ha->host_no, - __func__, fn); + if (atomic_read(&other_pdev->enable_cnt)) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n", + ha->host_no, __func__, fn); + pci_dev_put(other_pdev); + break; + } pci_dev_put(other_pdev); - break; } - pci_dev_put(other_pdev); + } else { + /* this case is meant for ISP83xx/ISP84xx only */ + if (qla4_83xx_can_perform_reset(ha)) { + /* reset fn as iSCSI is going to perform the reset */ + fn = 0; + } } /* The first function on the card, the reset owner will @@ -6258,6 +9717,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) if (rval != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: " "FAILED\n", ha->host_no, __func__); + qla4xxx_free_irqs(ha); ha->isp_ops->idc_lock(ha); qla4_8xxx_clear_drv_active(ha); qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, @@ -6285,6 +9745,8 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); if (rval == QLA_SUCCESS) ha->isp_ops->enable_intrs(ha); + else + qla4xxx_free_irqs(ha); ha->isp_ops->idc_lock(ha); qla4_8xxx_set_drv_active(ha); @@ -6402,6 +9864,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = { .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, + { + .vendor = PCI_VENDOR_ID_QLOGIC, + .device = PCI_DEVICE_ID_QLOGIC_ISP8042, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, {0, 0}, }; MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl); diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index 6775a45af31..f11eaa77333 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -1,8 +1,8 @@ /* * QLogic iSCSI HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ -#define QLA4XXX_DRIVER_VERSION "5.03.00-k4" +#define QLA4XXX_DRIVER_VERSION "5.04.00-k6" diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 2c0d0ec8150..88d46fe6bf9 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -78,11 +78,6 @@ static void scsi_done(struct scsi_cmnd *cmd); * Definitions and constants. 
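The graceful-reset handling added to qla4xxx_host_reset() above is a plain read-modify-write on the IDC_DRV_CTRL register: read the current value, OR in the flag, and write it back so bits owned by the other protocol drivers survive. The shape of it, with hypothetical rd_reg()/wr_reg() accessors standing in for qla4_83xx_rd_reg()/qla4_83xx_wr_reg() and an assumed bit position for GRACEFUL_RESET_BIT1:

        #include <stdint.h>

        #define IDC_DRV_CTRL_OFF   0x0          /* hypothetical register offset */
        #define GRACEFUL_RESET_BIT (1u << 1)    /* assumed: GRACEFUL_RESET_BIT1 */

        static uint32_t rd_reg(uint32_t off);           /* stand-in accessors, */
        static void     wr_reg(uint32_t off, uint32_t v); /* prototypes only   */

        static void set_graceful_reset(void)
        {
                uint32_t ctrl = rd_reg(IDC_DRV_CTRL_OFF);

                ctrl |= GRACEFUL_RESET_BIT;     /* set only our flag ...      */
                wr_reg(IDC_DRV_CTRL_OFF, ctrl); /* ... preserving the rest    */
        }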
*/ -#define MIN_RESET_DELAY (2*HZ) - -/* Do not call reset on error if we just did a reset within 15 sec. */ -#define MIN_RESET_PERIOD (15*HZ) - /* * Note - the initial logging level can be set here to log events at boot time. * After the system is up, you may enable logging via the /proc interface. @@ -96,6 +91,15 @@ EXPORT_SYMBOL(scsi_logging_level); ASYNC_DOMAIN(scsi_sd_probe_domain); EXPORT_SYMBOL(scsi_sd_probe_domain); +/* + * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of + * asynchronous system resume operations. It is marked 'exclusive' to avoid + * being included in the async_synchronize_full() that is invoked by + * dpm_resume() + */ +ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain); +EXPORT_SYMBOL(scsi_sd_pm_domain); + /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. * You may not alter any existing entry (although adding new ones is * encouraged once assigned by ANSI/INCITS T10 @@ -166,47 +170,20 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = { static DEFINE_MUTEX(host_cmd_pool_mutex); /** - * scsi_pool_alloc_command - internal function to get a fully allocated command - * @pool: slab pool to allocate the command from - * @gfp_mask: mask for the allocation - * - * Returns a fully allocated command (with the allied sense buffer) or - * NULL on failure - */ -static struct scsi_cmnd * -scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask) -{ - struct scsi_cmnd *cmd; - - cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask); - if (!cmd) - return NULL; - - cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab, - gfp_mask | pool->gfp_mask); - if (!cmd->sense_buffer) { - kmem_cache_free(pool->cmd_slab, cmd); - return NULL; - } - - return cmd; -} - -/** - * scsi_pool_free_command - internal function to release a command - * @pool: slab pool to allocate the command from + * scsi_host_free_command - internal function to release a command + * @shost: host to free the command for * @cmd: command to release * * the command must previously have been allocated by - * scsi_pool_alloc_command. + * scsi_host_alloc_command. 
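The command-pool rework in the hunks that follow keys the slab to the host template: when hostt->cmd_size is non-zero, each slab object is sized sizeof(struct scsi_cmnd) + cmd_size, so an LLDD's per-command private data is co-allocated directly behind the midlayer command instead of in a separate allocation. A sketch of that layout under a condensed stand-in struct:

        #include <stddef.h>

        struct cmnd { unsigned char cdb[16]; }; /* condensed stand-in for scsi_cmnd */

        /* One slab object = midlayer command + LLDD private area: */
        static size_t pool_obj_size(size_t cmd_size) /* cmd_size = hostt->cmd_size */
        {
                return sizeof(struct cmnd) + cmd_size;
        }

        static void *cmd_priv(struct cmnd *cmd)
        {
                return cmd + 1;         /* first byte past the midlayer part */
        }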
*/ static void -scsi_pool_free_command(struct scsi_host_cmd_pool *pool, - struct scsi_cmnd *cmd) +scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { + struct scsi_host_cmd_pool *pool = shost->cmd_pool; + if (cmd->prot_sdb) kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb); - kmem_cache_free(pool->sense_slab, cmd->sense_buffer); kmem_cache_free(pool->cmd_slab, cmd); } @@ -222,22 +199,32 @@ scsi_pool_free_command(struct scsi_host_cmd_pool *pool, static struct scsi_cmnd * scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask) { + struct scsi_host_cmd_pool *pool = shost->cmd_pool; struct scsi_cmnd *cmd; - cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask); + cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask); if (!cmd) - return NULL; + goto fail; + + cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab, + gfp_mask | pool->gfp_mask); + if (!cmd->sense_buffer) + goto fail_free_cmd; if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) { cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask); - - if (!cmd->prot_sdb) { - scsi_pool_free_command(shost->cmd_pool, cmd); - return NULL; - } + if (!cmd->prot_sdb) + goto fail_free_sense; } return cmd; + +fail_free_sense: + kmem_cache_free(pool->sense_slab, cmd->sense_buffer); +fail_free_cmd: + kmem_cache_free(pool->cmd_slab, cmd); +fail: + return NULL; } /** @@ -289,26 +276,19 @@ EXPORT_SYMBOL_GPL(__scsi_get_command); */ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask) { - struct scsi_cmnd *cmd; + struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask); + unsigned long flags; - /* Bail if we can't get a reference to the device */ - if (!get_device(&dev->sdev_gendev)) + if (unlikely(cmd == NULL)) return NULL; - cmd = __scsi_get_command(dev->host, gfp_mask); - - if (likely(cmd != NULL)) { - unsigned long flags; - - cmd->device = dev; - INIT_LIST_HEAD(&cmd->list); - spin_lock_irqsave(&dev->list_lock, flags); - list_add_tail(&cmd->list, &dev->cmd_list); - spin_unlock_irqrestore(&dev->list_lock, flags); - cmd->jiffies_at_alloc = jiffies; - } else - put_device(&dev->sdev_gendev); - + cmd->device = dev; + INIT_LIST_HEAD(&cmd->list); + INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); + spin_lock_irqsave(&dev->list_lock, flags); + list_add_tail(&cmd->list, &dev->cmd_list); + spin_unlock_irqrestore(&dev->list_lock, flags); + cmd->jiffies_at_alloc = jiffies; return cmd; } EXPORT_SYMBOL(scsi_get_command); @@ -317,25 +297,22 @@ EXPORT_SYMBOL(scsi_get_command); * __scsi_put_command - Free a struct scsi_cmnd * @shost: dev->host * @cmd: Command to free - * @dev: parent scsi device */ -void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd, - struct device *dev) +void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { unsigned long flags; - /* changing locks here, don't need to restore the irq state */ - spin_lock_irqsave(&shost->free_list_lock, flags); if (unlikely(list_empty(&shost->free_list))) { - list_add(&cmd->list, &shost->free_list); - cmd = NULL; + spin_lock_irqsave(&shost->free_list_lock, flags); + if (list_empty(&shost->free_list)) { + list_add(&cmd->list, &shost->free_list); + cmd = NULL; + } + spin_unlock_irqrestore(&shost->free_list_lock, flags); } - spin_unlock_irqrestore(&shost->free_list_lock, flags); if (likely(cmd != NULL)) - scsi_pool_free_command(shost->cmd_pool, cmd); - - put_device(dev); + scsi_host_free_command(shost, cmd); } EXPORT_SYMBOL(__scsi_put_command); @@ -349,7 +326,6 @@ 
EXPORT_SYMBOL(__scsi_put_command); */ void scsi_put_command(struct scsi_cmnd *cmd) { - struct scsi_device *sdev = cmd->device; unsigned long flags; /* serious error if the command hasn't come from a device list */ @@ -358,50 +334,109 @@ void scsi_put_command(struct scsi_cmnd *cmd) list_del_init(&cmd->list); spin_unlock_irqrestore(&cmd->device->list_lock, flags); - __scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev); + cancel_delayed_work(&cmd->abort_work); + + __scsi_put_command(cmd->device->host, cmd); } EXPORT_SYMBOL(scsi_put_command); -static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask) +static struct scsi_host_cmd_pool * +scsi_find_host_cmd_pool(struct Scsi_Host *shost) +{ + if (shost->hostt->cmd_size) + return shost->hostt->cmd_pool; + if (shost->unchecked_isa_dma) + return &scsi_cmd_dma_pool; + return &scsi_cmd_pool; +} + +static void +scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool) { + kfree(pool->sense_name); + kfree(pool->cmd_name); + kfree(pool); +} + +static struct scsi_host_cmd_pool * +scsi_alloc_host_cmd_pool(struct Scsi_Host *shost) +{ + struct scsi_host_template *hostt = shost->hostt; + struct scsi_host_cmd_pool *pool; + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + return NULL; + + pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name); + pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name); + if (!pool->cmd_name || !pool->sense_name) { + scsi_free_host_cmd_pool(pool); + return NULL; + } + + pool->slab_flags = SLAB_HWCACHE_ALIGN; + if (shost->unchecked_isa_dma) { + pool->slab_flags |= SLAB_CACHE_DMA; + pool->gfp_mask = __GFP_DMA; + } + return pool; +} + +static struct scsi_host_cmd_pool * +scsi_get_host_cmd_pool(struct Scsi_Host *shost) +{ + struct scsi_host_template *hostt = shost->hostt; struct scsi_host_cmd_pool *retval = NULL, *pool; + size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size; + /* * Select a command slab for this host and create it if not * yet existent. */ mutex_lock(&host_cmd_pool_mutex); - pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool : - &scsi_cmd_pool; + pool = scsi_find_host_cmd_pool(shost); + if (!pool) { + pool = scsi_alloc_host_cmd_pool(shost); + if (!pool) + goto out; + } + if (!pool->users) { - pool->cmd_slab = kmem_cache_create(pool->cmd_name, - sizeof(struct scsi_cmnd), 0, + pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0, pool->slab_flags, NULL); if (!pool->cmd_slab) - goto fail; + goto out_free_pool; pool->sense_slab = kmem_cache_create(pool->sense_name, SCSI_SENSE_BUFFERSIZE, 0, pool->slab_flags, NULL); - if (!pool->sense_slab) { - kmem_cache_destroy(pool->cmd_slab); - goto fail; - } + if (!pool->sense_slab) + goto out_free_slab; } pool->users++; retval = pool; - fail: +out: mutex_unlock(&host_cmd_pool_mutex); return retval; + +out_free_slab: + kmem_cache_destroy(pool->cmd_slab); +out_free_pool: + if (hostt->cmd_size) + scsi_free_host_cmd_pool(pool); + goto out; } -static void scsi_put_host_cmd_pool(gfp_t gfp_mask) +static void scsi_put_host_cmd_pool(struct Scsi_Host *shost) { + struct scsi_host_template *hostt = shost->hostt; struct scsi_host_cmd_pool *pool; mutex_lock(&host_cmd_pool_mutex); - pool = (gfp_mask & __GFP_DMA) ? 
&scsi_cmd_dma_pool : - &scsi_cmd_pool; + pool = scsi_find_host_cmd_pool(shost); + /* * This may happen if a driver has a mismatched get and put * of the command pool; the driver should be implicated in @@ -412,67 +447,13 @@ static void scsi_put_host_cmd_pool(gfp_t gfp_mask) if (!--pool->users) { kmem_cache_destroy(pool->cmd_slab); kmem_cache_destroy(pool->sense_slab); + if (hostt->cmd_size) + scsi_free_host_cmd_pool(pool); } mutex_unlock(&host_cmd_pool_mutex); } /** - * scsi_allocate_command - get a fully allocated SCSI command - * @gfp_mask: allocation mask - * - * This function is for use outside of the normal host based pools. - * It allocates the relevant command and takes an additional reference - * on the pool it used. This function *must* be paired with - * scsi_free_command which also has the identical mask, otherwise the - * free pool counts will eventually go wrong and you'll trigger a bug. - * - * This function should *only* be used by drivers that need a static - * command allocation at start of day for internal functions. - */ -struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask) -{ - struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask); - - if (!pool) - return NULL; - - return scsi_pool_alloc_command(pool, gfp_mask); -} -EXPORT_SYMBOL(scsi_allocate_command); - -/** - * scsi_free_command - free a command allocated by scsi_allocate_command - * @gfp_mask: mask used in the original allocation - * @cmd: command to free - * - * Note: using the original allocation mask is vital because that's - * what determines which command pool we use to free the command. Any - * mismatch will cause the system to BUG eventually. - */ -void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd) -{ - struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask); - - /* - * this could trigger if the mask to scsi_allocate_command - * doesn't match this mask. Otherwise we're guaranteed that this - * succeeds because scsi_allocate_command must have taken a reference - * on the pool - */ - BUG_ON(!pool); - - scsi_pool_free_command(pool, cmd); - /* - * scsi_put_host_cmd_pool is called twice; once to release the - * reference we took above, and once to release the reference - * originally taken by scsi_allocate_command - */ - scsi_put_host_cmd_pool(gfp_mask); - scsi_put_host_cmd_pool(gfp_mask); -} -EXPORT_SYMBOL(scsi_free_command); - -/** * scsi_setup_command_freelist - Setup the command freelist for a scsi host. * @shost: host to allocate the freelist for. * @@ -484,14 +465,13 @@ EXPORT_SYMBOL(scsi_free_command); */ int scsi_setup_command_freelist(struct Scsi_Host *shost) { - struct scsi_cmnd *cmd; const gfp_t gfp_mask = shost->unchecked_isa_dma ? 
GFP_DMA : GFP_KERNEL; + struct scsi_cmnd *cmd; spin_lock_init(&shost->free_list_lock); INIT_LIST_HEAD(&shost->free_list); - shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask); - + shost->cmd_pool = scsi_get_host_cmd_pool(shost); if (!shost->cmd_pool) return -ENOMEM; @@ -500,7 +480,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost) */ cmd = scsi_host_alloc_command(shost, gfp_mask); if (!cmd) { - scsi_put_host_cmd_pool(gfp_mask); + scsi_put_host_cmd_pool(shost); shost->cmd_pool = NULL; return -ENOMEM; } @@ -526,10 +506,10 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost) cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list); list_del_init(&cmd->list); - scsi_pool_free_command(shost->cmd_pool, cmd); + scsi_host_free_command(shost, cmd); } shost->cmd_pool = NULL; - scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL); + scsi_put_host_cmd_pool(shost); } #ifdef CONFIG_SCSI_LOGGING @@ -658,7 +638,6 @@ EXPORT_SYMBOL(scsi_cmd_get_serial); int scsi_dispatch_cmd(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; - unsigned long timeout; int rtn = 0; atomic_inc(&cmd->device->iorequest_cnt); @@ -704,28 +683,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) (cmd->device->lun << 5 & 0xe0); } - /* - * We will wait MIN_RESET_DELAY clock ticks after the last reset so - * we can avoid the drive not being ready. - */ - timeout = host->last_reset + MIN_RESET_DELAY; - - if (host->resetting && time_before(jiffies, timeout)) { - int ticks_remaining = timeout - jiffies; - /* - * NOTE: This may be executed from within an interrupt - * handler! This is bad, but for now, it'll do. The irq - * level of the interrupt handler has been masked out by the - * platform dependent interrupt handling code already, so the - * sti() here will not cause another call to the SCSI host's - * interrupt handler (assuming there is one irq-level per - * host). - */ - while (--ticks_remaining >= 0) - mdelay(1 + 999 / HZ); - host->resetting = 0; - } - scsi_log_send(cmd); /* @@ -770,15 +727,13 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) } /** - * scsi_done - Enqueue the finished SCSI command into the done queue. + * scsi_done - Invoke completion on finished SCSI command. * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives * ownership back to SCSI Core -- i.e. the LLDD has finished with it. * * Description: This function is the mid-level's (SCSI Core) interrupt routine, * which regains ownership of the SCSI command (de facto) from a LLDD, and - * enqueues the command to the done queue for further processing. - * - * This is the producer of the done queue who enqueues at the tail. + * calls blk_complete_request() for further processing. * * This function is interrupt context safe. */ @@ -981,7 +936,7 @@ EXPORT_SYMBOL(scsi_track_queue_full); * This is an internal helper function. You probably want to use * scsi_get_vpd_page instead. * - * Returns 0 on success or a negative error number. + * Returns size of the vpd page on success or a negative error number. 
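The new scsi_vpd_inquiry() contract comes straight from the SPC VPD page header, as the implementation below shows: bytes 2-3 carry the big-endian length of the data that follows the 4-byte header, so the full page size is that value plus 4. A stand-alone sketch of the header parse:

        #include <stdint.h>
        #include <stddef.h>

        /* SPC VPD header: byte 1 = page code, bytes 2-3 = big-endian length
         * of the payload that FOLLOWS the 4-byte header -- hence the "+ 4". */
        static int vpd_page_size(const uint8_t *buf, size_t len)
        {
                if (len < 4)
                        return -1;              /* no complete header */
                return (((int)buf[2] << 8) | buf[3]) + 4; /* full page size */
        }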
*/ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer, u8 page, unsigned len) @@ -989,6 +944,9 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer, int result; unsigned char cmd[16]; + if (len < 4) + return -EINVAL; + cmd[0] = INQUIRY; cmd[1] = 1; /* EVPD */ cmd[2] = page; @@ -1003,13 +961,13 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer, result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, NULL, 30 * HZ, 3, NULL); if (result) - return result; + return -EIO; /* Sanity check that we got the page back that we asked for */ if (buffer[1] != page) return -EIO; - return 0; + return get_unaligned_be16(&buffer[2]) + 4; } /** @@ -1031,20 +989,23 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, { int i, result; + if (sdev->skip_vpd_pages) + goto fail; + /* Ask for all the pages supported by this device */ result = scsi_vpd_inquiry(sdev, buf, 0, buf_len); - if (result) + if (result < 4) goto fail; /* If the user actually wanted this page, we can skip the rest */ if (page == 0) return 0; - for (i = 0; i < min((int)buf[3], buf_len - 4); i++) - if (buf[i + 4] == page) + for (i = 4; i < min(result, buf_len); i++) + if (buf[i] == page) goto found; - if (i < buf[3] && i >= buf_len - 4) + if (i < result && i >= buf_len) /* ran off the end of the buffer, give us benefit of doubt */ goto found; /* The device claims it doesn't support the requested page */ @@ -1052,7 +1013,7 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, found: result = scsi_vpd_inquiry(sdev, buf, page, buf_len); - if (result) + if (result < 0) goto fail; return 0; @@ -1063,6 +1024,93 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, EXPORT_SYMBOL_GPL(scsi_get_vpd_page); /** + * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure + * @sdev: The device to ask + * + * Attach the 'Device Identification' VPD page (0x83) and the + * 'Unit Serial Number' VPD page (0x80) to a SCSI device + * structure. This information can be used to identify the device + * uniquely. 
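scsi_attach_vpd(), shown next, leans on that size-returning contract to implement an allocate/query/grow/retry loop: if the device reports a page larger than the buffer just passed in, the buffer is reallocated at the reported size and the query repeated. The same loop in isolation, with query() as a hypothetical stand-in for scsi_vpd_inquiry():

        #include <stdlib.h>

        /* query() fills at most len bytes and returns the device's full page
         * size (which may exceed len), or a negative error. */
        static unsigned char *fetch_page(int (*query)(unsigned char *buf, int len),
                                         int *out_len)
        {
                int len = 255;                  /* first-pass guess */

                for (;;) {
                        unsigned char *buf = malloc(len);
                        int result;

                        if (!buf)
                                return NULL;
                        result = query(buf, len);
                        if (result < 0) {       /* hard failure */
                                free(buf);
                                return NULL;
                        }
                        if (result <= len) {    /* the whole page fit */
                                *out_len = result;
                                return buf;
                        }
                        free(buf);              /* truncated: size up and retry */
                        len = result;
                }
        }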
+ */ +void scsi_attach_vpd(struct scsi_device *sdev) +{ + int result, i; + int vpd_len = SCSI_VPD_PG_LEN; + int pg80_supported = 0; + int pg83_supported = 0; + unsigned char *vpd_buf; + + if (sdev->skip_vpd_pages) + return; +retry_pg0: + vpd_buf = kmalloc(vpd_len, GFP_KERNEL); + if (!vpd_buf) + return; + + /* Ask for all the pages supported by this device */ + result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len); + if (result < 0) { + kfree(vpd_buf); + return; + } + if (result > vpd_len) { + vpd_len = result; + kfree(vpd_buf); + goto retry_pg0; + } + + for (i = 4; i < result; i++) { + if (vpd_buf[i] == 0x80) + pg80_supported = 1; + if (vpd_buf[i] == 0x83) + pg83_supported = 1; + } + kfree(vpd_buf); + vpd_len = SCSI_VPD_PG_LEN; + + if (pg80_supported) { +retry_pg80: + vpd_buf = kmalloc(vpd_len, GFP_KERNEL); + if (!vpd_buf) + return; + + result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len); + if (result < 0) { + kfree(vpd_buf); + return; + } + if (result > vpd_len) { + vpd_len = result; + kfree(vpd_buf); + goto retry_pg80; + } + sdev->vpd_pg80_len = result; + sdev->vpd_pg80 = vpd_buf; + vpd_len = SCSI_VPD_PG_LEN; + } + + if (pg83_supported) { +retry_pg83: + vpd_buf = kmalloc(vpd_len, GFP_KERNEL); + if (!vpd_buf) + return; + + result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len); + if (result < 0) { + kfree(vpd_buf); + return; + } + if (result > vpd_len) { + vpd_len = result; + kfree(vpd_buf); + goto retry_pg83; + } + sdev->vpd_pg83_len = result; + sdev->vpd_pg83 = vpd_buf; + } +} + +/** * scsi_report_opcode - Find out if a given command opcode is supported * @sdev: scsi device to query * @buffer: scratch buffer (must be at least 20 bytes long) @@ -1070,8 +1118,8 @@ EXPORT_SYMBOL_GPL(scsi_get_vpd_page); * @opcode: opcode for command to look up * * Uses the REPORT SUPPORTED OPERATION CODES to look up the given - * opcode. Returns 0 if RSOC fails or if the command opcode is - * unsupported. Returns 1 if the device claims to support the command. + * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is + * unsupported and 1 if the device claims to support the command. 
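With scsi_report_opcode() now tri-state, a caller has to separate "RSOC itself failed" from "the opcode is affirmatively unsupported" -- only the latter should disable a feature. A hypothetical sd-style caller, where disable_ws()/enable_ws() are made-up policy stubs:

        #include <scsi/scsi_device.h>

        static void disable_ws(struct scsi_device *sdev); /* policy stubs, */
        static void enable_ws(struct scsi_device *sdev);  /* prototypes only */

        static void probe_write_same(struct scsi_device *sdev,
                                     unsigned char *buf, unsigned int len)
        {
                int res = scsi_report_opcode(sdev, buf, len,
                                             0x93 /* WRITE SAME(16) */);

                if (res < 0)
                        return;         /* RSOC unusable: learn nothing   */
                if (res == 0)
                        disable_ws(sdev); /* affirmatively unsupported    */
                else
                        enable_ws(sdev);  /* device claims support        */
        }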
*/ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, unsigned int len, unsigned char opcode) @@ -1081,7 +1129,7 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, int result; if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3) - return 0; + return -EINVAL; memset(cmd, 0, 16); cmd[0] = MAINTENANCE_IN; @@ -1097,7 +1145,7 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, if (result && scsi_sense_valid(&sshdr) && sshdr.sense_key == ILLEGAL_REQUEST && (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00) - return 0; + return -EINVAL; if ((buffer[1] & 3) == 3) /* Command supported */ return 1; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 5cda11c07c6..1328a262107 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -64,6 +64,7 @@ static const char * scsi_debug_version_date = "20100324"; /* Additional Sense Code (ASC) */ #define NO_ADDITIONAL_SENSE 0x0 #define LOGICAL_UNIT_NOT_READY 0x4 +#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8 #define UNRECOVERED_READ_ERR 0x11 #define PARAMETER_LIST_LENGTH_ERR 0x1a #define INVALID_OPCODE 0x20 @@ -129,6 +130,7 @@ static const char * scsi_debug_version_date = "20100324"; #define SCSI_DEBUG_OPT_DIF_ERR 32 #define SCSI_DEBUG_OPT_DIX_ERR 64 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128 +#define SCSI_DEBUG_OPT_SHORT_TRANSFER 256 /* When "every_nth" > 0 then modulo "every_nth" commands: * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set * - a RECOVERED_ERROR is simulated on successful read and write @@ -169,7 +171,7 @@ static int scsi_debug_dix = DEF_DIX; static int scsi_debug_dsense = DEF_D_SENSE; static int scsi_debug_every_nth = DEF_EVERY_NTH; static int scsi_debug_fake_rw = DEF_FAKE_RW; -static int scsi_debug_guard = DEF_GUARD; +static unsigned int scsi_debug_guard = DEF_GUARD; static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; static int scsi_debug_max_luns = DEF_MAX_LUNS; static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE; @@ -195,6 +197,7 @@ static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH; static bool scsi_debug_removable = DEF_REMOVABLE; +static bool scsi_debug_clustering; static int scsi_debug_cmnd_count = 0; @@ -258,7 +261,7 @@ struct sdebug_queued_cmd { static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE]; static unsigned char * fake_storep; /* ramdisk storage */ -static unsigned char *dif_storep; /* protection info */ +static struct sd_dif_tuple *dif_storep; /* protection info */ static void *map_storep; /* provisioning map */ static unsigned long map_size; @@ -277,11 +280,6 @@ static char sdebug_proc_name[] = "scsi_debug"; static struct bus_type pseudo_lld_bus; -static inline sector_t dif_offset(sector_t sector) -{ - return sector << 3; -} - static struct device_driver sdebug_driverfs_driver = { .name = sdebug_proc_name, .bus = &pseudo_lld_bus, @@ -298,6 +296,20 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, 0, 0, 0x0, 0x0}; +static void *fake_store(unsigned long long lba) +{ + lba = do_div(lba, sdebug_store_sectors); + + return fake_storep + lba * scsi_debug_sector_size; +} + +static struct sd_dif_tuple *dif_store(sector_t sector) +{ + sector = do_div(sector, sdebug_store_sectors); + + return dif_storep + sector; +} + static int 
sdebug_add_adapter(void); static void sdebug_remove_adapter(void); @@ -439,10 +451,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, arr, arr_len); - if (sdb->resid) - sdb->resid -= act_len; - else - sdb->resid = scsi_bufflen(scp) - act_len; + sdb->resid = scsi_bufflen(scp) - act_len; return 0; } @@ -1693,114 +1702,159 @@ static int check_device_access_params(struct sdebug_dev_info *devi, return 0; } +/* Returns number of bytes copied or -1 if error. */ static int do_device_access(struct scsi_cmnd *scmd, struct sdebug_dev_info *devi, unsigned long long lba, unsigned int num, int write) { int ret; unsigned long long block, rest = 0; - int (*func)(struct scsi_cmnd *, unsigned char *, int); + struct scsi_data_buffer *sdb; + enum dma_data_direction dir; + size_t (*func)(struct scatterlist *, unsigned int, void *, size_t, + off_t); + + if (write) { + sdb = scsi_out(scmd); + dir = DMA_TO_DEVICE; + func = sg_pcopy_to_buffer; + } else { + sdb = scsi_in(scmd); + dir = DMA_FROM_DEVICE; + func = sg_pcopy_from_buffer; + } - func = write ? fetch_to_dev_buffer : fill_from_dev_buffer; + if (!sdb->length) + return 0; + if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir)) + return -1; block = do_div(lba, sdebug_store_sectors); if (block + num > sdebug_store_sectors) rest = block + num - sdebug_store_sectors; - ret = func(scmd, fake_storep + (block * scsi_debug_sector_size), - (num - rest) * scsi_debug_sector_size); - if (!ret && rest) - ret = func(scmd, fake_storep, rest * scsi_debug_sector_size); + ret = func(sdb->table.sgl, sdb->table.nents, + fake_storep + (block * scsi_debug_sector_size), + (num - rest) * scsi_debug_sector_size, 0); + if (ret != (num - rest) * scsi_debug_sector_size) + return ret; + + if (rest) { + ret += func(sdb->table.sgl, sdb->table.nents, + fake_storep, rest * scsi_debug_sector_size, + (num - rest) * scsi_debug_sector_size); + } return ret; } -static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, - unsigned int sectors, u32 ei_lba) +static __be16 dif_compute_csum(const void *buf, int len) { - unsigned int i, resid; - struct scatterlist *psgl; - struct sd_dif_tuple *sdt; - sector_t sector; - sector_t tmp_sec = start_sec; - void *paddr; + __be16 csum; - start_sec = do_div(tmp_sec, sdebug_store_sectors); + if (scsi_debug_guard) + csum = (__force __be16)ip_compute_csum(buf, len); + else + csum = cpu_to_be16(crc_t10dif(buf, len)); - sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec)); + return csum; +} - for (i = 0 ; i < sectors ; i++) { - u16 csum; +static int dif_verify(struct sd_dif_tuple *sdt, const void *data, + sector_t sector, u32 ei_lba) +{ + __be16 csum = dif_compute_csum(data, scsi_debug_sector_size); - if (sdt[i].app_tag == 0xffff) - continue; + if (sdt->guard_tag != csum) { + pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n", + __func__, + (unsigned long)sector, + be16_to_cpu(sdt->guard_tag), + be16_to_cpu(csum)); + return 0x01; + } + if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION && + be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { + pr_err("%s: REF check failed on sector %lu\n", + __func__, (unsigned long)sector); + return 0x03; + } + if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && + be32_to_cpu(sdt->ref_tag) != ei_lba) { + pr_err("%s: REF check failed on sector %lu\n", + __func__, (unsigned long)sector); + return 0x03; + } + return 0; +} - sector = start_sec + i; +static void dif_copy_prot(struct scsi_cmnd 
*SCpnt, sector_t sector, + unsigned int sectors, bool read) +{ + size_t resid; + void *paddr; + const void *dif_store_end = dif_storep + sdebug_store_sectors; + struct sg_mapping_iter miter; - switch (scsi_debug_guard) { - case 1: - csum = ip_compute_csum(fake_storep + - sector * scsi_debug_sector_size, - scsi_debug_sector_size); - break; - case 0: - csum = crc_t10dif(fake_storep + - sector * scsi_debug_sector_size, - scsi_debug_sector_size); - csum = cpu_to_be16(csum); - break; - default: - BUG(); - } + /* Bytes of protection data to copy into sgl */ + resid = sectors * sizeof(*dif_storep); - if (sdt[i].guard_tag != csum) { - printk(KERN_ERR "%s: GUARD check failed on sector %lu" \ - " rcvd 0x%04x, data 0x%04x\n", __func__, - (unsigned long)sector, - be16_to_cpu(sdt[i].guard_tag), - be16_to_cpu(csum)); - dif_errors++; - return 0x01; - } + sg_miter_start(&miter, scsi_prot_sglist(SCpnt), + scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC | + (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG)); - if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION && - be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) { - printk(KERN_ERR "%s: REF check failed on sector %lu\n", - __func__, (unsigned long)sector); - dif_errors++; - return 0x03; - } + while (sg_miter_next(&miter) && resid > 0) { + size_t len = min(miter.length, resid); + void *start = dif_store(sector); + size_t rest = 0; - if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && - be32_to_cpu(sdt[i].ref_tag) != ei_lba) { - printk(KERN_ERR "%s: REF check failed on sector %lu\n", - __func__, (unsigned long)sector); - dif_errors++; - return 0x03; + if (dif_store_end < start + len) + rest = start + len - dif_store_end; + + paddr = miter.addr; + + if (read) + memcpy(paddr, start, len - rest); + else + memcpy(start, paddr, len - rest); + + if (rest) { + if (read) + memcpy(paddr + len - rest, dif_storep, rest); + else + memcpy(dif_storep, paddr + len - rest, rest); } - ei_lba++; + sector += len / sizeof(*dif_storep); + resid -= len; } + sg_miter_stop(&miter); +} + +static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, + unsigned int sectors, u32 ei_lba) +{ + unsigned int i; + struct sd_dif_tuple *sdt; + sector_t sector; - resid = sectors * 8; /* Bytes of protection data to copy into sgl */ - sector = start_sec; + for (i = 0; i < sectors; i++, ei_lba++) { + int ret; - scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) { - int len = min(psgl->length, resid); + sector = start_sec + i; + sdt = dif_store(sector); - paddr = kmap_atomic(sg_page(psgl)) + psgl->offset; - memcpy(paddr, dif_storep + dif_offset(sector), len); + if (sdt->app_tag == cpu_to_be16(0xffff)) + continue; - sector += len >> 3; - if (sector >= sdebug_store_sectors) { - /* Force wrap */ - tmp_sec = sector; - sector = do_div(tmp_sec, sdebug_store_sectors); + ret = dif_verify(sdt, fake_store(sector), sector, ei_lba); + if (ret) { + dif_errors++; + return ret; } - resid -= len; - kunmap_atomic(paddr); } + dif_copy_prot(SCpnt, start_sec, sectors, true); dix_reads++; return 0; @@ -1836,20 +1890,27 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, return check_condition_result; } + read_lock_irqsave(&atomic_rw, iflags); + /* DIX + T10 DIF */ if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) { int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba); if (prot_ret) { + read_unlock_irqrestore(&atomic_rw, iflags); mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret); return illegal_condition_result; } } - read_lock_irqsave(&atomic_rw, iflags); ret = 
do_device_access(SCpnt, devip, lba, num, 0); read_unlock_irqrestore(&atomic_rw, iflags); - return ret; + if (ret == -1) + return DID_ERROR << 16; + + scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret; + + return 0; } void dump_sector(unsigned char *buf, int len) @@ -1876,145 +1937,114 @@ void dump_sector(unsigned char *buf, int len) static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, unsigned int sectors, u32 ei_lba) { - int i, j, ret; + int ret; struct sd_dif_tuple *sdt; - struct scatterlist *dsgl = scsi_sglist(SCpnt); - struct scatterlist *psgl = scsi_prot_sglist(SCpnt); - void *daddr, *paddr; - sector_t tmp_sec = start_sec; - sector_t sector; + void *daddr; + sector_t sector = start_sec; int ppage_offset; - unsigned short csum; - - sector = do_div(tmp_sec, sdebug_store_sectors); + int dpage_offset; + struct sg_mapping_iter diter; + struct sg_mapping_iter piter; BUG_ON(scsi_sg_count(SCpnt) == 0); BUG_ON(scsi_prot_sg_count(SCpnt) == 0); - paddr = kmap_atomic(sg_page(psgl)) + psgl->offset; - ppage_offset = 0; - - /* For each data page */ - scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) { - daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset; - - /* For each sector-sized chunk in data page */ - for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) { + sg_miter_start(&piter, scsi_prot_sglist(SCpnt), + scsi_prot_sg_count(SCpnt), + SG_MITER_ATOMIC | SG_MITER_FROM_SG); + sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt), + SG_MITER_ATOMIC | SG_MITER_FROM_SG); + + /* For each protection page */ + while (sg_miter_next(&piter)) { + dpage_offset = 0; + if (WARN_ON(!sg_miter_next(&diter))) { + ret = 0x01; + goto out; + } + for (ppage_offset = 0; ppage_offset < piter.length; + ppage_offset += sizeof(struct sd_dif_tuple)) { /* If we're at the end of the current - * protection page advance to the next one + * data page advance to the next one */ - if (ppage_offset >= psgl->length) { - kunmap_atomic(paddr); - psgl = sg_next(psgl); - BUG_ON(psgl == NULL); - paddr = kmap_atomic(sg_page(psgl)) - + psgl->offset; - ppage_offset = 0; - } - - sdt = paddr + ppage_offset; - - switch (scsi_debug_guard) { - case 1: - csum = ip_compute_csum(daddr, - scsi_debug_sector_size); - break; - case 0: - csum = cpu_to_be16(crc_t10dif(daddr, - scsi_debug_sector_size)); - break; - default: - BUG(); - ret = 0; - goto out; + if (dpage_offset >= diter.length) { + if (WARN_ON(!sg_miter_next(&diter))) { + ret = 0x01; + goto out; + } + dpage_offset = 0; } - if (sdt->guard_tag != csum) { - printk(KERN_ERR - "%s: GUARD check failed on sector %lu " \ - "rcvd 0x%04x, calculated 0x%04x\n", - __func__, (unsigned long)sector, - be16_to_cpu(sdt->guard_tag), - be16_to_cpu(csum)); - ret = 0x01; - dump_sector(daddr, scsi_debug_sector_size); - goto out; - } + sdt = piter.addr + ppage_offset; + daddr = diter.addr + dpage_offset; - if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION && - be32_to_cpu(sdt->ref_tag) - != (start_sec & 0xffffffff)) { - printk(KERN_ERR - "%s: REF check failed on sector %lu\n", - __func__, (unsigned long)sector); - ret = 0x03; + ret = dif_verify(sdt, daddr, sector, ei_lba); + if (ret) { dump_sector(daddr, scsi_debug_sector_size); goto out; } - if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && - be32_to_cpu(sdt->ref_tag) != ei_lba) { - printk(KERN_ERR - "%s: REF check failed on sector %lu\n", - __func__, (unsigned long)sector); - ret = 0x03; - dump_sector(daddr, scsi_debug_sector_size); - goto out; - } - - /* Would be great to copy this in bigger - * chunks. 
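/*
 * A minimal sketch of the scatterlist-iterator pattern this rewrite
 * adopts: sg_miter_start()/sg_miter_next()/sg_miter_stop() replace the
 * open-coded kmap_atomic() walks in dif_copy_prot(), prot_verify_write()
 * and resp_xdwriteread(). xor_into_sgl() is hypothetical; the sg_miter
 * API is the kernel's.
 */
#include <linux/kernel.h>
#include <linux/scatterlist.h>

static size_t xor_into_sgl(struct scatterlist *sgl, unsigned int nents,
			   const u8 *src, size_t len)
{
	struct sg_mapping_iter miter;
	size_t done = 0;

	/* SG_MITER_ATOMIC: pages are kmapped atomically, no sleeping */
	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
	while (sg_miter_next(&miter) && done < len) {
		size_t n = min(miter.length, len - done);
		u8 *dst = miter.addr;
		size_t i;

		for (i = 0; i < n; i++)	/* mirrors resp_xdwriteread()'s XOR */
			dst[i] ^= src[done + i];
		done += n;
	}
	sg_miter_stop(&miter);
	return done;
}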
However, for the sake of - * correctness we need to verify each sector - * before writing it to "stable" storage - */ - memcpy(dif_storep + dif_offset(sector), sdt, 8); - sector++; - - if (sector == sdebug_store_sectors) - sector = 0; /* Force wrap */ - - start_sec++; ei_lba++; - daddr += scsi_debug_sector_size; - ppage_offset += sizeof(struct sd_dif_tuple); + dpage_offset += scsi_debug_sector_size; } - - kunmap_atomic(daddr); + diter.consumed = dpage_offset; + sg_miter_stop(&diter); } + sg_miter_stop(&piter); - kunmap_atomic(paddr); - + dif_copy_prot(SCpnt, start_sec, sectors, false); dix_writes++; return 0; out: dif_errors++; - kunmap_atomic(daddr); - kunmap_atomic(paddr); + sg_miter_stop(&diter); + sg_miter_stop(&piter); return ret; } -static unsigned int map_state(sector_t lba, unsigned int *num) +static unsigned long lba_to_map_index(sector_t lba) { - unsigned int granularity, alignment, mapped; - sector_t block, next, end; + if (scsi_debug_unmap_alignment) { + lba += scsi_debug_unmap_granularity - + scsi_debug_unmap_alignment; + } + do_div(lba, scsi_debug_unmap_granularity); - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - block = lba + alignment; - do_div(block, granularity); + return lba; +} + +static sector_t map_index_to_lba(unsigned long index) +{ + sector_t lba = index * scsi_debug_unmap_granularity; + + if (scsi_debug_unmap_alignment) { + lba -= scsi_debug_unmap_granularity - + scsi_debug_unmap_alignment; + } + + return lba; +} + +static unsigned int map_state(sector_t lba, unsigned int *num) +{ + sector_t end; + unsigned int mapped; + unsigned long index; + unsigned long next; - mapped = test_bit(block, map_storep); + index = lba_to_map_index(lba); + mapped = test_bit(index, map_storep); if (mapped) - next = find_next_zero_bit(map_storep, map_size, block); + next = find_next_zero_bit(map_storep, map_size, index); else - next = find_next_bit(map_storep, map_size, block); + next = find_next_bit(map_storep, map_size, index); - end = next * granularity - scsi_debug_unmap_alignment; + end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next)); *num = end - lba; return mapped; @@ -2022,47 +2052,42 @@ static unsigned int map_state(sector_t lba, unsigned int *num) static void map_region(sector_t lba, unsigned int len) { - unsigned int granularity, alignment; sector_t end = lba + len; - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - while (lba < end) { - sector_t block, rem; - - block = lba + alignment; - rem = do_div(block, granularity); + unsigned long index = lba_to_map_index(lba); - if (block < map_size) - set_bit(block, map_storep); + if (index < map_size) + set_bit(index, map_storep); - lba += granularity - rem; + lba = map_index_to_lba(index + 1); } } static void unmap_region(sector_t lba, unsigned int len) { - unsigned int granularity, alignment; sector_t end = lba + len; - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - while (lba < end) { - sector_t block, rem; - - block = lba + alignment; - rem = do_div(block, granularity); + unsigned long index = lba_to_map_index(lba); - if (rem == 0 && lba + granularity < end && block < map_size) { - clear_bit(block, map_storep); - if (scsi_debug_lbprz) + if (lba == map_index_to_lba(index) && + lba + scsi_debug_unmap_granularity <= end && + index < map_size) { + clear_bit(index, map_storep); + if (scsi_debug_lbprz) { memset(fake_storep + - block * 
scsi_debug_sector_size, 0, - scsi_debug_sector_size); + lba * scsi_debug_sector_size, 0, + scsi_debug_sector_size * + scsi_debug_unmap_granularity); + } + if (dif_storep) { + memset(dif_storep + lba, 0xff, + sizeof(*dif_storep) * + scsi_debug_unmap_granularity); + } } - lba += granularity - rem; + lba = map_index_to_lba(index + 1); } } @@ -2077,19 +2102,21 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba, if (ret) return ret; + write_lock_irqsave(&atomic_rw, iflags); + /* DIX + T10 DIF */ if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) { int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba); if (prot_ret) { + write_unlock_irqrestore(&atomic_rw, iflags); mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret); return illegal_condition_result; } } - write_lock_irqsave(&atomic_rw, iflags); ret = do_device_access(SCpnt, devip, lba, num, 1); - if (scsi_debug_unmap_granularity) + if (scsi_debug_lbp()) map_region(lba, num); write_unlock_irqrestore(&atomic_rw, iflags); if (-1 == ret) @@ -2122,7 +2149,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, write_lock_irqsave(&atomic_rw, iflags); - if (unmap && scsi_debug_unmap_granularity) { + if (unmap && scsi_debug_lbp()) { unmap_region(lba, num); goto out; } @@ -2146,7 +2173,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, fake_storep + (lba * scsi_debug_sector_size), scsi_debug_sector_size); - if (scsi_debug_unmap_granularity) + if (scsi_debug_lbp()) map_region(lba, num); out: write_unlock_irqrestore(&atomic_rw, iflags); @@ -2166,6 +2193,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) struct unmap_block_desc *desc; unsigned int i, payload_len, descriptors; int ret; + unsigned long iflags; ret = check_readiness(scmd, 1, devip); if (ret) @@ -2187,6 +2215,8 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) desc = (void *)&buf[8]; + write_lock_irqsave(&atomic_rw, iflags); + for (i = 0 ; i < descriptors ; i++) { unsigned long long lba = get_unaligned_be64(&desc[i].lba); unsigned int num = get_unaligned_be32(&desc[i].blocks); @@ -2201,6 +2231,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) ret = 0; out: + write_unlock_irqrestore(&atomic_rw, iflags); kfree(buf); return ret; @@ -2301,36 +2332,37 @@ static int resp_report_luns(struct scsi_cmnd * scp, static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, unsigned int num, struct sdebug_dev_info *devip) { - int i, j, ret = -1; + int j; unsigned char *kaddr, *buf; unsigned int offset; - struct scatterlist *sg; struct scsi_data_buffer *sdb = scsi_in(scp); + struct sg_mapping_iter miter; /* better not to use temporary buffer. 
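/*
 * A minimal sketch of the one-bit-per-granule provisioning map the
 * rework above introduces: once lba_to_map_index() collapses an LBA to
 * a granule index, run-length queries reduce to the standard bitmap
 * helpers, as map_state() does. granule_run_len() is hypothetical;
 * test_bit()/find_next_*bit() are the kernel's.
 */
#include <linux/bitmap.h>

static unsigned long granule_run_len(const unsigned long *map,
				     unsigned long nbits,
				     unsigned long index, bool *mapped)
{
	unsigned long next;

	*mapped = test_bit(index, map);
	next = *mapped ? find_next_zero_bit(map, nbits, index)
		       : find_next_bit(map, nbits, index);
	return next - index;	/* granules sharing the same state */
}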
*/ buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); - if (!buf) - return ret; + if (!buf) { + mk_sense_buffer(devip, NOT_READY, + LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); + return check_condition_result; + } scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp)); offset = 0; - for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) { - kaddr = (unsigned char *)kmap_atomic(sg_page(sg)); - if (!kaddr) - goto out; + sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents, + SG_MITER_ATOMIC | SG_MITER_TO_SG); - for (j = 0; j < sg->length; j++) - *(kaddr + sg->offset + j) ^= *(buf + offset + j); + while (sg_miter_next(&miter)) { + kaddr = miter.addr; + for (j = 0; j < miter.length; j++) + *(kaddr + j) ^= *(buf + offset + j); - offset += sg->length; - kunmap_atomic(kaddr); + offset += miter.length; } - ret = 0; -out: + sg_miter_stop(&miter); kfree(buf); - return ret; + return 0; } /* When timer goes off this function is called. */ @@ -2662,8 +2694,8 @@ static void __init sdebug_build_parts(unsigned char *ramp, / sdebug_sectors_per; pp->end_sector = (end_sec % sdebug_sectors_per) + 1; - pp->start_sect = start_sec; - pp->nr_sects = end_sec - start_sec + 1; + pp->start_sect = cpu_to_le32(start_sec); + pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1); pp->sys_ind = 0x83; /* plain Linux partition */ } } @@ -2732,6 +2764,7 @@ static int schedule_resp(struct scsi_cmnd * cmnd, */ module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR); module_param_named(ato, scsi_debug_ato, int, S_IRUGO); +module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR); module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR); module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO); module_param_named(dif, scsi_debug_dif, int, S_IRUGO); @@ -2739,7 +2772,7 @@ module_param_named(dix, scsi_debug_dix, int, S_IRUGO); module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR); module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); -module_param_named(guard, scsi_debug_guard, int, S_IRUGO); +module_param_named(guard, scsi_debug_guard, uint, S_IRUGO); module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); @@ -2775,6 +2808,7 @@ MODULE_VERSION(SCSI_DEBUG_VERSION); MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); +MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)"); MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)"); MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)"); MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); @@ -2823,31 +2857,27 @@ static const char * scsi_debug_info(struct Scsi_Host * shp) /* scsi_debug_proc_info * Used if the driver currently has no own support for /proc/scsi */ -static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, - int length, int inout) +static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length) { - int len, pos, begin; - int orig_length; - - orig_length = length; + char arr[16]; + int opts; + int minLen = length > 15 ? 15 : length; - if (inout == 1) { - char arr[16]; - int minLen = length > 15 ? 
15 : length; + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + return -EACCES; + memcpy(arr, buffer, minLen); + arr[minLen] = '\0'; + if (1 != sscanf(arr, "%d", &opts)) + return -EINVAL; + scsi_debug_opts = opts; + if (scsi_debug_every_nth != 0) + scsi_debug_cmnd_count = 0; + return length; +} - if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) - return -EACCES; - memcpy(arr, buffer, minLen); - arr[minLen] = '\0'; - if (1 != sscanf(arr, "%d", &pos)) - return -EINVAL; - scsi_debug_opts = pos; - if (scsi_debug_every_nth != 0) - scsi_debug_cmnd_count = 0; - return length; - } - begin = 0; - pos = len = sprintf(buffer, "scsi_debug adapter driver, version " +static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host) +{ + seq_printf(m, "scsi_debug adapter driver, version " "%s [%s]\n" "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, " "every_nth=%d(curr:%d)\n" @@ -2862,24 +2892,16 @@ static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **sta scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets, num_host_resets, dix_reads, dix_writes, dif_errors); - if (pos < offset) { - len = 0; - begin = pos; - } - *start = buffer + (offset - begin); /* Start of wanted data */ - len -= (offset - begin); - if (len > length) - len = length; - return len; + return 0; } -static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf) +static ssize_t delay_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay); } -static ssize_t sdebug_delay_store(struct device_driver * ddp, - const char * buf, size_t count) +static ssize_t delay_store(struct device_driver *ddp, const char *buf, + size_t count) { int delay; char work[20]; @@ -2892,16 +2914,15 @@ static ssize_t sdebug_delay_store(struct device_driver * ddp, } return -EINVAL; } -DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show, - sdebug_delay_store); +static DRIVER_ATTR_RW(delay); -static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf) +static ssize_t opts_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts); } -static ssize_t sdebug_opts_store(struct device_driver * ddp, - const char * buf, size_t count) +static ssize_t opts_store(struct device_driver *ddp, const char *buf, + size_t count) { int opts; char work[20]; @@ -2921,15 +2942,14 @@ opts_done: scsi_debug_cmnd_count = 0; return count; } -DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show, - sdebug_opts_store); +static DRIVER_ATTR_RW(opts); -static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf) +static ssize_t ptype_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype); } -static ssize_t sdebug_ptype_store(struct device_driver * ddp, - const char * buf, size_t count) +static ssize_t ptype_store(struct device_driver *ddp, const char *buf, + size_t count) { int n; @@ -2939,14 +2959,14 @@ static ssize_t sdebug_ptype_store(struct device_driver * ddp, } return -EINVAL; } -DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store); +static DRIVER_ATTR_RW(ptype); -static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf) +static ssize_t dsense_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense); } -static ssize_t sdebug_dsense_store(struct device_driver * ddp, - const char * buf, size_t count) +static 
ssize_t dsense_store(struct device_driver *ddp, const char *buf, + size_t count) { int n; @@ -2956,15 +2976,14 @@ static ssize_t sdebug_dsense_store(struct device_driver * ddp, } return -EINVAL; } -DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show, - sdebug_dsense_store); +static DRIVER_ATTR_RW(dsense); -static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf) +static ssize_t fake_rw_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw); } -static ssize_t sdebug_fake_rw_store(struct device_driver * ddp, - const char * buf, size_t count) +static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, + size_t count) { int n; @@ -2974,15 +2993,14 @@ static ssize_t sdebug_fake_rw_store(struct device_driver * ddp, } return -EINVAL; } -DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show, - sdebug_fake_rw_store); +static DRIVER_ATTR_RW(fake_rw); -static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf) +static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0); } -static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp, - const char * buf, size_t count) +static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf, + size_t count) { int n; @@ -2992,15 +3010,14 @@ static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp, } return -EINVAL; } -DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show, - sdebug_no_lun_0_store); +static DRIVER_ATTR_RW(no_lun_0); -static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf) +static ssize_t num_tgts_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts); } -static ssize_t sdebug_num_tgts_store(struct device_driver * ddp, - const char * buf, size_t count) +static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf, + size_t count) { int n; @@ -3011,27 +3028,26 @@ static ssize_t sdebug_num_tgts_store(struct device_driver * ddp, } return -EINVAL; } -DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show, - sdebug_num_tgts_store); +static DRIVER_ATTR_RW(num_tgts); -static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf) +static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb); } -DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL); +static DRIVER_ATTR_RO(dev_size_mb); -static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf) +static ssize_t num_parts_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts); } -DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL); +static DRIVER_ATTR_RO(num_parts); -static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf) +static ssize_t every_nth_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth); } -static ssize_t sdebug_every_nth_store(struct device_driver * ddp, - const char * buf, size_t count) +static ssize_t every_nth_store(struct device_driver *ddp, const char *buf, + size_t count) { int nth; @@ -3042,15 +3058,14 @@ static ssize_t sdebug_every_nth_store(struct device_driver * ddp, } return -EINVAL; } -DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show, - sdebug_every_nth_store); +static DRIVER_ATTR_RW(every_nth); -static ssize_t 
sdebug_max_luns_show(struct device_driver * ddp, char * buf) +static ssize_t max_luns_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns); } -static ssize_t sdebug_max_luns_store(struct device_driver * ddp, - const char * buf, size_t count) +static ssize_t max_luns_store(struct device_driver *ddp, const char *buf, + size_t count) { int n; @@ -3061,15 +3076,14 @@ static ssize_t sdebug_max_luns_store(struct device_driver * ddp, } return -EINVAL; } -DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show, - sdebug_max_luns_store); +static DRIVER_ATTR_RW(max_luns); -static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf) +static ssize_t max_queue_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue); } -static ssize_t sdebug_max_queue_store(struct device_driver * ddp, - const char * buf, size_t count) +static ssize_t max_queue_store(struct device_driver *ddp, const char *buf, + size_t count) { int n; @@ -3080,27 +3094,26 @@ static ssize_t sdebug_max_queue_store(struct device_driver * ddp, } return -EINVAL; } -DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show, - sdebug_max_queue_store); +static DRIVER_ATTR_RW(max_queue); -static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf) +static ssize_t no_uld_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld); } -DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL); +static DRIVER_ATTR_RO(no_uld); -static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf) +static ssize_t scsi_level_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level); } -DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL); +static DRIVER_ATTR_RO(scsi_level); -static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf) +static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb); } -static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp, - const char * buf, size_t count) +static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf, + size_t count) { int n; @@ -3113,16 +3126,15 @@ static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp, } return -EINVAL; } -DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show, - sdebug_virtual_gb_store); +static DRIVER_ATTR_RW(virtual_gb); -static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf) +static ssize_t add_host_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host); } -static ssize_t sdebug_add_host_store(struct device_driver * ddp, - const char * buf, size_t count) +static ssize_t add_host_store(struct device_driver *ddp, const char *buf, + size_t count) { int delta_hosts; @@ -3139,16 +3151,14 @@ static ssize_t sdebug_add_host_store(struct device_driver * ddp, } return count; } -DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, - sdebug_add_host_store); +static DRIVER_ATTR_RW(add_host); -static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp, - char * buf) +static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno); } -static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp, - const char * buf, size_t count) 
+static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf, + size_t count) { int n; @@ -3158,40 +3168,39 @@ static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp, } return -EINVAL; } -DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show, - sdebug_vpd_use_hostno_store); +static DRIVER_ATTR_RW(vpd_use_hostno); -static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf) +static ssize_t sector_size_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size); } -DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL); +static DRIVER_ATTR_RO(sector_size); -static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf) +static ssize_t dix_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix); } -DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL); +static DRIVER_ATTR_RO(dix); -static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf) +static ssize_t dif_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif); } -DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL); +static DRIVER_ATTR_RO(dif); -static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf) +static ssize_t guard_show(struct device_driver *ddp, char *buf) { - return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard); + return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard); } -DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL); +static DRIVER_ATTR_RO(guard); -static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf) +static ssize_t ato_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato); } -DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL); +static DRIVER_ATTR_RO(ato); -static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf) +static ssize_t map_show(struct device_driver *ddp, char *buf) { ssize_t count; @@ -3206,15 +3215,14 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf) return count; } -DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL); +static DRIVER_ATTR_RO(map); -static ssize_t sdebug_removable_show(struct device_driver *ddp, - char *buf) +static ssize_t removable_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0); } -static ssize_t sdebug_removable_store(struct device_driver *ddp, - const char *buf, size_t count) +static ssize_t removable_store(struct device_driver *ddp, const char *buf, + size_t count) { int n; @@ -3224,76 +3232,45 @@ static ssize_t sdebug_removable_store(struct device_driver *ddp, } return -EINVAL; } -DRIVER_ATTR(removable, S_IRUGO | S_IWUSR, sdebug_removable_show, - sdebug_removable_store); +static DRIVER_ATTR_RW(removable); - -/* Note: The following function creates attribute files in the +/* Note: The following array creates attribute files in the /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these files (over those found in the /sys/module/scsi_debug/parameters directory) is that auxiliary actions can be triggered when an attribute is changed. For example see: sdebug_add_host_store() above. 
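/*
 * A minimal sketch of the conversion pattern applied throughout this
 * file: DRIVER_ATTR_RW(name) expects name_show()/name_store() and
 * declares driver_attr_name, and listing the attributes in a *_groups
 * array lets the driver core create the sysfs files, replacing the
 * hand-rolled driver_create_file()/driver_remove_file() pairs removed
 * below. All "example" names are illustrative.
 */
#include <linux/device.h>

static ssize_t example_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
}
static ssize_t example_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	return count;	/* parse and apply the new value here */
}
static DRIVER_ATTR_RW(example);

static struct attribute *example_drv_attrs[] = {
	&driver_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example_drv);
/* then point the bus at it, as done below: .drv_groups = example_drv_groups */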
*/ -static int do_create_driverfs_files(void) -{ - int ret; - ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_removable); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato); - ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map); - return ret; -} - -static void do_remove_driverfs_files(void) -{ - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_removable); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); - driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay); - 
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host); -} +static struct attribute *sdebug_drv_attrs[] = { + &driver_attr_delay.attr, + &driver_attr_opts.attr, + &driver_attr_ptype.attr, + &driver_attr_dsense.attr, + &driver_attr_fake_rw.attr, + &driver_attr_no_lun_0.attr, + &driver_attr_num_tgts.attr, + &driver_attr_dev_size_mb.attr, + &driver_attr_num_parts.attr, + &driver_attr_every_nth.attr, + &driver_attr_max_luns.attr, + &driver_attr_max_queue.attr, + &driver_attr_no_uld.attr, + &driver_attr_scsi_level.attr, + &driver_attr_virtual_gb.attr, + &driver_attr_add_host.attr, + &driver_attr_vpd_use_hostno.attr, + &driver_attr_sector_size.attr, + &driver_attr_dix.attr, + &driver_attr_dif.attr, + &driver_attr_guard.attr, + &driver_attr_ato.attr, + &driver_attr_map.attr, + &driver_attr_removable.attr, + NULL, +}; +ATTRIBUTE_GROUPS(sdebug_drv); -struct device *pseudo_primary; +static struct device *pseudo_primary; static int __init scsi_debug_init(void) { @@ -3381,7 +3358,7 @@ static int __init scsi_debug_init(void) if (scsi_debug_num_parts > 0) sdebug_build_parts(fake_storep, sz); - if (scsi_debug_dif) { + if (scsi_debug_dix) { int dif_size; dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple); @@ -3401,8 +3378,6 @@ static int __init scsi_debug_init(void) /* Logical Block Provisioning */ if (scsi_debug_lbp()) { - unsigned int map_bytes; - scsi_debug_unmap_max_blocks = clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU); @@ -3413,16 +3388,16 @@ static int __init scsi_debug_init(void) clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU); if (scsi_debug_unmap_alignment && - scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { + scsi_debug_unmap_granularity <= + scsi_debug_unmap_alignment) { printk(KERN_ERR - "%s: ERR: unmap_granularity < unmap_alignment\n", + "%s: ERR: unmap_granularity <= unmap_alignment\n", __func__); return -EINVAL; } - map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity); - map_bytes = map_size >> 3; - map_storep = vmalloc(map_bytes); + map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; + map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long)); printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n", map_size); @@ -3433,7 +3408,7 @@ static int __init scsi_debug_init(void) goto free_vm; } - memset(map_storep, 0x0, map_bytes); + bitmap_zero(map_storep, map_size); /* Map first 1KB for partition table */ if (scsi_debug_num_parts) @@ -3458,12 +3433,6 @@ static int __init scsi_debug_init(void) ret); goto bus_unreg; } - ret = do_create_driverfs_files(); - if (ret < 0) { - printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n", - ret); - goto del_files; - } init_all_queued(); @@ -3484,9 +3453,6 @@ static int __init scsi_debug_init(void) } return 0; -del_files: - do_remove_driverfs_files(); - driver_unregister(&sdebug_driverfs_driver); bus_unreg: bus_unregister(&pseudo_lld_bus); dev_unreg: @@ -3508,7 +3474,6 @@ static void __exit scsi_debug_exit(void) stop_all_queued(); for (; k; k--) sdebug_remove_adapter(); - do_remove_driverfs_files(); driver_unregister(&sdebug_driverfs_driver); bus_unregister(&pseudo_lld_bus); root_device_unregister(pseudo_primary); @@ -3619,6 +3584,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) int inj_transport = 0; int inj_dif = 0; int inj_dix = 0; + int inj_short = 0; int delay_override = 0; int unmap = 0; @@ -3664,6 +3630,8 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) inj_dif = 1; /* to reads and writes below */ else if 
(SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts) inj_dix = 1; /* to reads and writes below */ + else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts) + inj_short = 1; } if (devip->wlun) { @@ -3780,6 +3748,10 @@ read: if (scsi_debug_fake_rw) break; get_data_transfer_info(cmd, &lba, &num, &ei_lba); + + if (inj_short) + num /= 2; + errsts = resp_read(SCpnt, lba, num, devip, ei_lba); if (inj_recovered && (0 == errsts)) { mk_sense_buffer(devip, RECOVERED_ERROR, @@ -3957,7 +3929,8 @@ write: static DEF_SCSI_QCMD(scsi_debug_queuecommand) static struct scsi_host_template sdebug_driver_template = { - .proc_info = scsi_debug_proc_info, + .show_info = scsi_debug_show_info, + .write_info = scsi_debug_write_info, .proc_name = sdebug_proc_name, .name = "SCSI DEBUG", .info = scsi_debug_info, @@ -3990,6 +3963,8 @@ static int sdebug_driver_probe(struct device * dev) sdbg_host = to_sdebug_host(dev); sdebug_driver_template.can_queue = scsi_debug_max_queue; + if (scsi_debug_clustering) + sdebug_driver_template.use_clustering = ENABLE_CLUSTERING; hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); if (NULL == hpnt) { printk(KERN_ERR "%s: scsi_register failed\n", __func__); @@ -4097,4 +4072,5 @@ static struct bus_type pseudo_lld_bus = { .match = pseudo_lld_bus_match, .probe = sdebug_driver_probe, .remove = sdebug_driver_remove, + .drv_groups = sdebug_drv_groups, }; diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 43fca9170bf..f969aca0b54 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -228,6 +228,7 @@ static struct { {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN}, {"SEAGATE", "ST34555N", "0930", BLIST_NOTQ}, /* Chokes on tagged INQUIRY */ {"SEAGATE", "ST3390N", "9546", BLIST_NOTQ}, + {"SEAGATE", "ST900MM0006", NULL, BLIST_SKIP_VPD_PAGES}, {"SGI", "RAID3", "*", BLIST_SPARSELUN}, {"SGI", "RAID5", "*", BLIST_SPARSELUN}, {"SGI", "TP9100", "*", BLIST_REPORTLUN2}, diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index c1b05a83d40..7e957918f33 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -25,6 +25,7 @@ #include <linux/interrupt.h> #include <linux/blkdev.h> #include <linux/delay.h> +#include <linux/jiffies.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -44,8 +45,6 @@ static void scsi_eh_done(struct scsi_cmnd *scmd); -#define SENSE_TIMEOUT (10*HZ) - /* * These should *probably* be handled by the host itself. * Since it is allowed to sleep, it probably should. @@ -54,6 +53,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd); #define HOST_RESET_SETTLE_TIME (10) static int scsi_eh_try_stu(struct scsi_cmnd *scmd); +static int scsi_try_to_abort_cmd(struct scsi_host_template *, + struct scsi_cmnd *); /* called with shost->host_lock held */ void scsi_eh_wakeup(struct Scsi_Host *shost) @@ -88,6 +89,140 @@ void scsi_schedule_eh(struct Scsi_Host *shost) } EXPORT_SYMBOL_GPL(scsi_schedule_eh); +static int scsi_host_eh_past_deadline(struct Scsi_Host *shost) +{ + if (!shost->last_reset || shost->eh_deadline == -1) + return 0; + + /* + * 32bit accesses are guaranteed to be atomic + * (on all supported architectures), so instead + * of using a spinlock we can as well double check + * if eh_deadline has been set to 'off' during the + * time_before call. + */ + if (time_before(jiffies, shost->last_reset + shost->eh_deadline) && + shost->eh_deadline > -1) + return 0; + + return 1; +} + +/** + * scmd_eh_abort_handler - Handle command aborts + * @work: command to be aborted. 
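/*
 * A minimal stand-alone mirror (hypothetical) of the deadline test in
 * scsi_host_eh_past_deadline() above: jiffies comparisons must go
 * through time_before()/time_after() so they remain correct across
 * counter wrap-around.
 */
#include <linux/jiffies.h>

static bool past_deadline(unsigned long last_reset, int eh_deadline)
{
	if (!last_reset || eh_deadline == -1)
		return false;	/* no EH episode running, or feature off */
	/* deadline is last_reset + eh_deadline, both in jiffies */
	return !time_before(jiffies, last_reset + eh_deadline);
}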
+ */ +void +scmd_eh_abort_handler(struct work_struct *work) +{ + struct scsi_cmnd *scmd = + container_of(work, struct scsi_cmnd, abort_work.work); + struct scsi_device *sdev = scmd->device; + int rtn; + + if (scsi_host_eh_past_deadline(sdev->host)) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "scmd %p eh timeout, not aborting\n", + scmd)); + } else { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "aborting command %p\n", scmd)); + rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd); + if (rtn == SUCCESS) { + set_host_byte(scmd, DID_TIME_OUT); + if (scsi_host_eh_past_deadline(sdev->host)) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "scmd %p eh timeout, " + "not retrying aborted " + "command\n", scmd)); + } else if (!scsi_noretry_cmd(scmd) && + (++scmd->retries <= scmd->allowed)) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_WARNING, scmd, + "scmd %p retry " + "aborted command\n", scmd)); + scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); + return; + } else { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_WARNING, scmd, + "scmd %p finish " + "aborted command\n", scmd)); + scsi_finish_command(scmd); + return; + } + } else { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "scmd %p abort failed, rtn %d\n", + scmd, rtn)); + } + } + + if (!scsi_eh_scmd_add(scmd, 0)) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_WARNING, scmd, + "scmd %p terminate " + "aborted command\n", scmd)); + set_host_byte(scmd, DID_TIME_OUT); + scsi_finish_command(scmd); + } +} + +/** + * scsi_abort_command - schedule a command abort + * @scmd: scmd to abort. + * + * We only need to abort commands after a command timeout + */ +static int +scsi_abort_command(struct scsi_cmnd *scmd) +{ + struct scsi_device *sdev = scmd->device; + struct Scsi_Host *shost = sdev->host; + unsigned long flags; + + if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) { + /* + * Retry after abort failed, escalate to next level. + */ + scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED; + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "scmd %p previous abort failed\n", scmd)); + cancel_delayed_work(&scmd->abort_work); + return FAILED; + } + + /* + * Do not try a command abort if + * SCSI EH has already started. + */ + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_in_recovery(shost)) { + spin_unlock_irqrestore(shost->host_lock, flags); + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "scmd %p not aborting, host in recovery\n", + scmd)); + return FAILED; + } + + if (shost->eh_deadline != -1 && !shost->last_reset) + shost->last_reset = jiffies; + spin_unlock_irqrestore(shost->host_lock, flags); + + scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED; + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "scmd %p abort scheduled\n", scmd)); + queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100); + return SUCCESS; +} + /** * scsi_eh_scmd_add - add scsi cmd to error handling. * @scmd: scmd to run eh on. 
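/*
 * A minimal sketch of the mechanism behind scsi_abort_command() above:
 * the asynchronous abort rides a delayed work item embedded in the
 * command, queued at HZ/100 (~10ms) so the abort runs in process
 * context rather than the timeout softirq. The demo_* names are
 * illustrative; the workqueue API is the kernel's.
 */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct demo_cmd {
	struct delayed_work abort_work;
};

static void demo_abort_fn(struct work_struct *work)
{
	struct demo_cmd *cmd =
		container_of(work, struct demo_cmd, abort_work.work);

	/* invoke the ->eh_abort_handler() equivalent here */
	(void)cmd;
}

static void demo_schedule_abort(struct workqueue_struct *wq,
				struct demo_cmd *cmd)
{
	INIT_DELAYED_WORK(&cmd->abort_work, demo_abort_fn);
	queue_delayed_work(wq, &cmd->abort_work, HZ / 100);
}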
@@ -110,7 +245,12 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag) if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) goto out_unlock; + if (shost->eh_deadline != -1 && !shost->last_reset) + shost->last_reset = jiffies; + ret = 1; + if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) + eh_flag &= ~SCSI_EH_CANCEL_CMD; scmd->eh_eflags |= eh_flag; list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); shost->host_failed++; @@ -139,16 +279,23 @@ enum blk_eh_timer_return scsi_times_out(struct request *req) trace_scsi_dispatch_cmd_timeout(scmd); scsi_log_completion(scmd, TIMEOUT_ERROR); + if (host->eh_deadline != -1 && !host->last_reset) + host->last_reset = jiffies; + if (host->transportt->eh_timed_out) rtn = host->transportt->eh_timed_out(scmd); else if (host->hostt->eh_timed_out) rtn = host->hostt->eh_timed_out(scmd); - scmd->result |= DID_TIME_OUT << 16; + if (rtn == BLK_EH_NOT_HANDLED) { + if (!host->hostt->no_async_abort && + scsi_abort_command(scmd) == SUCCESS) + return BLK_EH_NOT_HANDLED; - if (unlikely(rtn == BLK_EH_NOT_HANDLED && - !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) - rtn = BLK_EH_HANDLED; + set_host_byte(scmd, DID_TIME_OUT); + if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) + rtn = BLK_EH_HANDLED; + } return rtn; } @@ -224,12 +371,80 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost, } #endif + /** + * scsi_report_lun_change - Set flag on all *other* devices on the same target + * to indicate that a UNIT ATTENTION is expected. + * @sdev: Device reporting the UNIT ATTENTION + */ +static void scsi_report_lun_change(struct scsi_device *sdev) +{ + sdev->sdev_target->expecting_lun_change = 1; +} + +/** + * scsi_report_sense - Examine scsi sense information and log messages for + * certain conditions, also issue uevents for some of them. + * @sdev: Device reporting the sense code + * @sshdr: sshdr to be examined + */ +static void scsi_report_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sshdr) +{ + enum scsi_device_event evt_type = SDEV_EVT_MAXBITS; /* i.e. none */ + + if (sshdr->sense_key == UNIT_ATTENTION) { + if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) { + evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED; + sdev_printk(KERN_WARNING, sdev, + "Inquiry data has changed"); + } else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) { + evt_type = SDEV_EVT_LUN_CHANGE_REPORTED; + scsi_report_lun_change(sdev); + sdev_printk(KERN_WARNING, sdev, + "Warning! Received an indication that the " + "LUN assignments on this target have " + "changed. The Linux SCSI layer does not " + "automatically remap LUN assignments.\n"); + } else if (sshdr->asc == 0x3f) + sdev_printk(KERN_WARNING, sdev, + "Warning! Received an indication that the " + "operating parameters on this target have " + "changed. The Linux SCSI layer does not " + "automatically adjust these parameters.\n"); + + if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) { + evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED; + sdev_printk(KERN_WARNING, sdev, + "Warning! 
Received an indication that the " + "LUN reached a thin provisioning soft " + "threshold.\n"); + } + + if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) { + evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED; + sdev_printk(KERN_WARNING, sdev, + "Mode parameters changed"); + } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) { + evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED; + sdev_printk(KERN_WARNING, sdev, + "Capacity data has changed"); + } else if (sshdr->asc == 0x2a) + sdev_printk(KERN_WARNING, sdev, + "Parameters changed"); + } + + if (evt_type != SDEV_EVT_MAXBITS) { + set_bit(evt_type, sdev->pending_events); + schedule_work(&sdev->event_work); + } +} + /** * scsi_check_sense - Examine scsi cmd sense * @scmd: Cmd to have sense checked. * * Return value: - * SUCCESS or FAILED or NEEDS_RETRY or TARGET_ERROR + * SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE * * Notes: * When a deferred error is detected the current command has @@ -251,6 +466,8 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) */ return SUCCESS; + scsi_report_sense(sdev, &sshdr); + if (scsi_sense_is_deferred(&sshdr)) return NEEDS_RETRY; @@ -316,6 +533,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) } } /* + * we might also expect a cc/ua if another LUN on the target + * reported a UA with an ASC/ASCQ of 3F 0E - + * REPORTED LUNS DATA HAS CHANGED. + */ + if (scmd->device->sdev_target->expecting_lun_change && + sshdr.asc == 0x3f && sshdr.ascq == 0x0e) + return NEEDS_RETRY; + /* * if the device is in the process of becoming ready, we * should retry. */ @@ -328,26 +553,6 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (scmd->device->allow_restart && (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) return FAILED; - - if (sshdr.asc == 0x3f && sshdr.ascq == 0x0e) - scmd_printk(KERN_WARNING, scmd, - "Warning! Received an indication that the " - "LUN assignments on this target have " - "changed. The Linux SCSI layer does not " - "automatically remap LUN assignments.\n"); - else if (sshdr.asc == 0x3f) - scmd_printk(KERN_WARNING, scmd, - "Warning! Received an indication that the " - "operating parameters on this target have " - "changed. The Linux SCSI layer does not " - "automatically adjust these parameters.\n"); - - if (sshdr.asc == 0x38 && sshdr.ascq == 0x07) - scmd_printk(KERN_WARNING, scmd, - "Warning! Received an indication that the " - "LUN reached a thin provisioning soft " - "threshold.\n"); - /* * Pass the UA upwards for a determination in the completion * functions. 
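/*
 * A minimal sketch of the sense decode that feeds scsi_report_sense()
 * above: a UNIT ATTENTION with ASC/ASCQ 0x3f/0x0e is REPORTED LUNS
 * DATA HAS CHANGED, the condition that expecting_lun_change tracks.
 * ua_is_lun_change() is hypothetical; struct scsi_sense_hdr and
 * UNIT_ATTENTION are the kernel's.
 */
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>

static bool ua_is_lun_change(const struct scsi_sense_hdr *sshdr)
{
	return sshdr->sense_key == UNIT_ATTENTION &&
	       sshdr->asc == 0x3f && sshdr->ascq == 0x0e;
}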
@@ -355,18 +560,25 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) return SUCCESS; /* these are not supported */ + case DATA_PROTECT: + if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) { + /* Thin provisioning hard threshold reached */ + set_host_byte(scmd, DID_ALLOC_FAILURE); + return SUCCESS; + } case COPY_ABORTED: case VOLUME_OVERFLOW: case MISCOMPARE: case BLANK_CHECK: - case DATA_PROTECT: - return TARGET_ERROR; + set_host_byte(scmd, DID_TARGET_FAILURE); + return SUCCESS; case MEDIUM_ERROR: if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */ sshdr.asc == 0x13 || /* AMNF DATA FIELD */ sshdr.asc == 0x14) { /* RECORD NOT FOUND */ - return TARGET_ERROR; + set_host_byte(scmd, DID_MEDIUM_ERROR); + return SUCCESS; } return NEEDS_RETRY; @@ -374,14 +586,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (scmd->device->retry_hwerror) return ADD_TO_MLQUEUE; else - return TARGET_ERROR; + set_host_byte(scmd, DID_TARGET_FAILURE); case ILLEGAL_REQUEST: if (sshdr.asc == 0x20 || /* Invalid command operation code */ sshdr.asc == 0x21 || /* Logical block address out of range */ sshdr.asc == 0x24 || /* Invalid field in cdb */ sshdr.asc == 0x26) { /* Parameter value invalid */ - return TARGET_ERROR; + set_host_byte(scmd, DID_TARGET_FAILURE); } return SUCCESS; @@ -537,7 +749,7 @@ static void scsi_eh_done(struct scsi_cmnd *scmd) /** * scsi_try_host_reset - ask host adapter to reset itself - * @scmd: SCSI cmd to send hsot reset. + * @scmd: SCSI cmd to send host reset. */ static int scsi_try_host_reset(struct scsi_cmnd *scmd) { @@ -709,10 +921,12 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, ses->prot_op = scmd->prot_op; scmd->prot_op = SCSI_PROT_NORMAL; + scmd->eh_eflags = 0; scmd->cmnd = ses->eh_cmnd; memset(scmd->cmnd, 0, BLK_MAX_CDB); memset(&scmd->sdb, 0, sizeof(scmd->sdb)); scmd->request->next_rq = NULL; + scmd->result = 0; if (sense_bytes) { scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE, @@ -791,32 +1005,49 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; DECLARE_COMPLETION_ONSTACK(done); - unsigned long timeleft; + unsigned long timeleft = timeout; struct scsi_eh_save ses; + const unsigned long stall_for = msecs_to_jiffies(100); int rtn; +retry: scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); shost->eh_action = &done; scsi_log_send(scmd); scmd->scsi_done = scsi_eh_done; - shost->hostt->queuecommand(shost, scmd); - - timeleft = wait_for_completion_timeout(&done, timeout); + rtn = shost->hostt->queuecommand(shost, scmd); + if (rtn) { + if (timeleft > stall_for) { + scsi_eh_restore_cmnd(scmd, &ses); + timeleft -= stall_for; + msleep(jiffies_to_msecs(stall_for)); + goto retry; + } + /* signal not to enter either branch of the if () below */ + timeleft = 0; + rtn = NEEDS_RETRY; + } else { + timeleft = wait_for_completion_timeout(&done, timeout); + rtn = SUCCESS; + } shost->eh_action = NULL; - scsi_log_completion(scmd, SUCCESS); + scsi_log_completion(scmd, rtn); SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd: %p, timeleft: %ld\n", __func__, scmd, timeleft)); /* - * If there is time left scsi_eh_done got called, and we will - * examine the actual status codes to see whether the command - * actually did complete normally, else tell the host to forget - * about this command. 
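/*
 * A minimal stand-alone mirror (hypothetical) of the retry policy the
 * hunk below adds to scsi_send_eh_cmnd(): if ->queuecommand() refuses
 * the command, stall ~100ms and retry until the timeout budget is
 * spent. submit_with_budget() and its callback are illustrative.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int submit_with_budget(int (*submit)(void *), void *arg,
			      unsigned long timeout)
{
	const unsigned long stall_for = msecs_to_jiffies(100);

	while (submit(arg)) {		/* non-zero return: host is busy */
		if (timeout <= stall_for)
			return -ETIMEDOUT;	/* budget exhausted */
		timeout -= stall_for;
		msleep(jiffies_to_msecs(stall_for));
	}
	return 0;	/* queued; caller now waits for completion */
}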
+ * If there is time left scsi_eh_done got called, and we will examine + * the actual status codes to see whether the command actually did + * complete normally, else if we have a zero return and no time left, + * the command must still be pending, so abort it and return FAILED. + * If we never actually managed to issue the command, because + * ->queuecommand() kept returning non zero, use the rtn = FAILED + * value above (so don't execute either branch of the if) */ if (timeleft) { rtn = scsi_eh_completed_normally(scmd); @@ -828,7 +1059,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, case SUCCESS: case NEEDS_RETRY: case FAILED: - case TARGET_ERROR: break; case ADD_TO_MLQUEUE: rtn = NEEDS_RETRY; @@ -837,19 +1067,13 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, rtn = FAILED; break; } - } else { + } else if (!rtn) { scsi_abort_eh_cmnd(scmd); rtn = FAILED; } scsi_eh_restore_cmnd(scmd, &ses); - if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { - struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); - if (sdrv->eh_action) - rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn); - } - return rtn; } @@ -864,7 +1088,17 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, */ static int scsi_request_sense(struct scsi_cmnd *scmd) { - return scsi_send_eh_cmnd(scmd, NULL, 0, SENSE_TIMEOUT, ~0); + return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0); +} + +static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn) +{ + if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { + struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); + if (sdrv->eh_action) + rtn = sdrv->eh_action(scmd, rtn); + } + return rtn; } /** @@ -911,6 +1145,7 @@ int scsi_eh_get_sense(struct list_head *work_q, struct list_head *done_q) { struct scsi_cmnd *scmd, *next; + struct Scsi_Host *shost; int rtn; list_for_each_entry_safe(scmd, next, work_q, eh_entry) { @@ -918,6 +1153,23 @@ int scsi_eh_get_sense(struct list_head *work_q, SCSI_SENSE_VALID(scmd)) continue; + shost = scmd->device->host; + if (scsi_host_eh_past_deadline(shost)) { + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "skip %s, past eh deadline\n", + __func__)); + break; + } + if (status_byte(scmd->result) != CHECK_CONDITION) + /* + * don't request sense if there's no check condition + * status because the error we're processing isn't one + * that has a sense code (and some devices get + * confused by sense requests out of the blue) + */ + continue; + SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd, "%s: requesting sense\n", current->comm)); @@ -965,7 +1217,8 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd) int retry_cnt = 1, rtn; retry_tur: - rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0); + rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, + scmd->device->eh_timeout, 0); SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", __func__, scmd, rtn)); @@ -1007,6 +1260,18 @@ static int scsi_eh_test_devices(struct list_head *cmd_list, scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry); sdev = scmd->device; + if (!try_stu) { + if (scsi_host_eh_past_deadline(sdev->host)) { + /* Push items back onto work_q */ + list_splice_init(cmd_list, work_q); + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, sdev->host, + "skip %s, past eh deadline", + __func__)); + break; + } + } + finish_cmds = !scsi_device_online(scmd->device) || (try_stu && !scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) || @@ -1014,7 +1279,9 @@ static int 
scsi_eh_test_devices(struct list_head *cmd_list, list_for_each_entry_safe(scmd, next, cmd_list, eh_entry) if (scmd->device == sdev) { - if (finish_cmds) + if (finish_cmds && + (try_stu || + scsi_eh_action(scmd, SUCCESS) == SUCCESS)) scsi_eh_finish_cmd(scmd, done_q); else list_move_tail(&scmd->eh_entry, work_q); @@ -1042,26 +1309,38 @@ static int scsi_eh_abort_cmds(struct list_head *work_q, struct scsi_cmnd *scmd, *next; LIST_HEAD(check_list); int rtn; + struct Scsi_Host *shost; list_for_each_entry_safe(scmd, next, work_q, eh_entry) { if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD)) continue; + shost = scmd->device->host; + if (scsi_host_eh_past_deadline(shost)) { + list_splice_init(&check_list, work_q); + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "skip %s, past eh deadline\n", + __func__)); + return list_empty(work_q); + } SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:" "0x%p\n", current->comm, scmd)); - rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd); - if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { - scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD; - if (rtn == FAST_IO_FAIL) - scsi_eh_finish_cmd(scmd, done_q); - else - list_move_tail(&scmd->eh_entry, &check_list); - } else + rtn = scsi_try_to_abort_cmd(shost->hostt, scmd); + if (rtn == FAILED) { SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting" " cmd failed:" "0x%p\n", current->comm, scmd)); + list_splice_init(&check_list, work_q); + return list_empty(work_q); + } + scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD; + if (rtn == FAST_IO_FAIL) + scsi_eh_finish_cmd(scmd, done_q); + else + list_move_tail(&scmd->eh_entry, &check_list); } return scsi_eh_test_devices(&check_list, work_q, done_q, 0); @@ -1109,6 +1388,13 @@ static int scsi_eh_stu(struct Scsi_Host *shost, struct scsi_device *sdev; shost_for_each_device(sdev, shost) { + if (scsi_host_eh_past_deadline(shost)) { + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "skip %s, past eh deadline\n", + __func__)); + break; + } stu_scmd = NULL; list_for_each_entry(scmd, work_q, eh_entry) if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) && @@ -1128,7 +1414,8 @@ static int scsi_eh_stu(struct Scsi_Host *shost, !scsi_eh_tur(stu_scmd)) { list_for_each_entry_safe(scmd, next, work_q, eh_entry) { - if (scmd->device == sdev) + if (scmd->device == sdev && + scsi_eh_action(scmd, SUCCESS) == SUCCESS) scsi_eh_finish_cmd(scmd, done_q); } } @@ -1164,6 +1451,13 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, int rtn; shost_for_each_device(sdev, shost) { + if (scsi_host_eh_past_deadline(shost)) { + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "skip %s, past eh deadline\n", + __func__)); + break; + } bdr_scmd = NULL; list_for_each_entry(scmd, work_q, eh_entry) if (scmd->device == sdev) { @@ -1184,7 +1478,8 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, !scsi_eh_tur(bdr_scmd)) { list_for_each_entry_safe(scmd, next, work_q, eh_entry) { - if (scmd->device == sdev) + if (scmd->device == sdev && + scsi_eh_action(scmd, rtn) != FAILED) scsi_eh_finish_cmd(scmd, done_q); } @@ -1224,6 +1519,17 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost, int rtn; unsigned int id; + if (scsi_host_eh_past_deadline(shost)) { + /* push back on work queue for further processing */ + list_splice_init(&check_list, work_q); + list_splice_init(&tmp_list, work_q); + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "skip %s, past eh deadline\n", + __func__)); + return list_empty(work_q); + } + scmd = list_entry(tmp_list.next, struct scsi_cmnd, 
eh_entry); id = scmd_id(scmd); @@ -1276,6 +1582,15 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost, */ for (channel = 0; channel <= shost->max_channel; channel++) { + if (scsi_host_eh_past_deadline(shost)) { + list_splice_init(&check_list, work_q); + SCSI_LOG_ERROR_RECOVERY(3, + shost_printk(KERN_INFO, shost, + "skip %s, past eh deadline\n", + __func__)); + return list_empty(work_q); + } + chan_scmd = NULL; list_for_each_entry(scmd, work_q, eh_entry) { if (channel == scmd_channel(scmd)) { @@ -1375,7 +1690,7 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q, } /** - * scsi_noretry_cmd - determinte if command should be failed fast + * scsi_noretry_cmd - determine if command should be failed fast * @scmd: SCSI cmd to examine. */ int scsi_noretry_cmd(struct scsi_cmnd *scmd) @@ -1383,6 +1698,8 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd) switch (host_byte(scmd->result)) { case DID_OK: break; + case DID_TIME_OUT: + goto check_type; case DID_BUS_BUSY: return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT); case DID_PARITY: @@ -1396,18 +1713,19 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd) return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER); } - switch (status_byte(scmd->result)) { - case CHECK_CONDITION: - /* - * assume caller has checked sense and determinted - * the check condition was retryable. - */ - if (scmd->request->cmd_flags & REQ_FAILFAST_DEV || - scmd->request->cmd_type == REQ_TYPE_BLOCK_PC) - return 1; - } + if (status_byte(scmd->result) != CHECK_CONDITION) + return 0; - return 0; +check_type: + /* + * assume caller has checked sense and determined + * the check condition was retryable. + */ + if (scmd->request->cmd_flags & REQ_FAILFAST_DEV || + scmd->request->cmd_type == REQ_TYPE_BLOCK_PC) + return 1; + else + return 0; } /** @@ -1457,9 +1775,13 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) * looks good. drop through, and check the next byte. 
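The scsi_noretry_cmd() rework above routes DID_TIME_OUT through the same fast-fail test that CHECK_CONDITION already used: the command is failed without further retries only when the request carries REQ_FAILFAST_DEV or is a BLOCK_PC passthrough. Restated as a plain predicate (a sketch; the transport and driver fail-fast cases keep their own flag tests and are omitted here):

    #include <linux/types.h>    /* bool */

    /* 'timed_out' stands for host byte DID_TIME_OUT, 'check_cond' for
     * status byte CHECK_CONDITION; the two flags mirror REQ_FAILFAST_DEV
     * and a BLOCK_PC (passthrough) request. Sketch only. */
    static bool noretry(bool timed_out, bool check_cond,
                        bool failfast_dev, bool block_pc)
    {
            if (!timed_out && !check_cond)
                    return false;   /* not a fast-failable outcome */
            /* caller has already judged the sense data retryable */
            return failfast_dev || block_pc;
    }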
*/ break; + case DID_ABORT: + if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) { + set_host_byte(scmd, DID_TIME_OUT); + return SUCCESS; + } case DID_NO_CONNECT: case DID_BAD_TARGET: - case DID_ABORT: /* * note - this means that we just report the status back * to the top level driver, not that we actually think @@ -1552,6 +1874,8 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) */ return ADD_TO_MLQUEUE; case GOOD: + if (scmd->cmnd[0] == REPORT_LUNS) + scmd->device->sdev_target->expecting_lun_change = 0; scsi_handle_queue_ramp_up(scmd->device); case COMMAND_TERMINATED: return SUCCESS; @@ -1561,14 +1885,6 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) rtn = scsi_check_sense(scmd); if (rtn == NEEDS_RETRY) goto maybe_retry; - else if (rtn == TARGET_ERROR) { - /* - * Need to modify host byte to signal a - * permanent target failure - */ - set_host_byte(scmd, DID_TARGET_FAILURE); - rtn = SUCCESS; - } /* if rtn == FAILED, we have no sense information; * returning FAILED will wake the error handler thread * to collect the sense and redo the decide @@ -1636,6 +1952,8 @@ static void scsi_eh_lock_door(struct scsi_device *sdev) */ req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); + blk_rq_set_block_pc(req); + req->cmd[0] = ALLOW_MEDIUM_REMOVAL; req->cmd[1] = 0; req->cmd[2] = 0; @@ -1645,7 +1963,6 @@ static void scsi_eh_lock_door(struct scsi_device *sdev) req->cmd_len = COMMAND_SIZE(req->cmd[0]); - req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_QUIET; req->timeout = 10 * HZ; req->retries = 5; @@ -1681,8 +1998,9 @@ static void scsi_restart_operations(struct Scsi_Host *shost) * will be requests for character device operations, and also for * ioctls to queued block devices. */ - SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n", - __func__)); + SCSI_LOG_ERROR_RECOVERY(3, + printk("scsi_eh_%d waking up host to restart\n", + shost->host_no)); spin_lock_irqsave(shost->host_lock, flags); if (scsi_host_set_state(shost, SHOST_RUNNING)) @@ -1809,6 +2127,10 @@ static void scsi_unjam_host(struct Scsi_Host *shost) if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q)) scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q); + spin_lock_irqsave(shost->host_lock, flags); + if (shost->eh_deadline != -1) + shost->last_reset = 0; + spin_unlock_irqrestore(shost->host_lock, flags); scsi_eh_flush_done_q(&eh_done_q); } @@ -1835,7 +2157,7 @@ int scsi_error_handler(void *data) if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || shost->host_failed != shost->host_busy) { SCSI_LOG_ERROR_RECOVERY(1, - printk("Error handler scsi_eh_%d sleeping\n", + printk("scsi_eh_%d: sleeping\n", shost->host_no)); schedule(); continue; @@ -1843,8 +2165,9 @@ int scsi_error_handler(void *data) __set_current_state(TASK_RUNNING); SCSI_LOG_ERROR_RECOVERY(1, - printk("Error handler scsi_eh_%d waking up\n", - shost->host_no)); + printk("scsi_eh_%d: waking up %d/%d/%d\n", + shost->host_no, shost->host_eh_scheduled, + shost->host_failed, shost->host_busy)); /* * We have a host that is failing for some reason. 
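The wake-up condition in scsi_error_handler() above is the interesting part: the thread sleeps until either every outstanding command on the host has failed (host_failed == host_busy) or a recovery pass was explicitly scheduled, and the reworked log line now prints all three counters on wake-up. The skeleton is the standard kthread sleep/wake pattern, roughly (a sketch; struct host_state and recover_host() are hypothetical stand-ins for Scsi_Host and scsi_unjam_host()):

    #include <linux/kthread.h>
    #include <linux/sched.h>

    struct host_state {        /* hypothetical stand-in for Scsi_Host */
            int failed, busy, eh_scheduled;
    };
    void recover_host(struct host_state *hs);   /* hypothetical */

    static int eh_thread(void *data)
    {
            struct host_state *hs = data;

            while (!kthread_should_stop()) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    if ((hs->failed == 0 && hs->eh_scheduled == 0) ||
                        hs->failed != hs->busy) {
                            schedule();         /* nothing to recover yet */
                            continue;
                    }
                    __set_current_state(TASK_RUNNING);
                    recover_host(hs);           /* one recovery pass */
            }
            return 0;
    }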
Figure out @@ -1979,7 +2302,18 @@ scsi_reset_provider(struct scsi_device *dev, int flag) if (scsi_autopm_get_host(shost) < 0) return FAILED; + if (!get_device(&dev->sdev_gendev)) { + rtn = FAILED; + goto out_put_autopm_host; + } + scmd = scsi_get_command(dev, GFP_KERNEL); + if (!scmd) { + rtn = FAILED; + put_device(&dev->sdev_gendev); + goto out_put_autopm_host; + } + blk_rq_init(NULL, &req); scmd->request = &req; @@ -2036,6 +2370,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag) scsi_run_host_queues(shost); scsi_next_command(scmd); +out_put_autopm_host: scsi_autopm_put_host(shost); return rtn; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 765398c063c..3f50dfcb322 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -68,23 +68,6 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = { struct kmem_cache *scsi_sdb_cache; -#ifdef CONFIG_ACPI -#include <acpi/acpi_bus.h> - -int scsi_register_acpi_bus_type(struct acpi_bus_type *bus) -{ - bus->bus = &scsi_bus_type; - return register_acpi_bus_type(bus); -} -EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type); - -void scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus) -{ - unregister_acpi_bus_type(bus); -} -EXPORT_SYMBOL_GPL(scsi_unregister_acpi_bus_type); -#endif - /* * When to reinvoke queueing after a resource shortage. It's 3 msecs to * not change behaviour from the previous unplug mechanism, experimentation @@ -92,28 +75,6 @@ EXPORT_SYMBOL_GPL(scsi_unregister_acpi_bus_type); */ #define SCSI_QUEUE_DELAY 3 -/* - * Function: scsi_unprep_request() - * - * Purpose: Remove all preparation done for a request, including its - * associated scsi_cmnd, so that it can be requeued. - * - * Arguments: req - request to unprepare - * - * Lock status: Assumed that no locks are held upon entry. - * - * Returns: Nothing. - */ -static void scsi_unprep_request(struct request *req) -{ - struct scsi_cmnd *cmd = req->special; - - blk_unprep_request(req); - req->special = NULL; - - scsi_put_command(cmd); -} - /** * __scsi_queue_insert - private queue insertion * @cmd: The SCSI command being requeued @@ -176,9 +137,10 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) * lock such that the kblockd_schedule_work() call happens * before blk_cleanup_queue() finishes. 
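Note also the small but important addition above: the requeue path clears cmd->result before putting the request back, so status left over from the failed attempt cannot leak into the retry. The lock ordering described in the comment then keeps the deferred kblockd run from racing with queue teardown; assuming the usual block and SCSI headers, the shape is:

    /* Sketch: requeue under the queue lock so the deferred queue run is
     * scheduled before blk_cleanup_queue() can complete. */
    static void requeue_cmd(struct request_queue *q, struct scsi_cmnd *cmd,
                            struct work_struct *requeue_work)
    {
            unsigned long flags;

            cmd->result = 0;                        /* drop stale status */
            spin_lock_irqsave(q->queue_lock, flags);
            blk_requeue_request(q, cmd->request);   /* back on the queue */
            kblockd_schedule_work(requeue_work);    /* run it from kblockd */
            spin_unlock_irqrestore(q->queue_lock, flags);
    }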
*/ + cmd->result = 0; spin_lock_irqsave(q->queue_lock, flags); blk_requeue_request(q, cmd->request); - kblockd_schedule_work(q, &device->requeue_work); + kblockd_schedule_work(&device->requeue_work); spin_unlock_irqrestore(q->queue_lock, flags); } @@ -223,7 +185,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) */ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, - unsigned char *sense, int timeout, int retries, int flags, + unsigned char *sense, int timeout, int retries, u64 flags, int *resid) { struct request *req; @@ -233,6 +195,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, req = blk_get_request(sdev->request_queue, write, __GFP_WAIT); if (!req) return ret; + blk_rq_set_block_pc(req); if (bufflen && blk_rq_map_kern(sdev->request_queue, req, buffer, bufflen, __GFP_WAIT)) @@ -244,7 +207,6 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, req->sense_len = 0; req->retries = retries; req->timeout = timeout; - req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT; /* @@ -271,11 +233,10 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, } EXPORT_SYMBOL(scsi_execute); - -int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, +int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, int retries, - int *resid) + int *resid, u64 flags) { char *sense = NULL; int result; @@ -286,14 +247,14 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, return DRIVER_ERROR << 24; } result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, - sense, timeout, retries, 0, resid); + sense, timeout, retries, flags, resid); if (sshdr) scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); kfree(sense); return result; } -EXPORT_SYMBOL(scsi_execute_req); +EXPORT_SYMBOL(scsi_execute_req_flags); /* * Function: scsi_init_cmd_errh() @@ -403,33 +364,18 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost) return 0; } -/* - * Function: scsi_run_queue() - * - * Purpose: Select a proper request queue to serve next - * - * Arguments: q - last request's queue - * - * Returns: Nothing - * - * Notes: The previous command was completely finished, start - * a new one if possible. - */ -static void scsi_run_queue(struct request_queue *q) +static void scsi_starved_list_run(struct Scsi_Host *shost) { - struct scsi_device *sdev = q->queuedata; - struct Scsi_Host *shost; LIST_HEAD(starved_list); + struct scsi_device *sdev; unsigned long flags; - shost = sdev->host; - if (scsi_target(sdev)->single_lun) - scsi_single_lun_run(sdev); - spin_lock_irqsave(shost->host_lock, flags); list_splice_init(&shost->starved_list, &starved_list); while (!list_empty(&starved_list)) { + struct request_queue *slq; + /* * As long as shost is accepting commands and we have * starved queues, call blk_run_queue. scsi_request_fn @@ -452,15 +398,51 @@ static void scsi_run_queue(struct request_queue *q) continue; } - spin_unlock(shost->host_lock); - spin_lock(sdev->request_queue->queue_lock); - __blk_run_queue(sdev->request_queue); - spin_unlock(sdev->request_queue->queue_lock); - spin_lock(shost->host_lock); + /* + * Once we drop the host lock, a racing scsi_remove_device() + * call may remove the sdev from the starved list and destroy + * it and the queue. 
Mitigate by taking a reference to the + * queue and never touching the sdev again after we drop the + * host lock. Note: if __scsi_remove_device() invokes + * blk_cleanup_queue() before the queue is run from this + * function then blk_run_queue() will return immediately since + * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING. + */ + slq = sdev->request_queue; + if (!blk_get_queue(slq)) + continue; + spin_unlock_irqrestore(shost->host_lock, flags); + + blk_run_queue(slq); + blk_put_queue(slq); + + spin_lock_irqsave(shost->host_lock, flags); } /* put any unprocessed entries back */ list_splice(&starved_list, &shost->starved_list); spin_unlock_irqrestore(shost->host_lock, flags); +} + +/* + * Function: scsi_run_queue() + * + * Purpose: Select a proper request queue to serve next + * + * Arguments: q - last request's queue + * + * Returns: Nothing + * + * Notes: The previous command was completely finished, start + * a new one if possible. + */ +static void scsi_run_queue(struct request_queue *q) +{ + struct scsi_device *sdev = q->queuedata; + + if (scsi_target(sdev)->single_lun) + scsi_single_lun_run(sdev); + if (!list_empty(&sdev->host->starved_list)) + scsi_starved_list_run(sdev->host); blk_run_queue(q); } @@ -499,16 +481,10 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) struct request *req = cmd->request; unsigned long flags; - /* - * We need to hold a reference on the device to avoid the queue being - * killed after the unlock and before scsi_run_queue is invoked which - * may happen because scsi_unprep_request() puts the command which - * releases its reference on the device. - */ - get_device(&sdev->sdev_gendev); - spin_lock_irqsave(q->queue_lock, flags); - scsi_unprep_request(req); + blk_unprep_request(req); + req->special = NULL; + scsi_put_command(cmd); blk_requeue_request(q, req); spin_unlock_irqrestore(q->queue_lock, flags); @@ -522,13 +498,9 @@ void scsi_next_command(struct scsi_cmnd *cmd) struct scsi_device *sdev = cmd->device; struct request_queue *q = sdev->request_queue; - /* need to hold a reference on the device before we let go of the cmd */ - get_device(&sdev->sdev_gendev); - scsi_put_command(cmd); scsi_run_queue(q); - /* ok to remove device now */ put_device(&sdev->sdev_gendev); } @@ -540,68 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost) scsi_run_queue(sdev->request_queue); } -static void __scsi_release_buffers(struct scsi_cmnd *, int); - -/* - * Function: scsi_end_request() - * - * Purpose: Post-processing of completed commands (usually invoked at end - * of upper level post-processing and scsi_io_completion). - * - * Arguments: cmd - command that is complete. - * error - 0 if I/O indicates success, < 0 for I/O error. - * bytes - number of bytes of completed I/O - * requeue - indicates whether we should requeue leftovers. - * - * Lock status: Assumed that lock is not held upon entry. - * - * Returns: cmd if requeue required, NULL otherwise. - * - * Notes: This is called for block device requests in order to - * mark some number of sectors as complete. - * - * We are guaranteeing that the request queue will be goosed - * at some point during this call. - * Notes: If cmd was requeued, upon return it will be a stale pointer. 
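The starved-list walk earlier in this hunk is worth calling out: it pins each request queue with blk_get_queue() before dropping the host lock, so a racing scsi_remove_device() cannot free the queue (or the sdev behind it) underneath blk_run_queue(). Reduced to the core reference pattern (a sketch, assuming <linux/blkdev.h>):

    /* Run a queue found under a lock that does not protect the queue's
     * lifetime. blk_get_queue() fails once the queue is dying, in which
     * case the queue is simply skipped. */
    static void run_queue_safely(spinlock_t *lock, struct request_queue *q)
    {
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            if (!blk_get_queue(q)) {        /* already dying? skip it */
                    spin_unlock_irqrestore(lock, flags);
                    return;
            }
            spin_unlock_irqrestore(lock, flags);

            blk_run_queue(q);               /* safe: we hold a reference */
            blk_put_queue(q);
    }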
- */ -static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, - int bytes, int requeue) -{ - struct request_queue *q = cmd->device->request_queue; - struct request *req = cmd->request; - - /* - * If there are blocks left over at the end, set up the command - * to queue the remainder of them. - */ - if (blk_end_request(req, error, bytes)) { - /* kill remainder if no retrys */ - if (error && scsi_noretry_cmd(cmd)) - blk_end_request_all(req, error); - else { - if (requeue) { - /* - * Bleah. Leftovers again. Stick the - * leftovers in the front of the - * queue, and goose the queue again. - */ - scsi_release_buffers(cmd); - scsi_requeue_command(q, cmd); - cmd = NULL; - } - return cmd; - } - } - - /* - * This will goose the queue request function at the end, so we don't - * need to worry about launching another command. - */ - __scsi_release_buffers(cmd, 0); - scsi_next_command(cmd); - return NULL; -} - static inline unsigned int scsi_sgtable_index(unsigned short nents) { unsigned int index; @@ -653,30 +563,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb) __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); } -static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) -{ - - if (cmd->sdb.table.nents) - scsi_free_sgtable(&cmd->sdb); - - memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - - if (do_bidi_check && scsi_bidi_cmnd(cmd)) { - struct scsi_data_buffer *bidi_sdb = - cmd->request->next_rq->special; - scsi_free_sgtable(bidi_sdb); - kmem_cache_free(scsi_sdb_cache, bidi_sdb); - cmd->request->next_rq->special = NULL; - } - - if (scsi_prot_sg_count(cmd)) - scsi_free_sgtable(cmd->prot_sdb); -} - /* * Function: scsi_release_buffers() * - * Purpose: Completion processing for block device I/O requests. + * Purpose: Free resources allocate for a scsi_command. * * Arguments: cmd - command that we are bailing. * @@ -687,15 +577,43 @@ static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) * Notes: In the event that an upper level driver rejects a * command, we must release resources allocated during * the __init_io() function. Primarily this would involve - * the scatter-gather table, and potentially any bounce - * buffers. + * the scatter-gather table. */ void scsi_release_buffers(struct scsi_cmnd *cmd) { - __scsi_release_buffers(cmd, 1); + if (cmd->sdb.table.nents) + scsi_free_sgtable(&cmd->sdb); + + memset(&cmd->sdb, 0, sizeof(cmd->sdb)); + + if (scsi_prot_sg_count(cmd)) + scsi_free_sgtable(cmd->prot_sdb); } EXPORT_SYMBOL(scsi_release_buffers); +static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) +{ + struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; + + scsi_free_sgtable(bidi_sdb); + kmem_cache_free(scsi_sdb_cache, bidi_sdb); + cmd->request->next_rq->special = NULL; +} + +/** + * __scsi_error_from_host_byte - translate SCSI error code into errno + * @cmd: SCSI command (unused) + * @result: scsi error code + * + * Translate SCSI error code into standard UNIX errno. 
+ * Return values: + * -ENOLINK temporary transport failure + * -EREMOTEIO permanent target failure, do not retry + * -EBADE permanent nexus failure, retry on other path + * -ENOSPC No write space available + * -ENODATA Medium error + * -EIO unspecified I/O error + */ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) { int error = 0; @@ -712,6 +630,14 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) set_host_byte(cmd, DID_OK); error = -EBADE; break; + case DID_ALLOC_FAILURE: + set_host_byte(cmd, DID_OK); + error = -ENOSPC; + break; + case DID_MEDIUM_ERROR: + set_host_byte(cmd, DID_OK); + error = -ENODATA; + break; default: error = -EIO; break; @@ -731,16 +657,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) * * Returns: Nothing * - * Notes: This function is matched in terms of capabilities to - * the function that created the scatter-gather list. - * In other words, if there are no bounce buffers - * (the normal case for most drivers), we don't need - * the logic to deal with cleaning up afterwards. - * - * We must call scsi_end_request(). This will finish off - * the specified number of sectors. If we are done, the - * command block will be released and the queue function - * will be goosed. If we are not done then we have to + * Notes: We will finish off the specified number of sectors. If we + * are done, the command block will be released and the queue + * function will be goosed. If we are not done then we have to * figure out what to do next: * * a) We can call scsi_requeue_command(). The request @@ -749,7 +668,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) * be used if we made forward progress, or if we want * to switch from READ(10) to READ(6) for example. * - * b) We can call scsi_queue_insert(). The request will + * b) We can call __scsi_queue_insert(). The request will * be put back on the queue and retried using the same * command as before, possibly after a delay. * @@ -768,6 +687,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, ACTION_DELAYED_RETRY} action; char *description = NULL; + unsigned long wait_for = (cmd->allowed + 1) * req->timeout; if (result) { sense_valid = scsi_command_normalize_sense(cmd, &sshdr); @@ -806,11 +726,21 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) req->next_rq->resid_len = scsi_in(cmd)->resid; scsi_release_buffers(cmd); + scsi_release_bidi_buffers(cmd); + blk_end_request_all(req, 0); scsi_next_command(cmd); return; } + } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) { + /* + * Certain non BLOCK_PC requests are commands that don't + * actually transfer anything (FLUSH), so cannot use + * good_bytes != blk_rq_bytes(req) as the signal for an error. + * This sets the error explicitly for the problem case. + */ + error = __scsi_error_from_host_byte(cmd, result); } /* no bidi support for !REQ_TYPE_BLOCK_PC yet */ @@ -845,12 +775,25 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } /* - * A number of bytes were successfully read. If there - * are leftovers and there is some kind of error - * (result != 0), retry the rest. + * If we finished all bytes in the request we are done now. */ - if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) - return; + if (!blk_end_request(req, error, good_bytes)) + goto next_command; + + /* + * Kill remainder if no retrys. 
+ */ + if (error && scsi_noretry_cmd(cmd)) { + blk_end_request_all(req, error); + goto next_command; + } + + /* + * If there had been no error, but we have leftover bytes in the + * requeues just queue the command up again. + */ + if (result == 0) + goto requeue; error = __scsi_error_from_host_byte(cmd, result); @@ -969,10 +912,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) action = ACTION_FAIL; } + if (action != ACTION_FAIL && + time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { + action = ACTION_FAIL; + description = "Command timed out"; + } + switch (action) { case ACTION_FAIL: /* Give up and fail the remainder of the request */ - scsi_release_buffers(cmd); if (!(req->cmd_flags & REQ_QUIET)) { if (description) scmd_printk(KERN_INFO, cmd, "%s\n", @@ -982,12 +930,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) scsi_print_sense("", cmd); scsi_print_command(cmd); } - if (blk_end_request_err(req, error)) - scsi_requeue_command(q, cmd); - else - scsi_next_command(cmd); - break; + if (!blk_end_request_err(req, error)) + goto next_command; + /*FALLTHRU*/ case ACTION_REPREP: + requeue: /* Unprep the request and put it back at the head of the queue. * A new command will be prepared and issued. */ @@ -1003,6 +950,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0); break; } + return; + +next_command: + scsi_release_buffers(cmd); + scsi_next_command(cmd); } static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, @@ -1018,8 +970,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, return BLKPREP_DEFER; } - req->buffer = NULL; - /* * Next, walk the list, and fill in the addresses and sizes of * each segment. 
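With scsi_end_request() gone, the completion flow above is explicit: finish whatever completed, kill the remainder when retries are disallowed, requeue when bytes are left over but there was no error, and only then fall into the sense-driven action switch, which now also force-fails any command whose total recovery time has exceeded (cmd->allowed + 1) * req->timeout. Compressed into a skeleton (a sketch; requeue() and decide_action() are hypothetical stand-ins for the requeue label and the ACTION_* switch, and bidi/BLOCK_PC handling is omitted):

    void requeue(struct scsi_cmnd *cmd);        /* hypothetical */
    void decide_action(struct scsi_cmnd *cmd);  /* hypothetical */

    static void io_completion(struct scsi_cmnd *cmd, int error,
                              unsigned int good_bytes, int result)
    {
            struct request *req = cmd->request;

            if (!blk_end_request(req, error, good_bytes))
                    goto next_command;      /* request fully completed */

            if (error && scsi_noretry_cmd(cmd)) {
                    blk_end_request_all(req, error); /* kill remainder */
                    goto next_command;
            }

            if (result == 0) {
                    requeue(cmd);           /* leftovers but no error */
                    return;
            }

            decide_action(cmd);             /* retry/fail/reprep on sense */
            return;

    next_command:
            scsi_release_buffers(cmd);
            scsi_next_command(cmd);
    }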
@@ -1044,6 +994,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, */ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) { + struct scsi_device *sdev = cmd->device; struct request *rq = cmd->request; int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); @@ -1091,6 +1042,7 @@ err_exit: scsi_release_buffers(cmd); cmd->request->special = NULL; scsi_put_command(cmd); + put_device(&sdev->sdev_gendev); return error; } EXPORT_SYMBOL(scsi_init_io); @@ -1101,9 +1053,15 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, struct scsi_cmnd *cmd; if (!req->special) { + /* Bail if we can't get a reference to the device */ + if (!get_device(&sdev->sdev_gendev)) + return NULL; + cmd = scsi_get_command(sdev, GFP_ATOMIC); - if (unlikely(!cmd)) + if (unlikely(!cmd)) { + put_device(&sdev->sdev_gendev); return NULL; + } req->special = cmd; } else { cmd = req->special; @@ -1121,15 +1079,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) { - struct scsi_cmnd *cmd; - int ret = scsi_prep_state_check(sdev, req); - - if (ret != BLKPREP_OK) - return ret; - - cmd = scsi_get_cmd_from_req(sdev, req); - if (unlikely(!cmd)) - return BLKPREP_DEFER; + struct scsi_cmnd *cmd = req->special; /* * BLOCK_PC requests may transfer data, in which case they must @@ -1149,7 +1099,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) BUG_ON(blk_rq_bytes(req)); memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - req->buffer = NULL; } cmd->cmd_len = req->cmd_len; @@ -1173,15 +1122,11 @@ EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); */ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) { - struct scsi_cmnd *cmd; - int ret = scsi_prep_state_check(sdev, req); - - if (ret != BLKPREP_OK) - return ret; + struct scsi_cmnd *cmd = req->special; if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh && sdev->scsi_dh_data->scsi_dh->prep_fn)) { - ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); + int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); if (ret != BLKPREP_OK) return ret; } @@ -1191,16 +1136,13 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) */ BUG_ON(!req->nr_phys_segments); - cmd = scsi_get_cmd_from_req(sdev, req); - if (unlikely(!cmd)) - return BLKPREP_DEFER; - memset(cmd->cmnd, 0, BLK_MAX_CDB); return scsi_init_io(cmd, GFP_ATOMIC); } EXPORT_SYMBOL(scsi_setup_fs_cmnd); -int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) +static int +scsi_prep_state_check(struct scsi_device *sdev, struct request *req) { int ret = BLKPREP_OK; @@ -1252,9 +1194,9 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) } return ret; } -EXPORT_SYMBOL(scsi_prep_state_check); -int scsi_prep_return(struct request_queue *q, struct request *req, int ret) +static int +scsi_prep_return(struct request_queue *q, struct request *req, int ret) { struct scsi_device *sdev = q->queuedata; @@ -1266,6 +1208,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret) struct scsi_cmnd *cmd = req->special; scsi_release_buffers(cmd); scsi_put_command(cmd); + put_device(&sdev->sdev_gendev); req->special = NULL; } break; @@ -1284,18 +1227,44 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret) return ret; } -EXPORT_SYMBOL(scsi_prep_return); -int scsi_prep_fn(struct request_queue *q, struct request *req) +static int scsi_prep_fn(struct request_queue *q, 
struct request *req) { struct scsi_device *sdev = q->queuedata; - int ret = BLKPREP_KILL; + struct scsi_cmnd *cmd; + int ret; + + ret = scsi_prep_state_check(sdev, req); + if (ret != BLKPREP_OK) + goto out; - if (req->cmd_type == REQ_TYPE_BLOCK_PC) + cmd = scsi_get_cmd_from_req(sdev, req); + if (unlikely(!cmd)) { + ret = BLKPREP_DEFER; + goto out; + } + + if (req->cmd_type == REQ_TYPE_FS) + ret = scsi_cmd_to_driver(cmd)->init_command(cmd); + else if (req->cmd_type == REQ_TYPE_BLOCK_PC) ret = scsi_setup_blk_pc_cmnd(sdev, req); + else + ret = BLKPREP_KILL; + +out: return scsi_prep_return(q, req, ret); } -EXPORT_SYMBOL(scsi_prep_fn); + +static void scsi_unprep_fn(struct request_queue *q, struct request *req) +{ + if (req->cmd_type == REQ_TYPE_FS) { + struct scsi_cmnd *cmd = req->special; + struct scsi_driver *drv = scsi_cmd_to_driver(cmd); + + if (drv->uninit_command) + drv->uninit_command(cmd); + } +} /* * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else @@ -1523,16 +1492,14 @@ static void scsi_softirq_done(struct request *rq) * Lock status: IO request lock assumed to be held when called. */ static void scsi_request_fn(struct request_queue *q) + __releases(q->queue_lock) + __acquires(q->queue_lock) { struct scsi_device *sdev = q->queuedata; struct Scsi_Host *shost; struct scsi_cmnd *cmd; struct request *req; - if(!get_device(&sdev->sdev_gendev)) - /* We must be tearing the block queue down already */ - return; - /* * To start with, we keep looping until the queue is empty, or until * the host is no longer able to accept any more requests. @@ -1621,7 +1588,7 @@ static void scsi_request_fn(struct request_queue *q) goto out_delay; } - goto out; + return; not_ready: spin_unlock_irq(shost->host_lock); @@ -1640,12 +1607,6 @@ static void scsi_request_fn(struct request_queue *q) out_delay: if (sdev->device_busy == 0) blk_delay_queue(q, SCSI_QUEUE_DELAY); -out: - /* must be careful here...if we trigger the ->remove() function - * we cannot be holding the q lock */ - spin_unlock_irq(q->queue_lock); - put_device(&sdev->sdev_gendev); - spin_lock_irq(q->queue_lock); } u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) @@ -1664,7 +1625,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) host_dev = scsi_get_device(shost); if (host_dev && host_dev->dma_mask) - bounce_limit = *host_dev->dma_mask; + bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT; return bounce_limit; } @@ -1724,6 +1685,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) return NULL; blk_queue_prep_rq(q, scsi_prep_fn); + blk_queue_unprep_rq(q, scsi_unprep_fn); blk_queue_softirq_done(q, scsi_softirq_done); blk_queue_rq_timed_out(q, scsi_times_out); blk_queue_lld_busy(q, scsi_lld_busy); @@ -2173,6 +2135,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) case SDEV_OFFLINE: case SDEV_TRANSPORT_OFFLINE: case SDEV_CANCEL: + case SDEV_CREATED_BLOCK: break; default: goto illegal; @@ -2210,7 +2173,21 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) case SDEV_EVT_MEDIA_CHANGE: envp[idx++] = "SDEV_MEDIA_CHANGE=1"; break; - + case SDEV_EVT_INQUIRY_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; + break; + case SDEV_EVT_CAPACITY_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED"; + break; + case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: + envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED"; + break; + case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: + envp[idx++] = 
"SDEV_UA=MODE_PARAMETERS_CHANGED"; + break; + case SDEV_EVT_LUN_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED"; + break; default: /* do nothing */ break; @@ -2231,10 +2208,15 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) void scsi_evt_thread(struct work_struct *work) { struct scsi_device *sdev; + enum scsi_device_event evt_type; LIST_HEAD(event_list); sdev = container_of(work, struct scsi_device, event_work); + for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) + if (test_and_clear_bit(evt_type, sdev->pending_events)) + sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); + while (1) { struct scsi_event *evt; struct list_head *this, *tmp; @@ -2304,6 +2286,11 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, /* evt_type-specific initialization, if any */ switch (evt_type) { case SDEV_EVT_MEDIA_CHANGE: + case SDEV_EVT_INQUIRY_CHANGE_REPORTED: + case SDEV_EVT_CAPACITY_CHANGE_REPORTED: + case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: + case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: + case SDEV_EVT_LUN_CHANGE_REPORTED: default: /* do nothing */ break; diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index 65123a21b97..109802f776e 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c @@ -50,7 +50,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb) u32 rlen; int err, tport; - while (skb->len >= NLMSG_SPACE(0)) { + while (skb->len >= NLMSG_HDRLEN) { err = 0; nlh = nlmsg_hdr(skb); @@ -70,14 +70,14 @@ scsi_nl_rcv_msg(struct sk_buff *skb) goto next_msg; } - hdr = NLMSG_DATA(nlh); + hdr = nlmsg_data(nlh); if ((hdr->version != SCSI_NL_VERSION) || (hdr->magic != SCSI_NL_MAGIC)) { err = -EPROTOTYPE; goto next_msg; } - if (!capable(CAP_SYS_ADMIN)) { + if (!netlink_capable(skb, CAP_SYS_ADMIN)) { err = -EPERM; goto next_msg; } diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index 8f6b12cbd22..7454498c409 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -16,37 +16,79 @@ #include "scsi_priv.h" -static int scsi_dev_type_suspend(struct device *dev, int (*cb)(struct device *)) +#ifdef CONFIG_PM_SLEEP + +static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm) +{ + return pm && pm->suspend ? pm->suspend(dev) : 0; +} + +static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm) +{ + return pm && pm->freeze ? pm->freeze(dev) : 0; +} + +static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm) +{ + return pm && pm->poweroff ? pm->poweroff(dev) : 0; +} + +static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm) { + return pm && pm->resume ? pm->resume(dev) : 0; +} + +static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm) +{ + return pm && pm->thaw ? pm->thaw(dev) : 0; +} + +static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm) +{ + return pm && pm->restore ? pm->restore(dev) : 0; +} + +static int scsi_dev_type_suspend(struct device *dev, + int (*cb)(struct device *, const struct dev_pm_ops *)) +{ + const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; int err; + /* flush pending in-flight resume operations, suspend is synchronous */ + async_synchronize_full_domain(&scsi_sd_pm_domain); + err = scsi_device_quiesce(to_scsi_device(dev)); if (err == 0) { - if (cb) { - err = cb(dev); - if (err) - scsi_device_resume(to_scsi_device(dev)); - } + err = cb(dev, pm); + if (err) + scsi_device_resume(to_scsi_device(dev)); } dev_dbg(dev, "scsi suspend: %d\n", err); return err; } -static int scsi_dev_type_resume(struct device *dev, int (*cb)(struct device *)) +static int scsi_dev_type_resume(struct device *dev, + int (*cb)(struct device *, const struct dev_pm_ops *)) { + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int err = 0; - if (cb) - err = cb(dev); + err = cb(dev, pm); scsi_device_resume(to_scsi_device(dev)); dev_dbg(dev, "scsi resume: %d\n", err); + + if (err == 0) { + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } + return err; } -#ifdef CONFIG_PM_SLEEP - static int -scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *)) +scsi_bus_suspend_common(struct device *dev, + int (*cb)(struct device *, const struct dev_pm_ops *)) { int err = 0; @@ -54,7 +96,8 @@ scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *)) /* * All the high-level SCSI drivers that implement runtime * PM treat runtime suspend, system suspend, and system - * hibernate identically. + * hibernate nearly identically. In all cases the requirements + * for runtime suspension are stricter. */ if (pm_runtime_suspended(dev)) return 0; @@ -65,20 +108,54 @@ scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *)) return err; } -static int -scsi_bus_resume_common(struct device *dev, int (*cb)(struct device *)) +static void async_sdev_resume(void *dev, async_cookie_t cookie) { - int err = 0; + scsi_dev_type_resume(dev, do_scsi_resume); +} - if (scsi_is_sdev_device(dev)) - err = scsi_dev_type_resume(dev, cb); +static void async_sdev_thaw(void *dev, async_cookie_t cookie) +{ + scsi_dev_type_resume(dev, do_scsi_thaw); +} - if (err == 0) { +static void async_sdev_restore(void *dev, async_cookie_t cookie) +{ + scsi_dev_type_resume(dev, do_scsi_restore); +} + +static int scsi_bus_resume_common(struct device *dev, + int (*cb)(struct device *, const struct dev_pm_ops *)) +{ + async_func_t fn; + + if (!scsi_is_sdev_device(dev)) + fn = NULL; + else if (cb == do_scsi_resume) + fn = async_sdev_resume; + else if (cb == do_scsi_thaw) + fn = async_sdev_thaw; + else if (cb == do_scsi_restore) + fn = async_sdev_restore; + else + fn = NULL; + + if (fn) { + async_schedule_domain(fn, dev, &scsi_sd_pm_domain); + + /* + * If a user has disabled async probing a likely reason + * is due to a storage enclosure that does not inject + * staggered spin-ups. For safety, make resume + * synchronous as well in that case. + */ + if (strncmp(scsi_scan_type, "async", 5) != 0) + async_synchronize_full_domain(&scsi_sd_pm_domain); + } else { pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); } - return err; + return 0; } static int scsi_bus_prepare(struct device *dev) @@ -96,38 +173,32 @@ static int scsi_bus_prepare(struct device *dev) static int scsi_bus_suspend(struct device *dev) { - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - return scsi_bus_suspend_common(dev, pm ? pm->suspend : NULL); + return scsi_bus_suspend_common(dev, do_scsi_suspend); } static int scsi_bus_resume(struct device *dev) { - const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; - return scsi_bus_resume_common(dev, pm ? pm->resume : NULL); + return scsi_bus_resume_common(dev, do_scsi_resume); } static int scsi_bus_freeze(struct device *dev) { - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - return scsi_bus_suspend_common(dev, pm ? pm->freeze : NULL); + return scsi_bus_suspend_common(dev, do_scsi_freeze); } static int scsi_bus_thaw(struct device *dev) { - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - return scsi_bus_resume_common(dev, pm ? pm->thaw : NULL); + return scsi_bus_resume_common(dev, do_scsi_thaw); } static int scsi_bus_poweroff(struct device *dev) { - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - return scsi_bus_suspend_common(dev, pm ? pm->poweroff : NULL); + return scsi_bus_suspend_common(dev, do_scsi_poweroff); } static int scsi_bus_restore(struct device *dev) { - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - return scsi_bus_resume_common(dev, pm ? pm->restore : NULL); + return scsi_bus_resume_common(dev, do_scsi_restore); } #else /* CONFIG_PM_SLEEP */ @@ -144,33 +215,56 @@ static int scsi_bus_restore(struct device *dev) #ifdef CONFIG_PM_RUNTIME +static int sdev_runtime_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + struct scsi_device *sdev = to_scsi_device(dev); + int err; + + err = blk_pre_runtime_suspend(sdev->request_queue); + if (err) + return err; + if (pm && pm->runtime_suspend) + err = pm->runtime_suspend(dev); + blk_post_runtime_suspend(sdev->request_queue, err); + + return err; +} + static int scsi_runtime_suspend(struct device *dev) { int err = 0; - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; dev_dbg(dev, "scsi_runtime_suspend\n"); - if (scsi_is_sdev_device(dev)) { - err = scsi_dev_type_suspend(dev, - pm ? pm->runtime_suspend : NULL); - if (err == -EAGAIN) - pm_schedule_suspend(dev, jiffies_to_msecs( - round_jiffies_up_relative(HZ/10))); - } + if (scsi_is_sdev_device(dev)) + err = sdev_runtime_suspend(dev); /* Insert hooks here for targets, hosts, and transport classes */ return err; } +static int sdev_runtime_resume(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int err = 0; + + blk_pre_runtime_resume(sdev->request_queue); + if (pm && pm->runtime_resume) + err = pm->runtime_resume(dev); + blk_post_runtime_resume(sdev->request_queue, err); + + return err; +} + static int scsi_runtime_resume(struct device *dev) { int err = 0; - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; dev_dbg(dev, "scsi_runtime_resume\n"); if (scsi_is_sdev_device(dev)) - err = scsi_dev_type_resume(dev, pm ? 
pm->runtime_resume : NULL); + err = sdev_runtime_resume(dev); /* Insert hooks here for targets, hosts, and transport classes */ @@ -179,17 +273,17 @@ static int scsi_runtime_resume(struct device *dev) static int scsi_runtime_idle(struct device *dev) { - int err; - dev_dbg(dev, "scsi_runtime_idle\n"); /* Insert hooks here for targets, hosts, and transport classes */ - if (scsi_is_sdev_device(dev)) - err = pm_schedule_suspend(dev, 100); - else - err = pm_runtime_suspend(dev); - return err; + if (scsi_is_sdev_device(dev)) { + pm_runtime_mark_last_busy(dev); + pm_runtime_autosuspend(dev); + return -EBUSY; + } + + return 0; } int scsi_autopm_get_device(struct scsi_device *sdev) diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 8f9a0cadc29..48e5b657e79 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -19,6 +19,7 @@ struct scsi_nl_hdr; * Scsi Error Handler Flags */ #define SCSI_EH_CANCEL_CMD 0x0001 /* Cancel this cmd */ +#define SCSI_EH_ABORT_SCHEDULED 0x0002 /* Abort has been scheduled */ #define SCSI_SENSE_VALID(scmd) \ (((scmd)->sense_buffer[0] & 0x70) == 0x70) @@ -66,6 +67,7 @@ extern int __init scsi_init_devinfo(void); extern void scsi_exit_devinfo(void); /* scsi_error.c */ +extern void scmd_eh_abort_handler(struct work_struct *work); extern enum blk_eh_timer_return scsi_times_out(struct request *req); extern int scsi_error_handler(void *host); extern int scsi_decide_disposition(struct scsi_cmnd *cmd); @@ -110,6 +112,7 @@ extern void scsi_exit_procfs(void); #endif /* CONFIG_PROC_FS */ /* scsi_scan.c */ +extern char scsi_scan_type[]; extern int scsi_complete_async_scans(void); extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, unsigned int, unsigned int, int); @@ -164,6 +167,7 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; } static inline void scsi_autopm_put_host(struct Scsi_Host *h) {} #endif /* CONFIG_PM_RUNTIME */ +extern struct async_domain scsi_sd_pm_domain; extern struct async_domain scsi_sd_probe_domain; /* diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index ad747dc337d..86f0c5d5c11 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -45,58 +45,51 @@ static struct proc_dir_entry *proc_scsi; /* Protect sht->present and sht->proc_dir */ static DEFINE_MUTEX(global_host_template_mutex); -/** - * proc_scsi_read - handle read from /proc by calling host's proc_info() command - * @buffer: passed to proc_info - * @start: passed to proc_info - * @offset: passed to proc_info - * @length: passed to proc_info - * @eof: returns whether length read was less than requested - * @data: pointer to a &struct Scsi_Host - */ - -static int proc_scsi_read(char *buffer, char **start, off_t offset, - int length, int *eof, void *data) -{ - struct Scsi_Host *shost = data; - int n; - - n = shost->hostt->proc_info(shost, buffer, start, offset, length, 0); - *eof = (n < length); - - return n; -} - -/** - * proc_scsi_write_proc - Handle write to /proc by calling host's proc_info() - * @file: not used - * @buf: source of data to write. - * @count: number of bytes (at most PROC_BLOCK_SIZE) to write. 
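Before the /proc rework below, note the runtime-PM change earlier in this hunk: scsi_runtime_idle() no longer schedules a suspend itself. For SCSI devices it refreshes the busy timestamp, requests an autosuspend, and returns -EBUSY so the PM core leaves the device alone until the autosuspend timer expires. The idiom (a sketch, assuming <linux/pm_runtime.h>):

    /* Defer runtime suspend to the autosuspend timer instead of
     * suspending synchronously from the idle callback. */
    static int runtime_idle(struct device *dev)
    {
            pm_runtime_mark_last_busy(dev);  /* restart the idle clock */
            pm_runtime_autosuspend(dev);     /* suspend when it expires */
            return -EBUSY;                   /* tell the PM core: not now */
    }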
- * @data: pointer to &struct Scsi_Host - */ -static int proc_scsi_write_proc(struct file *file, const char __user *buf, - unsigned long count, void *data) +static ssize_t proc_scsi_host_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) { - struct Scsi_Host *shost = data; + struct Scsi_Host *shost = PDE_DATA(file_inode(file)); ssize_t ret = -ENOMEM; char *page; - char *start; if (count > PROC_BLOCK_SIZE) return -EOVERFLOW; + if (!shost->hostt->write_info) + return -EINVAL; + page = (char *)__get_free_page(GFP_KERNEL); if (page) { ret = -EFAULT; if (copy_from_user(page, buf, count)) goto out; - ret = shost->hostt->proc_info(shost, page, &start, 0, count, 1); + ret = shost->hostt->write_info(shost, page, count); } out: free_page((unsigned long)page); return ret; } +static int proc_scsi_show(struct seq_file *m, void *v) +{ + struct Scsi_Host *shost = m->private; + return shost->hostt->show_info(m, shost); +} + +static int proc_scsi_host_open(struct inode *inode, struct file *file) +{ + return single_open_size(file, proc_scsi_show, PDE_DATA(inode), + 4 * PAGE_SIZE); +} + +static const struct file_operations proc_scsi_fops = { + .open = proc_scsi_host_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, + .write = proc_scsi_host_write +}; + /** * scsi_proc_hostdir_add - Create directory in /proc for a scsi host * @sht: owner of this directory @@ -106,7 +99,7 @@ out: void scsi_proc_hostdir_add(struct scsi_host_template *sht) { - if (!sht->proc_info) + if (!sht->show_info) return; mutex_lock(&global_host_template_mutex); @@ -125,7 +118,7 @@ void scsi_proc_hostdir_add(struct scsi_host_template *sht) */ void scsi_proc_hostdir_rm(struct scsi_host_template *sht) { - if (!sht->proc_info) + if (!sht->show_info) return; mutex_lock(&global_host_template_mutex); @@ -151,16 +144,12 @@ void scsi_proc_host_add(struct Scsi_Host *shost) return; sprintf(name,"%d", shost->host_no); - p = create_proc_read_entry(name, S_IFREG | S_IRUGO | S_IWUSR, - sht->proc_dir, proc_scsi_read, shost); - if (!p) { + p = proc_create_data(name, S_IRUGO | S_IWUSR, + sht->proc_dir, &proc_scsi_fops, shost); + if (!p) printk(KERN_ERR "%s: Failed to register host %d in" "%s\n", __func__, shost->host_no, sht->proc_name); - return; - } - - p->write_proc = proc_scsi_write_proc; } /** diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 3e58b2245f1..e02b3aab56c 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -97,7 +97,7 @@ MODULE_PARM_DESC(max_luns, #define SCSI_SCAN_TYPE_DEFAULT "sync" #endif -static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT; +char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT; module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO); MODULE_PARM_DESC(scan, "sync, async or none"); @@ -320,6 +320,7 @@ static void scsi_target_destroy(struct scsi_target *starget) struct Scsi_Host *shost = dev_to_shost(dev->parent); unsigned long flags; + starget->state = STARGET_DEL; transport_destroy_device(dev); spin_lock_irqsave(shost->host_lock, flags); if (shost->hostt->target_destroy) @@ -371,6 +372,37 @@ static struct scsi_target *__scsi_find_target(struct device *parent, } /** + * scsi_target_reap_ref_release - remove target from visibility + * @kref: the reap_ref in the target being released + * + * Called on last put of reap_ref, which is the indication that no device + * under this target is visible anymore, so render the target invisible in + * sysfs. 
Note: we have to be in user context here because the target reaps + * should be done in places where the scsi device visibility is being removed. + */ +static void scsi_target_reap_ref_release(struct kref *kref) +{ + struct scsi_target *starget + = container_of(kref, struct scsi_target, reap_ref); + + /* + * if we get here and the target is still in the CREATED state that + * means it was allocated but never made visible (because a scan + * turned up no LUNs), so don't call device_del() on it. + */ + if (starget->state != STARGET_CREATED) { + transport_remove_device(&starget->dev); + device_del(&starget->dev); + } + scsi_target_destroy(starget); +} + +static void scsi_target_reap_ref_put(struct scsi_target *starget) +{ + kref_put(&starget->reap_ref, scsi_target_reap_ref_release); +} + +/** * scsi_alloc_target - allocate a new or find an existing target * @parent: parent of the target (need not be a scsi host) * @channel: target channel number (zero if no channels) @@ -392,7 +424,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, + shost->transportt->target_size; struct scsi_target *starget; struct scsi_target *found_target; - int error; + int error, ref_got; starget = kzalloc(size, GFP_KERNEL); if (!starget) { @@ -401,7 +433,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, } dev = &starget->dev; device_initialize(dev); - starget->reap_ref = 1; + kref_init(&starget->reap_ref); dev->parent = get_device(parent); dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id); dev->bus = &scsi_bus_type; @@ -441,29 +473,36 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, return starget; found: - found_target->reap_ref++; + /* + * release routine already fired if kref is zero, so if we can still + * take the reference, the target must be alive. If we can't, it must + * be dying and we need to wait for a new target + */ + ref_got = kref_get_unless_zero(&found_target->reap_ref); + spin_unlock_irqrestore(shost->host_lock, flags); - if (found_target->state != STARGET_DEL) { + if (ref_got) { put_device(dev); return found_target; } - /* Unfortunately, we found a dying target; need to - * wait until it's dead before we can get a new one */ + /* + * Unfortunately, we found a dying target; need to wait until it's + * dead before we can get a new one. There is an anomaly here. We + * *should* call scsi_target_reap() to balance the kref_get() of the + * reap_ref above. However, since the target being released, it's + * already invisible and the reap_ref is irrelevant. If we call + * scsi_target_reap() we might spuriously do another device_del() on + * an already invisible target. + */ put_device(&found_target->dev); - flush_scheduled_work(); + /* + * length of time is irrelevant here, we just want to yield the CPU + * for a tick to avoid busy waiting for the target to die. 
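The retry just below is the tail end of the standard kref_get_unless_zero() lookup idiom: if the get fails, the release routine is already running, so the searcher drops its reference, yields for a tick, and repeats the lookup until the dying target has left the list. Schematically (a sketch; find_target() is a hypothetical locked lookup, and the put_device() bookkeeping is omitted):

    #include <linux/kref.h>
    #include <linux/delay.h>

    struct scsi_target *find_target(struct Scsi_Host *shost,
                                    int channel, int id);  /* hypothetical */

    static struct scsi_target *get_live_target(struct Scsi_Host *shost,
                                               int channel, int id)
    {
            struct scsi_target *t;

    retry:
            t = find_target(shost, channel, id);
            if (!t)
                    return NULL;                /* no such target */
            if (kref_get_unless_zero(&t->reap_ref))
                    return t;                   /* safely referenced */
            msleep(1);  /* release in flight: yield, then look again */
            goto retry;
    }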
+ */ + msleep(1); goto retry; } -static void scsi_target_reap_usercontext(struct work_struct *work) -{ - struct scsi_target *starget = - container_of(work, struct scsi_target, ew.work); - - transport_remove_device(&starget->dev); - device_del(&starget->dev); - scsi_target_destroy(starget); -} - /** * scsi_target_reap - check to see if target is in use and destroy if not * @starget: target to be checked @@ -474,28 +513,13 @@ static void scsi_target_reap_usercontext(struct work_struct *work) */ void scsi_target_reap(struct scsi_target *starget) { - struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); - unsigned long flags; - enum scsi_target_state state; - int empty = 0; - - spin_lock_irqsave(shost->host_lock, flags); - state = starget->state; - if (--starget->reap_ref == 0 && list_empty(&starget->devices)) { - empty = 1; - starget->state = STARGET_DEL; - } - spin_unlock_irqrestore(shost->host_lock, flags); - - if (!empty) - return; - - BUG_ON(state == STARGET_DEL); - if (state == STARGET_CREATED) - scsi_target_destroy(starget); - else - execute_in_process_context(scsi_target_reap_usercontext, - &starget->ew); + /* + * serious problem if this triggers: STARGET_DEL is only set in the if + * the reap_ref drops to zero, so we're trying to do another final put + * on an already released kref + */ + BUG_ON(starget->state == STARGET_DEL); + scsi_target_reap_ref_put(starget); } /** @@ -924,6 +948,11 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, if (*bflags & BLIST_NO_DIF) sdev->no_dif = 1; + sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; + + if (*bflags & BLIST_SKIP_VPD_PAGES) + sdev->skip_vpd_pages = 1; + transport_configure_device(&sdev->sdev_gendev); if (sdev->host->hostt->slave_configure) { @@ -941,6 +970,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, } } + if (sdev->scsi_level >= SCSI_3) + scsi_attach_vpd(sdev); + sdev->max_queue_depth = sdev->queue_depth; /* @@ -1527,6 +1559,10 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, } mutex_unlock(&shost->scan_mutex); scsi_autopm_put_target(starget); + /* + * paired with scsi_alloc_target(). 
Target will be destroyed unless + * scsi_probe_and_add_lun made an underlying device visible + */ scsi_target_reap(starget); put_device(&starget->dev); @@ -1607,8 +1643,10 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, out_reap: scsi_autopm_put_target(starget); - /* now determine if the target has any children at all - * and if not, nuke it */ + /* + * paired with scsi_alloc_target(): determine if the target has + * any children at all and if not, nuke it + */ scsi_target_reap(starget); put_device(&starget->dev); diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c index 2b6b93f7d8e..546f16299ef 100644 --- a/drivers/scsi/scsi_sysctl.c +++ b/drivers/scsi/scsi_sysctl.c @@ -12,7 +12,7 @@ #include "scsi_priv.h" -static ctl_table scsi_table[] = { +static struct ctl_table scsi_table[] = { { .procname = "logging_level", .data = &scsi_logging_level, .maxlen = sizeof(scsi_logging_level), @@ -21,14 +21,14 @@ static ctl_table scsi_table[] = { { } }; -static ctl_table scsi_dir_table[] = { +static struct ctl_table scsi_dir_table[] = { { .procname = "scsi", .mode = 0555, .child = scsi_table }, { } }; -static ctl_table scsi_root_table[] = { +static struct ctl_table scsi_root_table[] = { { .procname = "dev", .mode = 0555, .child = scsi_dir_table }, diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 931a7d95420..074e8cc3095 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -281,6 +281,58 @@ exit_store_host_reset: static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset); +static ssize_t +show_shost_eh_deadline(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + + if (shost->eh_deadline == -1) + return snprintf(buf, strlen("off") + 2, "off\n"); + return sprintf(buf, "%u\n", shost->eh_deadline / HZ); +} + +static ssize_t +store_shost_eh_deadline(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + int ret = -EINVAL; + unsigned long deadline, flags; + + if (shost->transportt && + (shost->transportt->eh_strategy_handler || + !shost->hostt->eh_host_reset_handler)) + return ret; + + if (!strncmp(buf, "off", strlen("off"))) + deadline = -1; + else { + ret = kstrtoul(buf, 10, &deadline); + if (ret) + return ret; + if (deadline * HZ > UINT_MAX) + return -EINVAL; + } + + spin_lock_irqsave(shost->host_lock, flags); + if (scsi_host_in_recovery(shost)) + ret = -EBUSY; + else { + if (deadline == -1) + shost->eh_deadline = -1; + else + shost->eh_deadline = deadline * HZ; + + ret = count; + } + spin_unlock_irqrestore(shost->host_lock, flags); + + return ret; +} + +static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); + shost_rd_attr(unique_id, "%u\n"); shost_rd_attr(host_busy, "%hu\n"); shost_rd_attr(cmd_per_lun, "%hd\n"); @@ -308,6 +360,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = { &dev_attr_prot_capabilities.attr, &dev_attr_prot_guard_type.attr, &dev_attr_host_reset.attr, + &dev_attr_eh_deadline.attr, NULL }; @@ -332,17 +385,14 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) { struct scsi_device *sdev; struct device *parent; - struct scsi_target *starget; struct list_head *this, *tmp; unsigned long flags; sdev = container_of(work, struct scsi_device, ew.work); parent = sdev->sdev_gendev.parent; - starget = to_scsi_target(parent); spin_lock_irqsave(sdev->host->host_lock, flags); - 
starget->reap_ref++; list_del(&sdev->siblings); list_del(&sdev->same_target_siblings); list_del(&sdev->starved_entry); @@ -362,8 +412,8 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) /* NULL queue means the device can't be used */ sdev->request_queue = NULL; - scsi_target_reap(scsi_target(sdev)); - + kfree(sdev->vpd_pg83); + kfree(sdev->vpd_pg80); kfree(sdev->inquiry); kfree(sdev); @@ -528,7 +578,7 @@ static int scsi_sdev_check_buf_bit(const char *buf) * Create the actual show/store functions and data structures. */ sdev_rd_attr (device_blocked, "%d\n"); -sdev_rd_attr (queue_depth, "%d\n"); +sdev_rd_attr (device_busy, "%d\n"); sdev_rd_attr (type, "%d\n"); sdev_rd_attr (scsi_level, "%d\n"); sdev_rd_attr (vendor, "%.8s\n"); @@ -560,31 +610,49 @@ sdev_store_timeout (struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); static ssize_t -store_rescan_field (struct device *dev, struct device_attribute *attr, +sdev_show_eh_timeout(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev; + sdev = to_scsi_device(dev); + return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ); +} + +static ssize_t +sdev_store_eh_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - scsi_rescan_device(dev); + struct scsi_device *sdev; + unsigned int eh_timeout; + int err; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + sdev = to_scsi_device(dev); + err = kstrtouint(buf, 10, &eh_timeout); + if (err) + return err; + sdev->eh_timeout = eh_timeout * HZ; + return count; } -static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field); +static DEVICE_ATTR(eh_timeout, S_IRUGO | S_IWUSR, sdev_show_eh_timeout, sdev_store_eh_timeout); -static void sdev_store_delete_callback(struct device *dev) +static ssize_t +store_rescan_field (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { - scsi_remove_device(to_scsi_device(dev)); + scsi_rescan_device(dev); + return count; } +static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field); static ssize_t sdev_store_delete(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int rc; - - /* An attribute cannot be unregistered by one of its own methods, - * so we have to use this roundabout approach. 
- */ - rc = device_schedule_callback(dev, sdev_store_delete_callback); - if (rc) - count = rc; + if (device_remove_file_self(dev, attr)) + scsi_remove_device(to_scsi_device(dev)); return count; }; static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); @@ -642,10 +710,64 @@ show_queue_type_field(struct device *dev, struct device_attribute *attr, return snprintf(buf, 20, "%s\n", name); } -static DEVICE_ATTR(queue_type, S_IRUGO, show_queue_type_field, NULL); +static ssize_t +store_queue_type_field(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct scsi_host_template *sht = sdev->host->hostt; + int tag_type = 0, retval; + int prev_tag_type = scsi_get_tag_type(sdev); + + if (!sdev->tagged_supported || !sht->change_queue_type) + return -EINVAL; + + if (strncmp(buf, "ordered", 7) == 0) + tag_type = MSG_ORDERED_TAG; + else if (strncmp(buf, "simple", 6) == 0) + tag_type = MSG_SIMPLE_TAG; + else if (strncmp(buf, "none", 4) != 0) + return -EINVAL; + + if (tag_type == prev_tag_type) + return count; + + retval = sht->change_queue_type(sdev, tag_type); + if (retval < 0) + return retval; + + return count; +} + +static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field, + store_queue_type_field); + +#define sdev_vpd_pg_attr(_page) \ +static ssize_t \ +show_vpd_##_page(struct file *filp, struct kobject *kobj, \ + struct bin_attribute *bin_attr, \ + char *buf, loff_t off, size_t count) \ +{ \ + struct device *dev = container_of(kobj, struct device, kobj); \ + struct scsi_device *sdev = to_scsi_device(dev); \ + if (!sdev->vpd_##_page) \ + return -EINVAL; \ + return memory_read_from_buffer(buf, count, &off, \ + sdev->vpd_##_page, \ + sdev->vpd_##_page##_len); \ +} \ +static struct bin_attribute dev_attr_vpd_##_page = { \ + .attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \ + .size = 0, \ + .read = show_vpd_##_page, \ +}; + +sdev_vpd_pg_attr(pg83); +sdev_vpd_pg_attr(pg80); static ssize_t -show_iostat_counterbits(struct device *dev, struct device_attribute *attr, char *buf) +show_iostat_counterbits(struct device *dev, struct device_attribute *attr, + char *buf) { return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8); } @@ -710,40 +832,15 @@ sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\ #define REF_EVT(name) &dev_attr_evt_##name.attr DECLARE_EVT(media_change, MEDIA_CHANGE) - -/* Default template for device attributes. 
May NOT be modified */ -static struct attribute *scsi_sdev_attrs[] = { - &dev_attr_device_blocked.attr, - &dev_attr_type.attr, - &dev_attr_scsi_level.attr, - &dev_attr_vendor.attr, - &dev_attr_model.attr, - &dev_attr_rev.attr, - &dev_attr_rescan.attr, - &dev_attr_delete.attr, - &dev_attr_state.attr, - &dev_attr_timeout.attr, - &dev_attr_iocounterbits.attr, - &dev_attr_iorequest_cnt.attr, - &dev_attr_iodone_cnt.attr, - &dev_attr_ioerr_cnt.attr, - &dev_attr_modalias.attr, - REF_EVT(media_change), - NULL -}; - -static struct attribute_group scsi_sdev_attr_group = { - .attrs = scsi_sdev_attrs, -}; - -static const struct attribute_group *scsi_sdev_attr_groups[] = { - &scsi_sdev_attr_group, - NULL -}; +DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED) +DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED) +DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED) +DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED) +DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED) static ssize_t -sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +sdev_store_queue_depth(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { int depth, retval; struct scsi_device *sdev = to_scsi_device(dev); @@ -766,10 +863,10 @@ sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr, return count; } +sdev_show_function(queue_depth, "%d\n"); -static struct device_attribute sdev_attr_queue_depth_rw = - __ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth, - sdev_store_queue_depth_rw); +static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth, + sdev_store_queue_depth); static ssize_t sdev_show_queue_ramp_up_period(struct device *dev, @@ -797,40 +894,79 @@ sdev_store_queue_ramp_up_period(struct device *dev, return period; } -static struct device_attribute sdev_attr_queue_ramp_up_period = - __ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, - sdev_show_queue_ramp_up_period, - sdev_store_queue_ramp_up_period); +static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, + sdev_show_queue_ramp_up_period, + sdev_store_queue_ramp_up_period); -static ssize_t -sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) { + struct device *dev = container_of(kobj, struct device, kobj); struct scsi_device *sdev = to_scsi_device(dev); - struct scsi_host_template *sht = sdev->host->hostt; - int tag_type = 0, retval; - int prev_tag_type = scsi_get_tag_type(sdev); - if (!sdev->tagged_supported || !sht->change_queue_type) - return -EINVAL; - if (strncmp(buf, "ordered", 7) == 0) - tag_type = MSG_ORDERED_TAG; - else if (strncmp(buf, "simple", 6) == 0) - tag_type = MSG_SIMPLE_TAG; - else if (strncmp(buf, "none", 4) != 0) - return -EINVAL; + if (attr == &dev_attr_queue_depth.attr && + !sdev->host->hostt->change_queue_depth) + return S_IRUGO; - if (tag_type == prev_tag_type) - return count; + if (attr == &dev_attr_queue_ramp_up_period.attr && + !sdev->host->hostt->change_queue_depth) + return 0; - retval = sht->change_queue_type(sdev, tag_type); - if (retval < 0) - return retval; + if (attr == &dev_attr_queue_type.attr && + !sdev->host->hostt->change_queue_type) + return S_IRUGO; - return count; + return attr->mode; } +/* Default template for device attributes. 
May NOT be modified */ +static struct attribute *scsi_sdev_attrs[] = { + &dev_attr_device_blocked.attr, + &dev_attr_type.attr, + &dev_attr_scsi_level.attr, + &dev_attr_device_busy.attr, + &dev_attr_vendor.attr, + &dev_attr_model.attr, + &dev_attr_rev.attr, + &dev_attr_rescan.attr, + &dev_attr_delete.attr, + &dev_attr_state.attr, + &dev_attr_timeout.attr, + &dev_attr_eh_timeout.attr, + &dev_attr_iocounterbits.attr, + &dev_attr_iorequest_cnt.attr, + &dev_attr_iodone_cnt.attr, + &dev_attr_ioerr_cnt.attr, + &dev_attr_modalias.attr, + &dev_attr_queue_depth.attr, + &dev_attr_queue_type.attr, + &dev_attr_queue_ramp_up_period.attr, + REF_EVT(media_change), + REF_EVT(inquiry_change_reported), + REF_EVT(capacity_change_reported), + REF_EVT(soft_threshold_reached), + REF_EVT(mode_parameter_change_reported), + REF_EVT(lun_change_reported), + NULL +}; + +static struct bin_attribute *scsi_sdev_bin_attrs[] = { + &dev_attr_vpd_pg83, + &dev_attr_vpd_pg80, + NULL +}; +static struct attribute_group scsi_sdev_attr_group = { + .attrs = scsi_sdev_attrs, + .bin_attrs = scsi_sdev_bin_attrs, + .is_visible = scsi_sdev_attr_is_visible, +}; + +static const struct attribute_group *scsi_sdev_attr_groups[] = { + &scsi_sdev_attr_group, + NULL +}; + static int scsi_target_add(struct scsi_target *starget) { int error; @@ -853,10 +989,6 @@ static int scsi_target_add(struct scsi_target *starget) return 0; } -static struct device_attribute sdev_attr_queue_type_rw = - __ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field, - sdev_store_queue_type_rw); - /** * scsi_sysfs_add_sdev - add scsi device to sysfs * @sdev: scsi_device to add @@ -910,25 +1042,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) transport_add_device(&sdev->sdev_gendev); sdev->is_visible = 1; - /* create queue files, which may be writable, depending on the host */ - if (sdev->host->hostt->change_queue_depth) { - error = device_create_file(&sdev->sdev_gendev, - &sdev_attr_queue_depth_rw); - error = device_create_file(&sdev->sdev_gendev, - &sdev_attr_queue_ramp_up_period); - } - else - error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth); - if (error) - return error; - - if (sdev->host->hostt->change_queue_type) - error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw); - else - error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type); - if (error) - return error; - error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL); if (error) @@ -978,6 +1091,13 @@ void __scsi_remove_device(struct scsi_device *sdev) sdev->host->hostt->slave_destroy(sdev); transport_destroy_device(dev); + /* + * Paired with the kref_get() in scsi_sysfs_device_initialize(). We + * have removed sysfs visibility from the device, so make the target + * invisible if this was the last device underneath it. + */ + scsi_target_reap(scsi_target(sdev)); + put_device(dev); } @@ -1040,7 +1160,7 @@ void scsi_remove_target(struct device *dev) continue; if (starget->dev.parent == dev || &starget->dev == dev) { /* assuming new targets arrive at the end */ - starget->reap_ref++; + kref_get(&starget->reap_ref); spin_unlock_irqrestore(shost->host_lock, flags); if (last) scsi_target_reap(last); @@ -1124,6 +1244,12 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev) list_add_tail(&sdev->same_target_siblings, &starget->devices); list_add_tail(&sdev->siblings, &shost->__devices); spin_unlock_irqrestore(shost->host_lock, flags); + /* + * device can now only be removed via __scsi_remove_device() so hold + * the target.
Target will be held in CREATED state until something + * beneath it becomes visible (in which case it moves to RUNNING) + */ + kref_get(&starget->reap_ref); } int scsi_is_sdev_device(const struct device *dev) diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c index 84a1fdf6786..e51add05fb8 100644 --- a/drivers/scsi/scsi_tgt_lib.c +++ b/drivers/scsi/scsi_tgt_lib.c @@ -155,7 +155,8 @@ void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd) __blk_put_request(q, rq); spin_unlock_irqrestore(q->queue_lock, flags); - __scsi_put_command(shost, cmd, &shost->shost_gendev); + __scsi_put_command(shost, cmd); + put_device(&shost->shost_gendev); } EXPORT_SYMBOL_GPL(scsi_host_put_command); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index e894ca7b54c..521f5838594 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -35,7 +35,6 @@ #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_cmnd.h> -#include <linux/netlink.h> #include <net/netlink.h> #include <scsi/scsi_netlink_fc.h> #include <scsi/scsi_bsg_fc.h> @@ -262,6 +261,7 @@ static const struct { { FC_PORTSPEED_10GBIT, "10 Gbit" }, { FC_PORTSPEED_8GBIT, "8 Gbit" }, { FC_PORTSPEED_16GBIT, "16 Gbit" }, + { FC_PORTSPEED_32GBIT, "32 Gbit" }, { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" }, }; fc_bitfield_name_search(port_speed, fc_port_speed_names) @@ -436,7 +436,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name), "fc_wq_%d", shost->host_no); - fc_host->work_q = alloc_workqueue(fc_host->work_q_name, 0, 0); + fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name); if (!fc_host->work_q) return -ENOMEM; @@ -444,8 +444,8 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev, snprintf(fc_host->devloss_work_q_name, sizeof(fc_host->devloss_work_q_name), "fc_dl_%d", shost->host_no); - fc_host->devloss_work_q = - alloc_workqueue(fc_host->devloss_work_q_name, 0, 0); + fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0, + fc_host->devloss_work_q_name); if (!fc_host->devloss_work_q) { destroy_workqueue(fc_host->work_q); fc_host->work_q = NULL; @@ -534,7 +534,7 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number, struct nlmsghdr *nlh; struct fc_nl_event *event; const char *name; - u32 len, skblen; + u32 len; int err; if (!scsi_nl_sock) { @@ -543,21 +543,19 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number, } len = FC_NL_MSGALIGN(sizeof(*event)); - skblen = NLMSG_SPACE(len); - skb = alloc_skb(skblen, GFP_KERNEL); + skb = nlmsg_new(len, GFP_KERNEL); if (!skb) { err = -ENOBUFS; goto send_fail; } - nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, - skblen - sizeof(*nlh), 0); + nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0); if (!nlh) { err = -ENOBUFS; goto send_fail_skb; } - event = NLMSG_DATA(nlh); + event = nlmsg_data(nlh); INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, FC_NL_ASYNC_EVENT, len); @@ -604,7 +602,7 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number, struct sk_buff *skb; struct nlmsghdr *nlh; struct fc_nl_event *event; - u32 len, skblen; + u32 len; int err; if (!scsi_nl_sock) { @@ -613,21 +611,19 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number, } len = FC_NL_MSGALIGN(sizeof(*event) + data_len); - skblen = NLMSG_SPACE(len); - skb = alloc_skb(skblen, GFP_KERNEL); + skb = nlmsg_new(len, GFP_KERNEL); if (!skb) { 
err = -ENOBUFS; goto send_vendor_fail; } - nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, - skblen - sizeof(*nlh), 0); + nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0); if (!nlh) { err = -ENOBUFS; goto send_vendor_fail_skb; } - event = NLMSG_DATA(nlh); + event = nlmsg_data(nlh); INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, FC_NL_ASYNC_EVENT, len); @@ -2553,6 +2549,7 @@ fc_rport_final_delete(struct work_struct *work) fc_flush_devloss(shost); if (!cancel_delayed_work(&rport->dev_loss_work)) fc_flush_devloss(shost); + cancel_work_sync(&rport->scan_work); spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 0a74b975efd..0102a2d70dd 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -305,20 +305,71 @@ show_##type##_##name(struct device *dev, struct device_attribute *attr, \ iscsi_iface_attr_show(type, name, ISCSI_NET_PARAM, param) \ static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL); -/* generic read only ipvi4 attribute */ +#define iscsi_iface_attr(type, name, param) \ + iscsi_iface_attr_show(type, name, ISCSI_IFACE_PARAM, param) \ +static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL); + +/* generic read only ipv4 attribute */ iscsi_iface_net_attr(ipv4_iface, ipaddress, ISCSI_NET_PARAM_IPV4_ADDR); iscsi_iface_net_attr(ipv4_iface, gateway, ISCSI_NET_PARAM_IPV4_GW); iscsi_iface_net_attr(ipv4_iface, subnet, ISCSI_NET_PARAM_IPV4_SUBNET); iscsi_iface_net_attr(ipv4_iface, bootproto, ISCSI_NET_PARAM_IPV4_BOOTPROTO); +iscsi_iface_net_attr(ipv4_iface, dhcp_dns_address_en, + ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_slp_da_info_en, + ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN); +iscsi_iface_net_attr(ipv4_iface, tos_en, ISCSI_NET_PARAM_IPV4_TOS_EN); +iscsi_iface_net_attr(ipv4_iface, tos, ISCSI_NET_PARAM_IPV4_TOS); +iscsi_iface_net_attr(ipv4_iface, grat_arp_en, + ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id_en, + ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id, + ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID); +iscsi_iface_net_attr(ipv4_iface, dhcp_req_vendor_id_en, + ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_use_vendor_id_en, + ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_vendor_id, + ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID); +iscsi_iface_net_attr(ipv4_iface, dhcp_learn_iqn_en, + ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN); +iscsi_iface_net_attr(ipv4_iface, fragment_disable, + ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE); +iscsi_iface_net_attr(ipv4_iface, incoming_forwarding_en, + ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN); +iscsi_iface_net_attr(ipv4_iface, ttl, ISCSI_NET_PARAM_IPV4_TTL); /* generic read only ipv6 attribute */ iscsi_iface_net_attr(ipv6_iface, ipaddress, ISCSI_NET_PARAM_IPV6_ADDR); -iscsi_iface_net_attr(ipv6_iface, link_local_addr, ISCSI_NET_PARAM_IPV6_LINKLOCAL); +iscsi_iface_net_attr(ipv6_iface, link_local_addr, + ISCSI_NET_PARAM_IPV6_LINKLOCAL); iscsi_iface_net_attr(ipv6_iface, router_addr, ISCSI_NET_PARAM_IPV6_ROUTER); iscsi_iface_net_attr(ipv6_iface, ipaddr_autocfg, ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG); iscsi_iface_net_attr(ipv6_iface, link_local_autocfg, ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG); +iscsi_iface_net_attr(ipv6_iface, link_local_state, + ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE); 
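
For orientation: each declaration above expands, via the iscsi_iface_attr()/iscsi_iface_net_attr() helper macros at the top of this hunk, into a generated show routine plus a read-only device attribute. Below is a rough sketch of what iscsi_iface_attr(iface, header_digest, ISCSI_IFACE_PARAM_HDRDGST_EN) turns into; it assumes the iscsi_iface_attr_show() helper visible above, whose body in mainline defers to the transport's get_iface_param() hook (a sketch, not literal preprocessor output):

	static ssize_t
	show_iface_header_digest(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
		struct iscsi_transport *t = iface->transport;

		/* the LLD's transport formats the value into buf */
		return t->get_iface_param(iface, ISCSI_IFACE_PARAM,
					  ISCSI_IFACE_PARAM_HDRDGST_EN, buf);
	}
	static ISCSI_IFACE_ATTR(iface, header_digest, S_IRUGO,
				show_iface_header_digest, NULL);

Whether the resulting file actually appears in sysfs is then decided per transport by iscsi_iface_attr_is_visible() later in this hunk.
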
+iscsi_iface_net_attr(ipv6_iface, router_state, + ISCSI_NET_PARAM_IPV6_ROUTER_STATE); +iscsi_iface_net_attr(ipv6_iface, grat_neighbor_adv_en, + ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN); +iscsi_iface_net_attr(ipv6_iface, mld_en, ISCSI_NET_PARAM_IPV6_MLD_EN); +iscsi_iface_net_attr(ipv6_iface, flow_label, ISCSI_NET_PARAM_IPV6_FLOW_LABEL); +iscsi_iface_net_attr(ipv6_iface, traffic_class, + ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS); +iscsi_iface_net_attr(ipv6_iface, hop_limit, ISCSI_NET_PARAM_IPV6_HOP_LIMIT); +iscsi_iface_net_attr(ipv6_iface, nd_reachable_tmo, + ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO); +iscsi_iface_net_attr(ipv6_iface, nd_rexmit_time, + ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME); +iscsi_iface_net_attr(ipv6_iface, nd_stale_tmo, + ISCSI_NET_PARAM_IPV6_ND_STALE_TMO); +iscsi_iface_net_attr(ipv6_iface, dup_addr_detect_cnt, + ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT); +iscsi_iface_net_attr(ipv6_iface, router_adv_link_mtu, + ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU); /* common read only iface attribute */ iscsi_iface_net_attr(iface, enabled, ISCSI_NET_PARAM_IFACE_ENABLE); @@ -327,6 +378,40 @@ iscsi_iface_net_attr(iface, vlan_priority, ISCSI_NET_PARAM_VLAN_PRIORITY); iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED); iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU); iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT); +iscsi_iface_net_attr(iface, ipaddress_state, ISCSI_NET_PARAM_IPADDR_STATE); +iscsi_iface_net_attr(iface, delayed_ack_en, ISCSI_NET_PARAM_DELAYED_ACK_EN); +iscsi_iface_net_attr(iface, tcp_nagle_disable, + ISCSI_NET_PARAM_TCP_NAGLE_DISABLE); +iscsi_iface_net_attr(iface, tcp_wsf_disable, ISCSI_NET_PARAM_TCP_WSF_DISABLE); +iscsi_iface_net_attr(iface, tcp_wsf, ISCSI_NET_PARAM_TCP_WSF); +iscsi_iface_net_attr(iface, tcp_timer_scale, ISCSI_NET_PARAM_TCP_TIMER_SCALE); +iscsi_iface_net_attr(iface, tcp_timestamp_en, ISCSI_NET_PARAM_TCP_TIMESTAMP_EN); +iscsi_iface_net_attr(iface, cache_id, ISCSI_NET_PARAM_CACHE_ID); +iscsi_iface_net_attr(iface, redirect_en, ISCSI_NET_PARAM_REDIRECT_EN); + +/* common iscsi specific settings attributes */ +iscsi_iface_attr(iface, def_taskmgmt_tmo, ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO); +iscsi_iface_attr(iface, header_digest, ISCSI_IFACE_PARAM_HDRDGST_EN); +iscsi_iface_attr(iface, data_digest, ISCSI_IFACE_PARAM_DATADGST_EN); +iscsi_iface_attr(iface, immediate_data, ISCSI_IFACE_PARAM_IMM_DATA_EN); +iscsi_iface_attr(iface, initial_r2t, ISCSI_IFACE_PARAM_INITIAL_R2T_EN); +iscsi_iface_attr(iface, data_seq_in_order, + ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN); +iscsi_iface_attr(iface, data_pdu_in_order, ISCSI_IFACE_PARAM_PDU_INORDER_EN); +iscsi_iface_attr(iface, erl, ISCSI_IFACE_PARAM_ERL); +iscsi_iface_attr(iface, max_recv_dlength, ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH); +iscsi_iface_attr(iface, first_burst_len, ISCSI_IFACE_PARAM_FIRST_BURST); +iscsi_iface_attr(iface, max_outstanding_r2t, ISCSI_IFACE_PARAM_MAX_R2T); +iscsi_iface_attr(iface, max_burst_len, ISCSI_IFACE_PARAM_MAX_BURST); +iscsi_iface_attr(iface, chap_auth, ISCSI_IFACE_PARAM_CHAP_AUTH_EN); +iscsi_iface_attr(iface, bidi_chap, ISCSI_IFACE_PARAM_BIDI_CHAP_EN); +iscsi_iface_attr(iface, discovery_auth_optional, + ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL); +iscsi_iface_attr(iface, discovery_logout, + ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN); +iscsi_iface_attr(iface, strict_login_comp_en, + ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN); +iscsi_iface_attr(iface, initiator_name, ISCSI_IFACE_PARAM_INITIATOR_NAME); static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, struct attribute *attr, 
int i) @@ -335,6 +420,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, struct iscsi_iface *iface = iscsi_dev_to_iface(dev); struct iscsi_transport *t = iface->transport; int param; + int param_type; if (attr == &dev_attr_iface_enabled.attr) param = ISCSI_NET_PARAM_IFACE_ENABLE; @@ -348,6 +434,60 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, param = ISCSI_NET_PARAM_MTU; else if (attr == &dev_attr_iface_port.attr) param = ISCSI_NET_PARAM_PORT; + else if (attr == &dev_attr_iface_ipaddress_state.attr) + param = ISCSI_NET_PARAM_IPADDR_STATE; + else if (attr == &dev_attr_iface_delayed_ack_en.attr) + param = ISCSI_NET_PARAM_DELAYED_ACK_EN; + else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) + param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; + else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) + param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; + else if (attr == &dev_attr_iface_tcp_wsf.attr) + param = ISCSI_NET_PARAM_TCP_WSF; + else if (attr == &dev_attr_iface_tcp_timer_scale.attr) + param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; + else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) + param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; + else if (attr == &dev_attr_iface_cache_id.attr) + param = ISCSI_NET_PARAM_CACHE_ID; + else if (attr == &dev_attr_iface_redirect_en.attr) + param = ISCSI_NET_PARAM_REDIRECT_EN; + else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) + param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; + else if (attr == &dev_attr_iface_header_digest.attr) + param = ISCSI_IFACE_PARAM_HDRDGST_EN; + else if (attr == &dev_attr_iface_data_digest.attr) + param = ISCSI_IFACE_PARAM_DATADGST_EN; + else if (attr == &dev_attr_iface_immediate_data.attr) + param = ISCSI_IFACE_PARAM_IMM_DATA_EN; + else if (attr == &dev_attr_iface_initial_r2t.attr) + param = ISCSI_IFACE_PARAM_INITIAL_R2T_EN; + else if (attr == &dev_attr_iface_data_seq_in_order.attr) + param = ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN; + else if (attr == &dev_attr_iface_data_pdu_in_order.attr) + param = ISCSI_IFACE_PARAM_PDU_INORDER_EN; + else if (attr == &dev_attr_iface_erl.attr) + param = ISCSI_IFACE_PARAM_ERL; + else if (attr == &dev_attr_iface_max_recv_dlength.attr) + param = ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH; + else if (attr == &dev_attr_iface_first_burst_len.attr) + param = ISCSI_IFACE_PARAM_FIRST_BURST; + else if (attr == &dev_attr_iface_max_outstanding_r2t.attr) + param = ISCSI_IFACE_PARAM_MAX_R2T; + else if (attr == &dev_attr_iface_max_burst_len.attr) + param = ISCSI_IFACE_PARAM_MAX_BURST; + else if (attr == &dev_attr_iface_chap_auth.attr) + param = ISCSI_IFACE_PARAM_CHAP_AUTH_EN; + else if (attr == &dev_attr_iface_bidi_chap.attr) + param = ISCSI_IFACE_PARAM_BIDI_CHAP_EN; + else if (attr == &dev_attr_iface_discovery_auth_optional.attr) + param = ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL; + else if (attr == &dev_attr_iface_discovery_logout.attr) + param = ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN; + else if (attr == &dev_attr_iface_strict_login_comp_en.attr) + param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN; + else if (attr == &dev_attr_iface_initiator_name.attr) + param = ISCSI_IFACE_PARAM_INITIATOR_NAME; else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { if (attr == &dev_attr_ipv4_iface_ipaddress.attr) param = ISCSI_NET_PARAM_IPV4_ADDR; @@ -357,6 +497,42 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, param = ISCSI_NET_PARAM_IPV4_SUBNET; else if (attr == &dev_attr_ipv4_iface_bootproto.attr) param = ISCSI_NET_PARAM_IPV4_BOOTPROTO; + else if (attr == + 
&dev_attr_ipv4_iface_dhcp_dns_address_en.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN; + else if (attr == + &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN; + else if (attr == &dev_attr_ipv4_iface_tos_en.attr) + param = ISCSI_NET_PARAM_IPV4_TOS_EN; + else if (attr == &dev_attr_ipv4_iface_tos.attr) + param = ISCSI_NET_PARAM_IPV4_TOS; + else if (attr == &dev_attr_ipv4_iface_grat_arp_en.attr) + param = ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN; + else if (attr == + &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN; + else if (attr == &dev_attr_ipv4_iface_dhcp_alt_client_id.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID; + else if (attr == + &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN; + else if (attr == + &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN; + else if (attr == &dev_attr_ipv4_iface_dhcp_vendor_id.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID; + else if (attr == + &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr) + param = ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN; + else if (attr == + &dev_attr_ipv4_iface_fragment_disable.attr) + param = ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE; + else if (attr == + &dev_attr_ipv4_iface_incoming_forwarding_en.attr) + param = ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN; + else if (attr == &dev_attr_ipv4_iface_ttl.attr) + param = ISCSI_NET_PARAM_IPV4_TTL; else return 0; } else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) { @@ -370,6 +546,31 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, param = ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG; else if (attr == &dev_attr_ipv6_iface_link_local_autocfg.attr) param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG; + else if (attr == &dev_attr_ipv6_iface_link_local_state.attr) + param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE; + else if (attr == &dev_attr_ipv6_iface_router_state.attr) + param = ISCSI_NET_PARAM_IPV6_ROUTER_STATE; + else if (attr == + &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr) + param = ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN; + else if (attr == &dev_attr_ipv6_iface_mld_en.attr) + param = ISCSI_NET_PARAM_IPV6_MLD_EN; + else if (attr == &dev_attr_ipv6_iface_flow_label.attr) + param = ISCSI_NET_PARAM_IPV6_FLOW_LABEL; + else if (attr == &dev_attr_ipv6_iface_traffic_class.attr) + param = ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS; + else if (attr == &dev_attr_ipv6_iface_hop_limit.attr) + param = ISCSI_NET_PARAM_IPV6_HOP_LIMIT; + else if (attr == &dev_attr_ipv6_iface_nd_reachable_tmo.attr) + param = ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO; + else if (attr == &dev_attr_ipv6_iface_nd_rexmit_time.attr) + param = ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME; + else if (attr == &dev_attr_ipv6_iface_nd_stale_tmo.attr) + param = ISCSI_NET_PARAM_IPV6_ND_STALE_TMO; + else if (attr == &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr) + param = ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT; + else if (attr == &dev_attr_ipv6_iface_router_adv_link_mtu.attr) + param = ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU; else return 0; } else { @@ -377,7 +578,32 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, return 0; } - return t->attr_is_visible(ISCSI_NET_PARAM, param); + switch (param) { + case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: + case ISCSI_IFACE_PARAM_HDRDGST_EN: + case ISCSI_IFACE_PARAM_DATADGST_EN: + case ISCSI_IFACE_PARAM_IMM_DATA_EN: + case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: + case 
ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: + case ISCSI_IFACE_PARAM_PDU_INORDER_EN: + case ISCSI_IFACE_PARAM_ERL: + case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: + case ISCSI_IFACE_PARAM_FIRST_BURST: + case ISCSI_IFACE_PARAM_MAX_R2T: + case ISCSI_IFACE_PARAM_MAX_BURST: + case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: + case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: + case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: + case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: + case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: + case ISCSI_IFACE_PARAM_INITIATOR_NAME: + param_type = ISCSI_IFACE_PARAM; + break; + default: + param_type = ISCSI_NET_PARAM; + } + + return t->attr_is_visible(param_type, param); } static struct attribute *iscsi_iface_attrs[] = { @@ -396,6 +622,59 @@ static struct attribute *iscsi_iface_attrs[] = { &dev_attr_ipv6_iface_link_local_autocfg.attr, &dev_attr_iface_mtu.attr, &dev_attr_iface_port.attr, + &dev_attr_iface_ipaddress_state.attr, + &dev_attr_iface_delayed_ack_en.attr, + &dev_attr_iface_tcp_nagle_disable.attr, + &dev_attr_iface_tcp_wsf_disable.attr, + &dev_attr_iface_tcp_wsf.attr, + &dev_attr_iface_tcp_timer_scale.attr, + &dev_attr_iface_tcp_timestamp_en.attr, + &dev_attr_iface_cache_id.attr, + &dev_attr_iface_redirect_en.attr, + &dev_attr_iface_def_taskmgmt_tmo.attr, + &dev_attr_iface_header_digest.attr, + &dev_attr_iface_data_digest.attr, + &dev_attr_iface_immediate_data.attr, + &dev_attr_iface_initial_r2t.attr, + &dev_attr_iface_data_seq_in_order.attr, + &dev_attr_iface_data_pdu_in_order.attr, + &dev_attr_iface_erl.attr, + &dev_attr_iface_max_recv_dlength.attr, + &dev_attr_iface_first_burst_len.attr, + &dev_attr_iface_max_outstanding_r2t.attr, + &dev_attr_iface_max_burst_len.attr, + &dev_attr_iface_chap_auth.attr, + &dev_attr_iface_bidi_chap.attr, + &dev_attr_iface_discovery_auth_optional.attr, + &dev_attr_iface_discovery_logout.attr, + &dev_attr_iface_strict_login_comp_en.attr, + &dev_attr_iface_initiator_name.attr, + &dev_attr_ipv4_iface_dhcp_dns_address_en.attr, + &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr, + &dev_attr_ipv4_iface_tos_en.attr, + &dev_attr_ipv4_iface_tos.attr, + &dev_attr_ipv4_iface_grat_arp_en.attr, + &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr, + &dev_attr_ipv4_iface_dhcp_alt_client_id.attr, + &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr, + &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr, + &dev_attr_ipv4_iface_dhcp_vendor_id.attr, + &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr, + &dev_attr_ipv4_iface_fragment_disable.attr, + &dev_attr_ipv4_iface_incoming_forwarding_en.attr, + &dev_attr_ipv4_iface_ttl.attr, + &dev_attr_ipv6_iface_link_local_state.attr, + &dev_attr_ipv6_iface_router_state.attr, + &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr, + &dev_attr_ipv6_iface_mld_en.attr, + &dev_attr_ipv6_iface_flow_label.attr, + &dev_attr_ipv6_iface_traffic_class.attr, + &dev_attr_ipv6_iface_hop_limit.attr, + &dev_attr_ipv6_iface_nd_reachable_tmo.attr, + &dev_attr_ipv6_iface_nd_rexmit_time.attr, + &dev_attr_ipv6_iface_nd_stale_tmo.attr, + &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr, + &dev_attr_ipv6_iface_router_adv_link_mtu.attr, NULL, }; @@ -404,6 +683,61 @@ static struct attribute_group iscsi_iface_group = { .is_visible = iscsi_iface_attr_is_visible, }; +/* convert iscsi_ipaddress_state values to ascii string name */ +static const struct { + enum iscsi_ipaddress_state value; + char *name; +} iscsi_ipaddress_state_names[] = { + {ISCSI_IPDDRESS_STATE_UNCONFIGURED, "Unconfigured" }, + {ISCSI_IPDDRESS_STATE_ACQUIRING, "Acquiring" }, + {ISCSI_IPDDRESS_STATE_TENTATIVE, "Tentative" }, + 
{ISCSI_IPDDRESS_STATE_VALID, "Valid" }, + {ISCSI_IPDDRESS_STATE_DISABLING, "Disabling" }, + {ISCSI_IPDDRESS_STATE_INVALID, "Invalid" }, + {ISCSI_IPDDRESS_STATE_DEPRECATED, "Deprecated" }, +}; + +char *iscsi_get_ipaddress_state_name(enum iscsi_ipaddress_state port_state) +{ + int i; + char *state = NULL; + + for (i = 0; i < ARRAY_SIZE(iscsi_ipaddress_state_names); i++) { + if (iscsi_ipaddress_state_names[i].value == port_state) { + state = iscsi_ipaddress_state_names[i].name; + break; + } + } + return state; +} +EXPORT_SYMBOL_GPL(iscsi_get_ipaddress_state_name); + +/* convert iscsi_router_state values to ascii string name */ +static const struct { + enum iscsi_router_state value; + char *name; +} iscsi_router_state_names[] = { + {ISCSI_ROUTER_STATE_UNKNOWN, "Unknown" }, + {ISCSI_ROUTER_STATE_ADVERTISED, "Advertised" }, + {ISCSI_ROUTER_STATE_MANUAL, "Manual" }, + {ISCSI_ROUTER_STATE_STALE, "Stale" }, +}; + +char *iscsi_get_router_state_name(enum iscsi_router_state router_state) +{ + int i; + char *state = NULL; + + for (i = 0; i < ARRAY_SIZE(iscsi_router_state_names); i++) { + if (iscsi_router_state_names[i].value == router_state) { + state = iscsi_router_state_names[i].name; + break; + } + } + return state; +} +EXPORT_SYMBOL_GPL(iscsi_get_router_state_name); + struct iscsi_iface * iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport, uint32_t iface_type, uint32_t iface_num, int dd_size) @@ -460,6 +794,679 @@ void iscsi_destroy_iface(struct iscsi_iface *iface) EXPORT_SYMBOL_GPL(iscsi_destroy_iface); /* + * Interface to display flash node params to sysfs + */ + +#define ISCSI_FLASHNODE_ATTR(_prefix, _name, _mode, _show, _store) \ +struct device_attribute dev_attr_##_prefix##_##_name = \ + __ATTR(_name, _mode, _show, _store) + +/* flash node session attrs show */ +#define iscsi_flashnode_sess_attr_show(type, name, param) \ +static ssize_t \ +show_##type##_##name(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct iscsi_bus_flash_session *fnode_sess = \ + iscsi_dev_to_flash_session(dev);\ + struct iscsi_transport *t = fnode_sess->transport; \ + return t->get_flashnode_param(fnode_sess, param, buf); \ +} \ + + +#define iscsi_flashnode_sess_attr(type, name, param) \ + iscsi_flashnode_sess_attr_show(type, name, param) \ +static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \ + show_##type##_##name, NULL); + +/* Flash node session attributes */ + +iscsi_flashnode_sess_attr(fnode, auto_snd_tgt_disable, + ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE); +iscsi_flashnode_sess_attr(fnode, discovery_session, + ISCSI_FLASHNODE_DISCOVERY_SESS); +iscsi_flashnode_sess_attr(fnode, portal_type, ISCSI_FLASHNODE_PORTAL_TYPE); +iscsi_flashnode_sess_attr(fnode, entry_enable, ISCSI_FLASHNODE_ENTRY_EN); +iscsi_flashnode_sess_attr(fnode, immediate_data, ISCSI_FLASHNODE_IMM_DATA_EN); +iscsi_flashnode_sess_attr(fnode, initial_r2t, ISCSI_FLASHNODE_INITIAL_R2T_EN); +iscsi_flashnode_sess_attr(fnode, data_seq_in_order, + ISCSI_FLASHNODE_DATASEQ_INORDER); +iscsi_flashnode_sess_attr(fnode, data_pdu_in_order, + ISCSI_FLASHNODE_PDU_INORDER); +iscsi_flashnode_sess_attr(fnode, chap_auth, ISCSI_FLASHNODE_CHAP_AUTH_EN); +iscsi_flashnode_sess_attr(fnode, discovery_logout, + ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN); +iscsi_flashnode_sess_attr(fnode, bidi_chap, ISCSI_FLASHNODE_BIDI_CHAP_EN); +iscsi_flashnode_sess_attr(fnode, discovery_auth_optional, + ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL); +iscsi_flashnode_sess_attr(fnode, erl, ISCSI_FLASHNODE_ERL); +iscsi_flashnode_sess_attr(fnode, 
first_burst_len, ISCSI_FLASHNODE_FIRST_BURST); +iscsi_flashnode_sess_attr(fnode, def_time2wait, ISCSI_FLASHNODE_DEF_TIME2WAIT); +iscsi_flashnode_sess_attr(fnode, def_time2retain, + ISCSI_FLASHNODE_DEF_TIME2RETAIN); +iscsi_flashnode_sess_attr(fnode, max_outstanding_r2t, ISCSI_FLASHNODE_MAX_R2T); +iscsi_flashnode_sess_attr(fnode, isid, ISCSI_FLASHNODE_ISID); +iscsi_flashnode_sess_attr(fnode, tsid, ISCSI_FLASHNODE_TSID); +iscsi_flashnode_sess_attr(fnode, max_burst_len, ISCSI_FLASHNODE_MAX_BURST); +iscsi_flashnode_sess_attr(fnode, def_taskmgmt_tmo, + ISCSI_FLASHNODE_DEF_TASKMGMT_TMO); +iscsi_flashnode_sess_attr(fnode, targetalias, ISCSI_FLASHNODE_ALIAS); +iscsi_flashnode_sess_attr(fnode, targetname, ISCSI_FLASHNODE_NAME); +iscsi_flashnode_sess_attr(fnode, tpgt, ISCSI_FLASHNODE_TPGT); +iscsi_flashnode_sess_attr(fnode, discovery_parent_idx, + ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX); +iscsi_flashnode_sess_attr(fnode, discovery_parent_type, + ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE); +iscsi_flashnode_sess_attr(fnode, chap_in_idx, ISCSI_FLASHNODE_CHAP_IN_IDX); +iscsi_flashnode_sess_attr(fnode, chap_out_idx, ISCSI_FLASHNODE_CHAP_OUT_IDX); +iscsi_flashnode_sess_attr(fnode, username, ISCSI_FLASHNODE_USERNAME); +iscsi_flashnode_sess_attr(fnode, username_in, ISCSI_FLASHNODE_USERNAME_IN); +iscsi_flashnode_sess_attr(fnode, password, ISCSI_FLASHNODE_PASSWORD); +iscsi_flashnode_sess_attr(fnode, password_in, ISCSI_FLASHNODE_PASSWORD_IN); +iscsi_flashnode_sess_attr(fnode, is_boot_target, ISCSI_FLASHNODE_IS_BOOT_TGT); + +static struct attribute *iscsi_flashnode_sess_attrs[] = { + &dev_attr_fnode_auto_snd_tgt_disable.attr, + &dev_attr_fnode_discovery_session.attr, + &dev_attr_fnode_portal_type.attr, + &dev_attr_fnode_entry_enable.attr, + &dev_attr_fnode_immediate_data.attr, + &dev_attr_fnode_initial_r2t.attr, + &dev_attr_fnode_data_seq_in_order.attr, + &dev_attr_fnode_data_pdu_in_order.attr, + &dev_attr_fnode_chap_auth.attr, + &dev_attr_fnode_discovery_logout.attr, + &dev_attr_fnode_bidi_chap.attr, + &dev_attr_fnode_discovery_auth_optional.attr, + &dev_attr_fnode_erl.attr, + &dev_attr_fnode_first_burst_len.attr, + &dev_attr_fnode_def_time2wait.attr, + &dev_attr_fnode_def_time2retain.attr, + &dev_attr_fnode_max_outstanding_r2t.attr, + &dev_attr_fnode_isid.attr, + &dev_attr_fnode_tsid.attr, + &dev_attr_fnode_max_burst_len.attr, + &dev_attr_fnode_def_taskmgmt_tmo.attr, + &dev_attr_fnode_targetalias.attr, + &dev_attr_fnode_targetname.attr, + &dev_attr_fnode_tpgt.attr, + &dev_attr_fnode_discovery_parent_idx.attr, + &dev_attr_fnode_discovery_parent_type.attr, + &dev_attr_fnode_chap_in_idx.attr, + &dev_attr_fnode_chap_out_idx.attr, + &dev_attr_fnode_username.attr, + &dev_attr_fnode_username_in.attr, + &dev_attr_fnode_password.attr, + &dev_attr_fnode_password_in.attr, + &dev_attr_fnode_is_boot_target.attr, + NULL, +}; + +static umode_t iscsi_flashnode_sess_attr_is_visible(struct kobject *kobj, + struct attribute *attr, + int i) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct iscsi_bus_flash_session *fnode_sess = + iscsi_dev_to_flash_session(dev); + struct iscsi_transport *t = fnode_sess->transport; + int param; + + if (attr == &dev_attr_fnode_auto_snd_tgt_disable.attr) { + param = ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE; + } else if (attr == &dev_attr_fnode_discovery_session.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_SESS; + } else if (attr == &dev_attr_fnode_portal_type.attr) { + param = ISCSI_FLASHNODE_PORTAL_TYPE; + } else if (attr == &dev_attr_fnode_entry_enable.attr) { + param = 
ISCSI_FLASHNODE_ENTRY_EN; + } else if (attr == &dev_attr_fnode_immediate_data.attr) { + param = ISCSI_FLASHNODE_IMM_DATA_EN; + } else if (attr == &dev_attr_fnode_initial_r2t.attr) { + param = ISCSI_FLASHNODE_INITIAL_R2T_EN; + } else if (attr == &dev_attr_fnode_data_seq_in_order.attr) { + param = ISCSI_FLASHNODE_DATASEQ_INORDER; + } else if (attr == &dev_attr_fnode_data_pdu_in_order.attr) { + param = ISCSI_FLASHNODE_PDU_INORDER; + } else if (attr == &dev_attr_fnode_chap_auth.attr) { + param = ISCSI_FLASHNODE_CHAP_AUTH_EN; + } else if (attr == &dev_attr_fnode_discovery_logout.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN; + } else if (attr == &dev_attr_fnode_bidi_chap.attr) { + param = ISCSI_FLASHNODE_BIDI_CHAP_EN; + } else if (attr == &dev_attr_fnode_discovery_auth_optional.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL; + } else if (attr == &dev_attr_fnode_erl.attr) { + param = ISCSI_FLASHNODE_ERL; + } else if (attr == &dev_attr_fnode_first_burst_len.attr) { + param = ISCSI_FLASHNODE_FIRST_BURST; + } else if (attr == &dev_attr_fnode_def_time2wait.attr) { + param = ISCSI_FLASHNODE_DEF_TIME2WAIT; + } else if (attr == &dev_attr_fnode_def_time2retain.attr) { + param = ISCSI_FLASHNODE_DEF_TIME2RETAIN; + } else if (attr == &dev_attr_fnode_max_outstanding_r2t.attr) { + param = ISCSI_FLASHNODE_MAX_R2T; + } else if (attr == &dev_attr_fnode_isid.attr) { + param = ISCSI_FLASHNODE_ISID; + } else if (attr == &dev_attr_fnode_tsid.attr) { + param = ISCSI_FLASHNODE_TSID; + } else if (attr == &dev_attr_fnode_max_burst_len.attr) { + param = ISCSI_FLASHNODE_MAX_BURST; + } else if (attr == &dev_attr_fnode_def_taskmgmt_tmo.attr) { + param = ISCSI_FLASHNODE_DEF_TASKMGMT_TMO; + } else if (attr == &dev_attr_fnode_targetalias.attr) { + param = ISCSI_FLASHNODE_ALIAS; + } else if (attr == &dev_attr_fnode_targetname.attr) { + param = ISCSI_FLASHNODE_NAME; + } else if (attr == &dev_attr_fnode_tpgt.attr) { + param = ISCSI_FLASHNODE_TPGT; + } else if (attr == &dev_attr_fnode_discovery_parent_idx.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX; + } else if (attr == &dev_attr_fnode_discovery_parent_type.attr) { + param = ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE; + } else if (attr == &dev_attr_fnode_chap_in_idx.attr) { + param = ISCSI_FLASHNODE_CHAP_IN_IDX; + } else if (attr == &dev_attr_fnode_chap_out_idx.attr) { + param = ISCSI_FLASHNODE_CHAP_OUT_IDX; + } else if (attr == &dev_attr_fnode_username.attr) { + param = ISCSI_FLASHNODE_USERNAME; + } else if (attr == &dev_attr_fnode_username_in.attr) { + param = ISCSI_FLASHNODE_USERNAME_IN; + } else if (attr == &dev_attr_fnode_password.attr) { + param = ISCSI_FLASHNODE_PASSWORD; + } else if (attr == &dev_attr_fnode_password_in.attr) { + param = ISCSI_FLASHNODE_PASSWORD_IN; + } else if (attr == &dev_attr_fnode_is_boot_target.attr) { + param = ISCSI_FLASHNODE_IS_BOOT_TGT; + } else { + WARN_ONCE(1, "Invalid flashnode session attr"); + return 0; + } + + return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param); +} + +static struct attribute_group iscsi_flashnode_sess_attr_group = { + .attrs = iscsi_flashnode_sess_attrs, + .is_visible = iscsi_flashnode_sess_attr_is_visible, +}; + +static const struct attribute_group *iscsi_flashnode_sess_attr_groups[] = { + &iscsi_flashnode_sess_attr_group, + NULL, +}; + +static void iscsi_flashnode_sess_release(struct device *dev) +{ + struct iscsi_bus_flash_session *fnode_sess = + iscsi_dev_to_flash_session(dev); + + kfree(fnode_sess->targetname); + kfree(fnode_sess->targetalias); + kfree(fnode_sess->portal_type); + 
kfree(fnode_sess); +} + +struct device_type iscsi_flashnode_sess_dev_type = { + .name = "iscsi_flashnode_sess_dev_type", + .groups = iscsi_flashnode_sess_attr_groups, + .release = iscsi_flashnode_sess_release, +}; + +/* flash node connection attrs show */ +#define iscsi_flashnode_conn_attr_show(type, name, param) \ +static ssize_t \ +show_##type##_##name(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);\ + struct iscsi_bus_flash_session *fnode_sess = \ + iscsi_flash_conn_to_flash_session(fnode_conn);\ + struct iscsi_transport *t = fnode_conn->transport; \ + return t->get_flashnode_param(fnode_sess, param, buf); \ +} \ + + +#define iscsi_flashnode_conn_attr(type, name, param) \ + iscsi_flashnode_conn_attr_show(type, name, param) \ +static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO, \ + show_##type##_##name, NULL); + +/* Flash node connection attributes */ + +iscsi_flashnode_conn_attr(fnode, is_fw_assigned_ipv6, + ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6); +iscsi_flashnode_conn_attr(fnode, header_digest, ISCSI_FLASHNODE_HDR_DGST_EN); +iscsi_flashnode_conn_attr(fnode, data_digest, ISCSI_FLASHNODE_DATA_DGST_EN); +iscsi_flashnode_conn_attr(fnode, snack_req, ISCSI_FLASHNODE_SNACK_REQ_EN); +iscsi_flashnode_conn_attr(fnode, tcp_timestamp_stat, + ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT); +iscsi_flashnode_conn_attr(fnode, tcp_nagle_disable, + ISCSI_FLASHNODE_TCP_NAGLE_DISABLE); +iscsi_flashnode_conn_attr(fnode, tcp_wsf_disable, + ISCSI_FLASHNODE_TCP_WSF_DISABLE); +iscsi_flashnode_conn_attr(fnode, tcp_timer_scale, + ISCSI_FLASHNODE_TCP_TIMER_SCALE); +iscsi_flashnode_conn_attr(fnode, tcp_timestamp_enable, + ISCSI_FLASHNODE_TCP_TIMESTAMP_EN); +iscsi_flashnode_conn_attr(fnode, fragment_disable, + ISCSI_FLASHNODE_IP_FRAG_DISABLE); +iscsi_flashnode_conn_attr(fnode, keepalive_tmo, ISCSI_FLASHNODE_KEEPALIVE_TMO); +iscsi_flashnode_conn_attr(fnode, port, ISCSI_FLASHNODE_PORT); +iscsi_flashnode_conn_attr(fnode, ipaddress, ISCSI_FLASHNODE_IPADDR); +iscsi_flashnode_conn_attr(fnode, max_recv_dlength, + ISCSI_FLASHNODE_MAX_RECV_DLENGTH); +iscsi_flashnode_conn_attr(fnode, max_xmit_dlength, + ISCSI_FLASHNODE_MAX_XMIT_DLENGTH); +iscsi_flashnode_conn_attr(fnode, local_port, ISCSI_FLASHNODE_LOCAL_PORT); +iscsi_flashnode_conn_attr(fnode, ipv4_tos, ISCSI_FLASHNODE_IPV4_TOS); +iscsi_flashnode_conn_attr(fnode, ipv6_traffic_class, ISCSI_FLASHNODE_IPV6_TC); +iscsi_flashnode_conn_attr(fnode, ipv6_flow_label, + ISCSI_FLASHNODE_IPV6_FLOW_LABEL); +iscsi_flashnode_conn_attr(fnode, redirect_ipaddr, + ISCSI_FLASHNODE_REDIRECT_IPADDR); +iscsi_flashnode_conn_attr(fnode, max_segment_size, + ISCSI_FLASHNODE_MAX_SEGMENT_SIZE); +iscsi_flashnode_conn_attr(fnode, link_local_ipv6, + ISCSI_FLASHNODE_LINK_LOCAL_IPV6); +iscsi_flashnode_conn_attr(fnode, tcp_xmit_wsf, ISCSI_FLASHNODE_TCP_XMIT_WSF); +iscsi_flashnode_conn_attr(fnode, tcp_recv_wsf, ISCSI_FLASHNODE_TCP_RECV_WSF); +iscsi_flashnode_conn_attr(fnode, statsn, ISCSI_FLASHNODE_STATSN); +iscsi_flashnode_conn_attr(fnode, exp_statsn, ISCSI_FLASHNODE_EXP_STATSN); + +static struct attribute *iscsi_flashnode_conn_attrs[] = { + &dev_attr_fnode_is_fw_assigned_ipv6.attr, + &dev_attr_fnode_header_digest.attr, + &dev_attr_fnode_data_digest.attr, + &dev_attr_fnode_snack_req.attr, + &dev_attr_fnode_tcp_timestamp_stat.attr, + &dev_attr_fnode_tcp_nagle_disable.attr, + &dev_attr_fnode_tcp_wsf_disable.attr, + &dev_attr_fnode_tcp_timer_scale.attr, + &dev_attr_fnode_tcp_timestamp_enable.attr, + 
&dev_attr_fnode_fragment_disable.attr, + &dev_attr_fnode_max_recv_dlength.attr, + &dev_attr_fnode_max_xmit_dlength.attr, + &dev_attr_fnode_keepalive_tmo.attr, + &dev_attr_fnode_port.attr, + &dev_attr_fnode_ipaddress.attr, + &dev_attr_fnode_redirect_ipaddr.attr, + &dev_attr_fnode_max_segment_size.attr, + &dev_attr_fnode_local_port.attr, + &dev_attr_fnode_ipv4_tos.attr, + &dev_attr_fnode_ipv6_traffic_class.attr, + &dev_attr_fnode_ipv6_flow_label.attr, + &dev_attr_fnode_link_local_ipv6.attr, + &dev_attr_fnode_tcp_xmit_wsf.attr, + &dev_attr_fnode_tcp_recv_wsf.attr, + &dev_attr_fnode_statsn.attr, + &dev_attr_fnode_exp_statsn.attr, + NULL, +}; + +static umode_t iscsi_flashnode_conn_attr_is_visible(struct kobject *kobj, + struct attribute *attr, + int i) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev); + struct iscsi_transport *t = fnode_conn->transport; + int param; + + if (attr == &dev_attr_fnode_is_fw_assigned_ipv6.attr) { + param = ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6; + } else if (attr == &dev_attr_fnode_header_digest.attr) { + param = ISCSI_FLASHNODE_HDR_DGST_EN; + } else if (attr == &dev_attr_fnode_data_digest.attr) { + param = ISCSI_FLASHNODE_DATA_DGST_EN; + } else if (attr == &dev_attr_fnode_snack_req.attr) { + param = ISCSI_FLASHNODE_SNACK_REQ_EN; + } else if (attr == &dev_attr_fnode_tcp_timestamp_stat.attr) { + param = ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT; + } else if (attr == &dev_attr_fnode_tcp_nagle_disable.attr) { + param = ISCSI_FLASHNODE_TCP_NAGLE_DISABLE; + } else if (attr == &dev_attr_fnode_tcp_wsf_disable.attr) { + param = ISCSI_FLASHNODE_TCP_WSF_DISABLE; + } else if (attr == &dev_attr_fnode_tcp_timer_scale.attr) { + param = ISCSI_FLASHNODE_TCP_TIMER_SCALE; + } else if (attr == &dev_attr_fnode_tcp_timestamp_enable.attr) { + param = ISCSI_FLASHNODE_TCP_TIMESTAMP_EN; + } else if (attr == &dev_attr_fnode_fragment_disable.attr) { + param = ISCSI_FLASHNODE_IP_FRAG_DISABLE; + } else if (attr == &dev_attr_fnode_max_recv_dlength.attr) { + param = ISCSI_FLASHNODE_MAX_RECV_DLENGTH; + } else if (attr == &dev_attr_fnode_max_xmit_dlength.attr) { + param = ISCSI_FLASHNODE_MAX_XMIT_DLENGTH; + } else if (attr == &dev_attr_fnode_keepalive_tmo.attr) { + param = ISCSI_FLASHNODE_KEEPALIVE_TMO; + } else if (attr == &dev_attr_fnode_port.attr) { + param = ISCSI_FLASHNODE_PORT; + } else if (attr == &dev_attr_fnode_ipaddress.attr) { + param = ISCSI_FLASHNODE_IPADDR; + } else if (attr == &dev_attr_fnode_redirect_ipaddr.attr) { + param = ISCSI_FLASHNODE_REDIRECT_IPADDR; + } else if (attr == &dev_attr_fnode_max_segment_size.attr) { + param = ISCSI_FLASHNODE_MAX_SEGMENT_SIZE; + } else if (attr == &dev_attr_fnode_local_port.attr) { + param = ISCSI_FLASHNODE_LOCAL_PORT; + } else if (attr == &dev_attr_fnode_ipv4_tos.attr) { + param = ISCSI_FLASHNODE_IPV4_TOS; + } else if (attr == &dev_attr_fnode_ipv6_traffic_class.attr) { + param = ISCSI_FLASHNODE_IPV6_TC; + } else if (attr == &dev_attr_fnode_ipv6_flow_label.attr) { + param = ISCSI_FLASHNODE_IPV6_FLOW_LABEL; + } else if (attr == &dev_attr_fnode_link_local_ipv6.attr) { + param = ISCSI_FLASHNODE_LINK_LOCAL_IPV6; + } else if (attr == &dev_attr_fnode_tcp_xmit_wsf.attr) { + param = ISCSI_FLASHNODE_TCP_XMIT_WSF; + } else if (attr == &dev_attr_fnode_tcp_recv_wsf.attr) { + param = ISCSI_FLASHNODE_TCP_RECV_WSF; + } else if (attr == &dev_attr_fnode_statsn.attr) { + param = ISCSI_FLASHNODE_STATSN; + } else if (attr == &dev_attr_fnode_exp_statsn.attr) { + param = 
ISCSI_FLASHNODE_EXP_STATSN; + } else { + WARN_ONCE(1, "Invalid flashnode connection attr"); + return 0; + } + + return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param); +} + +static struct attribute_group iscsi_flashnode_conn_attr_group = { + .attrs = iscsi_flashnode_conn_attrs, + .is_visible = iscsi_flashnode_conn_attr_is_visible, +}; + +static const struct attribute_group *iscsi_flashnode_conn_attr_groups[] = { + &iscsi_flashnode_conn_attr_group, + NULL, +}; + +static void iscsi_flashnode_conn_release(struct device *dev) +{ + struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev); + + kfree(fnode_conn->ipaddress); + kfree(fnode_conn->redirect_ipaddr); + kfree(fnode_conn->link_local_ipv6_addr); + kfree(fnode_conn); +} + +struct device_type iscsi_flashnode_conn_dev_type = { + .name = "iscsi_flashnode_conn_dev_type", + .groups = iscsi_flashnode_conn_attr_groups, + .release = iscsi_flashnode_conn_release, +}; + +struct bus_type iscsi_flashnode_bus; + +int iscsi_flashnode_bus_match(struct device *dev, + struct device_driver *drv) +{ + if (dev->bus == &iscsi_flashnode_bus) + return 1; + return 0; +} +EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match); + +struct bus_type iscsi_flashnode_bus = { + .name = "iscsi_flashnode", + .match = &iscsi_flashnode_bus_match, +}; + +/** + * iscsi_create_flashnode_sess - Add flashnode session entry in sysfs + * @shost: pointer to host data + * @index: index of flashnode to add in sysfs + * @transport: pointer to transport data + * @dd_size: total size to allocate + * + * Adds a sysfs entry for the flashnode session attributes + * + * Returns: + * pointer to allocated flashnode sess on success + * %NULL on failure + */ +struct iscsi_bus_flash_session * +iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index, + struct iscsi_transport *transport, + int dd_size) +{ + struct iscsi_bus_flash_session *fnode_sess; + int err; + + fnode_sess = kzalloc(sizeof(*fnode_sess) + dd_size, GFP_KERNEL); + if (!fnode_sess) + return NULL; + + fnode_sess->transport = transport; + fnode_sess->target_id = index; + fnode_sess->dev.type = &iscsi_flashnode_sess_dev_type; + fnode_sess->dev.bus = &iscsi_flashnode_bus; + fnode_sess->dev.parent = &shost->shost_gendev; + dev_set_name(&fnode_sess->dev, "flashnode_sess-%u:%u", + shost->host_no, index); + + err = device_register(&fnode_sess->dev); + if (err) + goto free_fnode_sess; + + if (dd_size) + fnode_sess->dd_data = &fnode_sess[1]; + + return fnode_sess; + +free_fnode_sess: + kfree(fnode_sess); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess); + +/** + * iscsi_create_flashnode_conn - Add flashnode conn entry in sysfs + * @shost: pointer to host data + * @fnode_sess: pointer to the parent flashnode session entry + * @transport: pointer to transport data + * @dd_size: total size to allocate + * + * Adds a sysfs entry for the flashnode connection attributes + * + * Returns: + * pointer to allocated flashnode conn on success + * %NULL on failure + */ +struct iscsi_bus_flash_conn * +iscsi_create_flashnode_conn(struct Scsi_Host *shost, + struct iscsi_bus_flash_session *fnode_sess, + struct iscsi_transport *transport, + int dd_size) +{ + struct iscsi_bus_flash_conn *fnode_conn; + int err; + + fnode_conn = kzalloc(sizeof(*fnode_conn) + dd_size, GFP_KERNEL); + if (!fnode_conn) + return NULL; + + fnode_conn->transport = transport; + fnode_conn->dev.type = &iscsi_flashnode_conn_dev_type; + fnode_conn->dev.bus = &iscsi_flashnode_bus; + fnode_conn->dev.parent = &fnode_sess->dev; + dev_set_name(&fnode_conn->dev, 
"flashnode_conn-%u:%u:0", + shost->host_no, fnode_sess->target_id); + + err = device_register(&fnode_conn->dev); + if (err) + goto free_fnode_conn; + + if (dd_size) + fnode_conn->dd_data = &fnode_conn[1]; + + return fnode_conn; + +free_fnode_conn: + kfree(fnode_conn); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn); + +/** + * iscsi_is_flashnode_conn_dev - verify passed device is to be flashnode conn + * @dev: device to verify + * @data: pointer to data containing value to use for verification + * + * Verifies if the passed device is flashnode conn device + * + * Returns: + * 1 on success + * 0 on failure + */ +int iscsi_is_flashnode_conn_dev(struct device *dev, void *data) +{ + return dev->bus == &iscsi_flashnode_bus; +} +EXPORT_SYMBOL_GPL(iscsi_is_flashnode_conn_dev); + +static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn) +{ + device_unregister(&fnode_conn->dev); + return 0; +} + +static int flashnode_match_index(struct device *dev, void *data) +{ + struct iscsi_bus_flash_session *fnode_sess = NULL; + int ret = 0; + + if (!iscsi_flashnode_bus_match(dev, NULL)) + goto exit_match_index; + + fnode_sess = iscsi_dev_to_flash_session(dev); + ret = (fnode_sess->target_id == *((int *)data)) ? 1 : 0; + +exit_match_index: + return ret; +} + +/** + * iscsi_get_flashnode_by_index -finds flashnode session entry by index + * @shost: pointer to host data + * @idx: index to match + * + * Finds the flashnode session object for the passed index + * + * Returns: + * pointer to found flashnode session object on success + * %NULL on failure + */ +static struct iscsi_bus_flash_session * +iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx) +{ + struct iscsi_bus_flash_session *fnode_sess = NULL; + struct device *dev; + + dev = device_find_child(&shost->shost_gendev, &idx, + flashnode_match_index); + if (dev) + fnode_sess = iscsi_dev_to_flash_session(dev); + + return fnode_sess; +} + +/** + * iscsi_find_flashnode_sess - finds flashnode session entry + * @shost: pointer to host data + * @data: pointer to data containing value to use for comparison + * @fn: function pointer that does actual comparison + * + * Finds the flashnode session object comparing the data passed using logic + * defined in passed function pointer + * + * Returns: + * pointer to found flashnode session device object on success + * %NULL on failure + */ +struct device * +iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, + int (*fn)(struct device *dev, void *data)) +{ + return device_find_child(&shost->shost_gendev, data, fn); +} +EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); + +/** + * iscsi_find_flashnode_conn - finds flashnode connection entry + * @fnode_sess: pointer to parent flashnode session entry + * + * Finds the flashnode connection object comparing the data passed using logic + * defined in passed function pointer + * + * Returns: + * pointer to found flashnode connection device object on success + * %NULL on failure + */ +struct device * +iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess) +{ + return device_find_child(&fnode_sess->dev, NULL, + iscsi_is_flashnode_conn_dev); +} +EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn); + +static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data) +{ + if (!iscsi_is_flashnode_conn_dev(dev, NULL)) + return 0; + + return iscsi_destroy_flashnode_conn(iscsi_dev_to_flash_conn(dev)); +} + +/** + * iscsi_destroy_flashnode_sess - destroy flashnode session entry + * @fnode_sess: pointer to 
+ +/** + * iscsi_destroy_flashnode_sess - destroy flashnode session entry + * @fnode_sess: pointer to flashnode session entry to be destroyed + * + * Deletes the flashnode session entry and all child flashnode connection + * entries from sysfs + */ +void iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess) +{ + int err; + + err = device_for_each_child(&fnode_sess->dev, NULL, + iscsi_iter_destroy_flashnode_conn_fn); + if (err) + pr_err("Could not delete all connections for %s. Error %d.\n", + fnode_sess->dev.kobj.name, err); + + device_unregister(&fnode_sess->dev); +} +EXPORT_SYMBOL_GPL(iscsi_destroy_flashnode_sess); + +static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data) +{ + if (!iscsi_flashnode_bus_match(dev, NULL)) + return 0; + + iscsi_destroy_flashnode_sess(iscsi_dev_to_flash_session(dev)); + return 0; +} + +/** + * iscsi_destroy_all_flashnode - destroy all flashnode session entries + * @shost: pointer to host data + * + * Destroys all the flashnode session entries and all corresponding child + * flashnode connection entries from sysfs + */ +void iscsi_destroy_all_flashnode(struct Scsi_Host *shost) +{ + device_for_each_child(&shost->shost_gendev, NULL, + iscsi_iter_destroy_flashnode_fn); +} +EXPORT_SYMBOL_GPL(iscsi_destroy_all_flashnode); + +/* * BSG support */ /** @@ -1344,8 +2351,8 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, struct iscsi_uevent *ev; char *pdu; struct iscsi_internal *priv; - int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) + - data_size); + int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_hdr) + + data_size); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) @@ -1360,7 +2367,7 @@ } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); memset(ev, 0, sizeof(*ev)); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_RECV_PDU; @@ -1381,7 +2388,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost, struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; - int len = NLMSG_SPACE(sizeof(*ev) + data_size); + int len = nlmsg_total_size(sizeof(*ev) + data_size); skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { @@ -1390,7 +2397,7 @@ } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); memset(ev, 0, sizeof(*ev)); ev->type = type; ev->transport_handle = iscsi_handle(transport); @@ -1415,7 +2422,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) struct sk_buff *skb; struct iscsi_uevent *ev; struct iscsi_internal *priv; - int len = NLMSG_SPACE(sizeof(*ev)); + int len = nlmsg_total_size(sizeof(*ev)); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) @@ -1429,7 +2436,7 @@ } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_CONN_ERROR; ev->r.connerror.error = error; @@ -1450,7 +2457,7 @@ void iscsi_conn_login_event(struct iscsi_cls_conn *conn, struct sk_buff *skb; struct iscsi_uevent *ev; struct iscsi_internal *priv; - int len = NLMSG_SPACE(sizeof(*ev)); + int len = nlmsg_total_size(sizeof(*ev)); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) @@ -1464,7 +2471,7 @@ } nlh = __nlmsg_put(skb, 0, 0, 
0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE; ev->r.conn_login.state = state; @@ -1484,7 +2491,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport, struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; - int len = NLMSG_SPACE(sizeof(*ev) + data_size); + int len = nlmsg_total_size(sizeof(*ev) + data_size); skb = alloc_skb(len, GFP_NOIO); if (!skb) { @@ -1494,7 +2501,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport, } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(transport); ev->type = ISCSI_KEVENT_HOST_EVENT; ev->r.host_event.host_no = host_no; @@ -1515,7 +2522,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport, struct nlmsghdr *nlh; struct sk_buff *skb; struct iscsi_uevent *ev; - int len = NLMSG_SPACE(sizeof(*ev) + data_size); + int len = nlmsg_total_size(sizeof(*ev) + data_size); skb = alloc_skb(len, GFP_NOIO); if (!skb) { @@ -1524,7 +2531,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport, } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(transport); ev->type = ISCSI_KEVENT_PING_COMP; ev->r.ping_comp.host_no = host_no; @@ -1543,7 +2550,7 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, { struct sk_buff *skb; struct nlmsghdr *nlh; - int len = NLMSG_SPACE(size); + int len = nlmsg_total_size(size); int flags = multi ? NLM_F_MULTI : 0; int t = done ? NLMSG_DONE : type; @@ -1555,24 +2562,24 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0); nlh->nlmsg_flags = flags; - memcpy(NLMSG_DATA(nlh), payload, size); + memcpy(nlmsg_data(nlh), payload, size); return iscsi_multicast_skb(skb, group, GFP_ATOMIC); } static int iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) { - struct iscsi_uevent *ev = NLMSG_DATA(nlh); + struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_stats *stats; struct sk_buff *skbstat; struct iscsi_cls_conn *conn; struct nlmsghdr *nlhstat; struct iscsi_uevent *evstat; struct iscsi_internal *priv; - int len = NLMSG_SPACE(sizeof(*ev) + - sizeof(struct iscsi_stats) + - sizeof(struct iscsi_stats_custom) * - ISCSI_STATS_CUSTOM_MAX); + int len = nlmsg_total_size(sizeof(*ev) + + sizeof(struct iscsi_stats) + + sizeof(struct iscsi_stats_custom) * + ISCSI_STATS_CUSTOM_MAX); int err = 0; priv = iscsi_if_transport_lookup(transport); @@ -1595,7 +2602,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) nlhstat = __nlmsg_put(skbstat, 0, 0, 0, (len - sizeof(*nlhstat)), 0); - evstat = NLMSG_DATA(nlhstat); + evstat = nlmsg_data(nlhstat); memset(evstat, 0, sizeof(*evstat)); evstat->transport_handle = iscsi_handle(conn->transport); evstat->type = nlh->nlmsg_type; @@ -1608,12 +2615,12 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) memset(stats, 0, sizeof(*stats)); transport->get_stats(conn, stats); - actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) + - sizeof(struct iscsi_stats) + - sizeof(struct iscsi_stats_custom) * - stats->custom_length); + actual_size = nlmsg_total_size(sizeof(struct iscsi_uevent) + + sizeof(struct iscsi_stats) + + 
sizeof(struct iscsi_stats_custom) * + stats->custom_length); actual_size -= sizeof(*nlhstat); - actual_size = NLMSG_LENGTH(actual_size); + actual_size = nlmsg_msg_size(actual_size); skb_trim(skbstat, NLMSG_ALIGN(actual_size)); nlhstat->nlmsg_len = actual_size; @@ -1637,7 +2644,7 @@ int iscsi_session_event(struct iscsi_cls_session *session, struct iscsi_uevent *ev; struct sk_buff *skb; struct nlmsghdr *nlh; - int rc, len = NLMSG_SPACE(sizeof(*ev)); + int rc, len = nlmsg_total_size(sizeof(*ev)); priv = iscsi_if_transport_lookup(session->transport); if (!priv) @@ -1653,7 +2660,7 @@ int iscsi_session_event(struct iscsi_cls_session *session, } nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); ev->transport_handle = iscsi_handle(session->transport); ev->type = event; @@ -2005,7 +3012,7 @@ iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev) static int iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh) { - struct iscsi_uevent *ev = NLMSG_DATA(nlh); + struct iscsi_uevent *ev = nlmsg_data(nlh); struct Scsi_Host *shost = NULL; struct iscsi_chap_rec *chap_rec; struct iscsi_internal *priv; @@ -2024,7 +3031,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh) return -EINVAL; chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec)); - len = NLMSG_SPACE(sizeof(*ev) + chap_buf_size); + len = nlmsg_total_size(sizeof(*ev) + chap_buf_size); shost = scsi_host_lookup(ev->u.get_chap.host_no); if (!shost) { @@ -2045,7 +3052,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh) nlhchap = __nlmsg_put(skbchap, 0, 0, 0, (len - sizeof(*nlhchap)), 0); - evchap = NLMSG_DATA(nlhchap); + evchap = nlmsg_data(nlhchap); memset(evchap, 0, sizeof(*evchap)); evchap->transport_handle = iscsi_handle(transport); evchap->type = nlh->nlmsg_type; @@ -2058,7 +3065,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh) err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx, &evchap->u.get_chap.num_entries, buf); - actual_size = NLMSG_SPACE(sizeof(*ev) + chap_buf_size); + actual_size = nlmsg_total_size(sizeof(*ev) + chap_buf_size); skb_trim(skbchap, NLMSG_ALIGN(actual_size)); nlhchap->nlmsg_len = actual_size; @@ -2071,6 +3078,28 @@ exit_get_chap: return err; } +static int iscsi_set_chap(struct iscsi_transport *transport, + struct iscsi_uevent *ev, uint32_t len) +{ + char *data = (char *)ev + sizeof(*ev); + struct Scsi_Host *shost; + int err = 0; + + if (!transport->set_chap) + return -ENOSYS; + + shost = scsi_host_lookup(ev->u.set_path.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.set_path.host_no); + return -ENODEV; + } + + err = transport->set_chap(shost, data, len); + scsi_host_put(shost); + return err; +} + static int iscsi_delete_chap(struct iscsi_transport *transport, struct iscsi_uevent *ev) { @@ -2092,11 +3121,372 @@ static int iscsi_delete_chap(struct iscsi_transport *transport, return err; } +static const struct { + enum iscsi_discovery_parent_type value; + char *name; +} iscsi_discovery_parent_names[] = { + {ISCSI_DISC_PARENT_UNKNOWN, "Unknown" }, + {ISCSI_DISC_PARENT_SENDTGT, "Sendtarget" }, + {ISCSI_DISC_PARENT_ISNS, "isns" }, +}; + +char *iscsi_get_discovery_parent_name(int parent_type) +{ + int i; + char *state = "Unknown!"; + + for (i = 0; i < ARRAY_SIZE(iscsi_discovery_parent_names); i++) { + if (iscsi_discovery_parent_names[i].value & parent_type) { + state = iscsi_discovery_parent_names[i].name; + 
break; + } + } + return state; +} +EXPORT_SYMBOL_GPL(iscsi_get_discovery_parent_name); + +static int iscsi_set_flashnode_param(struct iscsi_transport *transport, + struct iscsi_uevent *ev, uint32_t len) +{ + char *data = (char *)ev + sizeof(*ev); + struct Scsi_Host *shost; + struct iscsi_bus_flash_session *fnode_sess; + struct iscsi_bus_flash_conn *fnode_conn; + struct device *dev; + uint32_t idx; + int err = 0; + + if (!transport->set_flashnode_param) { + err = -ENOSYS; + goto exit_set_fnode; + } + + shost = scsi_host_lookup(ev->u.set_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.set_flashnode.host_no); + err = -ENODEV; + goto exit_set_fnode; + } + + idx = ev->u.set_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); + if (!fnode_sess) { + pr_err("%s could not find flashnode %u for host no %u\n", + __func__, idx, ev->u.set_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + dev = iscsi_find_flashnode_conn(fnode_sess); + if (!dev) { + err = -ENODEV; + goto put_sess; + } + + fnode_conn = iscsi_dev_to_flash_conn(dev); + err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); + +put_host: + scsi_host_put(shost); + +exit_set_fnode: + return err; +} + +static int iscsi_new_flashnode(struct iscsi_transport *transport, + struct iscsi_uevent *ev, uint32_t len) +{ + char *data = (char *)ev + sizeof(*ev); + struct Scsi_Host *shost; + int index; + int err = 0; + + if (!transport->new_flashnode) { + err = -ENOSYS; + goto exit_new_fnode; + } + + shost = scsi_host_lookup(ev->u.new_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.new_flashnode.host_no); + err = -ENODEV; + goto exit_new_fnode; + } + + index = transport->new_flashnode(shost, data, len); + + if (index >= 0) + ev->r.new_flashnode_ret.flashnode_idx = index; + else + err = -EIO; + + scsi_host_put(shost); + +exit_new_fnode: + return err; +} + +static int iscsi_del_flashnode(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + struct iscsi_bus_flash_session *fnode_sess; + uint32_t idx; + int err = 0; + + if (!transport->del_flashnode) { + err = -ENOSYS; + goto exit_del_fnode; + } + + shost = scsi_host_lookup(ev->u.del_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.del_flashnode.host_no); + err = -ENODEV; + goto exit_del_fnode; + } + + idx = ev->u.del_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); + if (!fnode_sess) { + pr_err("%s could not find flashnode %u for host no %u\n", + __func__, idx, ev->u.del_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + err = transport->del_flashnode(fnode_sess); + put_device(&fnode_sess->dev); + +put_host: + scsi_host_put(shost); + +exit_del_fnode: + return err; +} + +static int iscsi_login_flashnode(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + struct iscsi_bus_flash_session *fnode_sess; + struct iscsi_bus_flash_conn *fnode_conn; + struct device *dev; + uint32_t idx; + int err = 0; + + if (!transport->login_flashnode) { + err = -ENOSYS; + goto exit_login_fnode; + } + + shost = scsi_host_lookup(ev->u.login_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.login_flashnode.host_no); + err = -ENODEV; + goto exit_login_fnode; + } + + idx = ev->u.login_flashnode.flashnode_idx; + 
fnode_sess = iscsi_get_flashnode_by_index(shost, idx); + if (!fnode_sess) { + pr_err("%s could not find flashnode %u for host no %u\n", + __func__, idx, ev->u.login_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + dev = iscsi_find_flashnode_conn(fnode_sess); + if (!dev) { + err = -ENODEV; + goto put_sess; + } + + fnode_conn = iscsi_dev_to_flash_conn(dev); + err = transport->login_flashnode(fnode_sess, fnode_conn); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); + +put_host: + scsi_host_put(shost); + +exit_login_fnode: + return err; +} + +static int iscsi_logout_flashnode(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + struct iscsi_bus_flash_session *fnode_sess; + struct iscsi_bus_flash_conn *fnode_conn; + struct device *dev; + uint32_t idx; + int err = 0; + + if (!transport->logout_flashnode) { + err = -ENOSYS; + goto exit_logout_fnode; + } + + shost = scsi_host_lookup(ev->u.logout_flashnode.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.logout_flashnode.host_no); + err = -ENODEV; + goto exit_logout_fnode; + } + + idx = ev->u.logout_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); + if (!fnode_sess) { + pr_err("%s could not find flashnode %u for host no %u\n", + __func__, idx, ev->u.logout_flashnode.host_no); + err = -ENODEV; + goto put_host; + } + + dev = iscsi_find_flashnode_conn(fnode_sess); + if (!dev) { + err = -ENODEV; + goto put_sess; + } + + fnode_conn = iscsi_dev_to_flash_conn(dev); + + err = transport->logout_flashnode(fnode_sess, fnode_conn); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); + +put_host: + scsi_host_put(shost); + +exit_logout_fnode: + return err; +} + +static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + struct iscsi_cls_session *session; + int err = 0; + + if (!transport->logout_flashnode_sid) { + err = -ENOSYS; + goto exit_logout_sid; + } + + shost = scsi_host_lookup(ev->u.logout_flashnode_sid.host_no); + if (!shost) { + pr_err("%s could not find host no %u\n", + __func__, ev->u.logout_flashnode_sid.host_no); + err = -ENODEV; + goto exit_logout_sid; + } + + session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid); + if (!session) { + pr_err("%s could not find session id %u\n", + __func__, ev->u.logout_flashnode_sid.sid); + err = -EINVAL; + goto put_host; + } + + err = transport->logout_flashnode_sid(session); + +put_host: + scsi_host_put(shost); + +exit_logout_sid: + return err; +} + +static int +iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) +{ + struct iscsi_uevent *ev = nlmsg_data(nlh); + struct Scsi_Host *shost = NULL; + struct iscsi_internal *priv; + struct sk_buff *skbhost_stats; + struct nlmsghdr *nlhhost_stats; + struct iscsi_uevent *evhost_stats; + int host_stats_size = 0; + int len, err = 0; + char *buf; + + if (!transport->get_host_stats) + return -EINVAL; + + priv = iscsi_if_transport_lookup(transport); + if (!priv) + return -EINVAL; + + host_stats_size = sizeof(struct iscsi_offload_host_stats); + len = nlmsg_total_size(sizeof(*ev) + host_stats_size); + + shost = scsi_host_lookup(ev->u.get_host_stats.host_no); + if (!shost) { + pr_err("%s: failed. 
Could not find host no %u\n", + __func__, ev->u.get_host_stats.host_no); + return -ENODEV; + } + + do { + int actual_size; + + skbhost_stats = alloc_skb(len, GFP_KERNEL); + if (!skbhost_stats) { + pr_err("cannot deliver host stats: OOM\n"); + err = -ENOMEM; + goto exit_host_stats; + } + + nlhhost_stats = __nlmsg_put(skbhost_stats, 0, 0, 0, + (len - sizeof(*nlhhost_stats)), 0); + evhost_stats = nlmsg_data(nlhhost_stats); + memset(evhost_stats, 0, sizeof(*evhost_stats)); + evhost_stats->transport_handle = iscsi_handle(transport); + evhost_stats->type = nlh->nlmsg_type; + evhost_stats->u.get_host_stats.host_no = + ev->u.get_host_stats.host_no; + buf = (char *)((char *)evhost_stats + sizeof(*evhost_stats)); + memset(buf, 0, host_stats_size); + + err = transport->get_host_stats(shost, buf, host_stats_size); + + actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size); + skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size)); + nlhhost_stats->nlmsg_len = actual_size; + + err = iscsi_multicast_skb(skbhost_stats, ISCSI_NL_GRP_ISCSID, + GFP_KERNEL); + } while (err < 0 && err != -ECONNREFUSED); + +exit_host_stats: + scsi_host_put(shost); + return err; +} + + static int iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; - struct iscsi_uevent *ev = NLMSG_DATA(nlh); + struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; struct iscsi_cls_session *session; @@ -2246,6 +3636,34 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) case ISCSI_UEVENT_DELETE_CHAP: err = iscsi_delete_chap(transport, ev); break; + case ISCSI_UEVENT_SET_FLASHNODE_PARAMS: + err = iscsi_set_flashnode_param(transport, ev, + nlmsg_attrlen(nlh, + sizeof(*ev))); + break; + case ISCSI_UEVENT_NEW_FLASHNODE: + err = iscsi_new_flashnode(transport, ev, + nlmsg_attrlen(nlh, sizeof(*ev))); + break; + case ISCSI_UEVENT_DEL_FLASHNODE: + err = iscsi_del_flashnode(transport, ev); + break; + case ISCSI_UEVENT_LOGIN_FLASHNODE: + err = iscsi_login_flashnode(transport, ev); + break; + case ISCSI_UEVENT_LOGOUT_FLASHNODE: + err = iscsi_logout_flashnode(transport, ev); + break; + case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID: + err = iscsi_logout_flashnode_sid(transport, ev); + break; + case ISCSI_UEVENT_SET_CHAP: + err = iscsi_set_chap(transport, ev, + nlmsg_attrlen(nlh, sizeof(*ev))); + break; + case ISCSI_UEVENT_GET_HOST_STATS: + err = iscsi_get_host_stats(transport, nlh); + break; default: err = -ENOSYS; break; @@ -2263,7 +3681,7 @@ static void iscsi_if_rx(struct sk_buff *skb) { mutex_lock(&rx_queue_mutex); - while (skb->len >= NLMSG_SPACE(0)) { + while (skb->len >= NLMSG_HDRLEN) { int err; uint32_t rlen; struct nlmsghdr *nlh; @@ -2276,7 +3694,7 @@ iscsi_if_rx(struct sk_buff *skb) break; } - ev = NLMSG_DATA(nlh); + ev = nlmsg_data(nlh); rlen = NLMSG_ALIGN(nlh->nlmsg_len); if (rlen > skb->len) rlen = skb->len; @@ -2338,6 +3756,24 @@ iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN); iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS); iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO); iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO); +iscsi_conn_attr(local_port, ISCSI_PARAM_LOCAL_PORT); +iscsi_conn_attr(statsn, ISCSI_PARAM_STATSN); +iscsi_conn_attr(keepalive_tmo, ISCSI_PARAM_KEEPALIVE_TMO); +iscsi_conn_attr(max_segment_size, ISCSI_PARAM_MAX_SEGMENT_SIZE); +iscsi_conn_attr(tcp_timestamp_stat, ISCSI_PARAM_TCP_TIMESTAMP_STAT); +iscsi_conn_attr(tcp_wsf_disable, ISCSI_PARAM_TCP_WSF_DISABLE); 
+iscsi_conn_attr(tcp_nagle_disable, ISCSI_PARAM_TCP_NAGLE_DISABLE); +iscsi_conn_attr(tcp_timer_scale, ISCSI_PARAM_TCP_TIMER_SCALE); +iscsi_conn_attr(tcp_timestamp_enable, ISCSI_PARAM_TCP_TIMESTAMP_EN); +iscsi_conn_attr(fragment_disable, ISCSI_PARAM_IP_FRAGMENT_DISABLE); +iscsi_conn_attr(ipv4_tos, ISCSI_PARAM_IPV4_TOS); +iscsi_conn_attr(ipv6_traffic_class, ISCSI_PARAM_IPV6_TC); +iscsi_conn_attr(ipv6_flow_label, ISCSI_PARAM_IPV6_FLOW_LABEL); +iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6); +iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF); +iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF); +iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR); + #define iscsi_conn_ep_attr_show(param) \ static ssize_t show_conn_ep_param_##param(struct device *dev, \ @@ -2390,6 +3826,23 @@ static struct attribute *iscsi_conn_attrs[] = { &dev_attr_conn_persistent_port.attr, &dev_attr_conn_ping_tmo.attr, &dev_attr_conn_recv_tmo.attr, + &dev_attr_conn_local_port.attr, + &dev_attr_conn_statsn.attr, + &dev_attr_conn_keepalive_tmo.attr, + &dev_attr_conn_max_segment_size.attr, + &dev_attr_conn_tcp_timestamp_stat.attr, + &dev_attr_conn_tcp_wsf_disable.attr, + &dev_attr_conn_tcp_nagle_disable.attr, + &dev_attr_conn_tcp_timer_scale.attr, + &dev_attr_conn_tcp_timestamp_enable.attr, + &dev_attr_conn_fragment_disable.attr, + &dev_attr_conn_ipv4_tos.attr, + &dev_attr_conn_ipv6_traffic_class.attr, + &dev_attr_conn_ipv6_flow_label.attr, + &dev_attr_conn_is_fw_assigned_ipv6.attr, + &dev_attr_conn_tcp_xmit_wsf.attr, + &dev_attr_conn_tcp_recv_wsf.attr, + &dev_attr_conn_local_ipaddr.attr, NULL, }; @@ -2427,6 +3880,40 @@ static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj, param = ISCSI_PARAM_PING_TMO; else if (attr == &dev_attr_conn_recv_tmo.attr) param = ISCSI_PARAM_RECV_TMO; + else if (attr == &dev_attr_conn_local_port.attr) + param = ISCSI_PARAM_LOCAL_PORT; + else if (attr == &dev_attr_conn_statsn.attr) + param = ISCSI_PARAM_STATSN; + else if (attr == &dev_attr_conn_keepalive_tmo.attr) + param = ISCSI_PARAM_KEEPALIVE_TMO; + else if (attr == &dev_attr_conn_max_segment_size.attr) + param = ISCSI_PARAM_MAX_SEGMENT_SIZE; + else if (attr == &dev_attr_conn_tcp_timestamp_stat.attr) + param = ISCSI_PARAM_TCP_TIMESTAMP_STAT; + else if (attr == &dev_attr_conn_tcp_wsf_disable.attr) + param = ISCSI_PARAM_TCP_WSF_DISABLE; + else if (attr == &dev_attr_conn_tcp_nagle_disable.attr) + param = ISCSI_PARAM_TCP_NAGLE_DISABLE; + else if (attr == &dev_attr_conn_tcp_timer_scale.attr) + param = ISCSI_PARAM_TCP_TIMER_SCALE; + else if (attr == &dev_attr_conn_tcp_timestamp_enable.attr) + param = ISCSI_PARAM_TCP_TIMESTAMP_EN; + else if (attr == &dev_attr_conn_fragment_disable.attr) + param = ISCSI_PARAM_IP_FRAGMENT_DISABLE; + else if (attr == &dev_attr_conn_ipv4_tos.attr) + param = ISCSI_PARAM_IPV4_TOS; + else if (attr == &dev_attr_conn_ipv6_traffic_class.attr) + param = ISCSI_PARAM_IPV6_TC; + else if (attr == &dev_attr_conn_ipv6_flow_label.attr) + param = ISCSI_PARAM_IPV6_FLOW_LABEL; + else if (attr == &dev_attr_conn_is_fw_assigned_ipv6.attr) + param = ISCSI_PARAM_IS_FW_ASSIGNED_IPV6; + else if (attr == &dev_attr_conn_tcp_xmit_wsf.attr) + param = ISCSI_PARAM_TCP_XMIT_WSF; + else if (attr == &dev_attr_conn_tcp_recv_wsf.attr) + param = ISCSI_PARAM_TCP_RECV_WSF; + else if (attr == &dev_attr_conn_local_ipaddr.attr) + param = ISCSI_PARAM_LOCAL_IPADDR; else { WARN_ONCE(1, "Invalid conn attr"); return 0; @@ -2484,6 +3971,24 @@ iscsi_session_attr(tgt_reset_tmo, ISCSI_PARAM_TGT_RESET_TMO, 0); 
iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0); iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0); iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0); +iscsi_session_attr(boot_root, ISCSI_PARAM_BOOT_ROOT, 0); +iscsi_session_attr(boot_nic, ISCSI_PARAM_BOOT_NIC, 0); +iscsi_session_attr(boot_target, ISCSI_PARAM_BOOT_TARGET, 0); +iscsi_session_attr(auto_snd_tgt_disable, ISCSI_PARAM_AUTO_SND_TGT_DISABLE, 0); +iscsi_session_attr(discovery_session, ISCSI_PARAM_DISCOVERY_SESS, 0); +iscsi_session_attr(portal_type, ISCSI_PARAM_PORTAL_TYPE, 0); +iscsi_session_attr(chap_auth, ISCSI_PARAM_CHAP_AUTH_EN, 0); +iscsi_session_attr(discovery_logout, ISCSI_PARAM_DISCOVERY_LOGOUT_EN, 0); +iscsi_session_attr(bidi_chap, ISCSI_PARAM_BIDI_CHAP_EN, 0); +iscsi_session_attr(discovery_auth_optional, + ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL, 0); +iscsi_session_attr(def_time2wait, ISCSI_PARAM_DEF_TIME2WAIT, 0); +iscsi_session_attr(def_time2retain, ISCSI_PARAM_DEF_TIME2RETAIN, 0); +iscsi_session_attr(isid, ISCSI_PARAM_ISID, 0); +iscsi_session_attr(tsid, ISCSI_PARAM_TSID, 0); +iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0); +iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0); +iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0); static ssize_t show_priv_session_state(struct device *dev, struct device_attribute *attr, @@ -2579,12 +4084,29 @@ static struct attribute *iscsi_session_attrs[] = { &dev_attr_sess_ifacename.attr, &dev_attr_sess_initiatorname.attr, &dev_attr_sess_targetalias.attr, + &dev_attr_sess_boot_root.attr, + &dev_attr_sess_boot_nic.attr, + &dev_attr_sess_boot_target.attr, &dev_attr_priv_sess_recovery_tmo.attr, &dev_attr_priv_sess_state.attr, &dev_attr_priv_sess_creator.attr, &dev_attr_sess_chap_out_idx.attr, &dev_attr_sess_chap_in_idx.attr, &dev_attr_priv_sess_target_id.attr, + &dev_attr_sess_auto_snd_tgt_disable.attr, + &dev_attr_sess_discovery_session.attr, + &dev_attr_sess_portal_type.attr, + &dev_attr_sess_chap_auth.attr, + &dev_attr_sess_discovery_logout.attr, + &dev_attr_sess_bidi_chap.attr, + &dev_attr_sess_discovery_auth_optional.attr, + &dev_attr_sess_def_time2wait.attr, + &dev_attr_sess_def_time2retain.attr, + &dev_attr_sess_isid.attr, + &dev_attr_sess_tsid.attr, + &dev_attr_sess_def_taskmgmt_tmo.attr, + &dev_attr_sess_discovery_parent_idx.attr, + &dev_attr_sess_discovery_parent_type.attr, NULL, }; @@ -2642,6 +4164,40 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj, param = ISCSI_PARAM_INITIATOR_NAME; else if (attr == &dev_attr_sess_targetalias.attr) param = ISCSI_PARAM_TARGET_ALIAS; + else if (attr == &dev_attr_sess_boot_root.attr) + param = ISCSI_PARAM_BOOT_ROOT; + else if (attr == &dev_attr_sess_boot_nic.attr) + param = ISCSI_PARAM_BOOT_NIC; + else if (attr == &dev_attr_sess_boot_target.attr) + param = ISCSI_PARAM_BOOT_TARGET; + else if (attr == &dev_attr_sess_auto_snd_tgt_disable.attr) + param = ISCSI_PARAM_AUTO_SND_TGT_DISABLE; + else if (attr == &dev_attr_sess_discovery_session.attr) + param = ISCSI_PARAM_DISCOVERY_SESS; + else if (attr == &dev_attr_sess_portal_type.attr) + param = ISCSI_PARAM_PORTAL_TYPE; + else if (attr == &dev_attr_sess_chap_auth.attr) + param = ISCSI_PARAM_CHAP_AUTH_EN; + else if (attr == &dev_attr_sess_discovery_logout.attr) + param = ISCSI_PARAM_DISCOVERY_LOGOUT_EN; + else if (attr == &dev_attr_sess_bidi_chap.attr) + param = ISCSI_PARAM_BIDI_CHAP_EN; + else if (attr == &dev_attr_sess_discovery_auth_optional.attr) + param = 
ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL; + else if (attr == &dev_attr_sess_def_time2wait.attr) + param = ISCSI_PARAM_DEF_TIME2WAIT; + else if (attr == &dev_attr_sess_def_time2retain.attr) + param = ISCSI_PARAM_DEF_TIME2RETAIN; + else if (attr == &dev_attr_sess_isid.attr) + param = ISCSI_PARAM_ISID; + else if (attr == &dev_attr_sess_tsid.attr) + param = ISCSI_PARAM_TSID; + else if (attr == &dev_attr_sess_def_taskmgmt_tmo.attr) + param = ISCSI_PARAM_DEF_TASKMGMT_TMO; + else if (attr == &dev_attr_sess_discovery_parent_idx.attr) + param = ISCSI_PARAM_DISCOVERY_PARENT_IDX; + else if (attr == &dev_attr_sess_discovery_parent_type.attr) + param = ISCSI_PARAM_DISCOVERY_PARENT_TYPE; else if (attr == &dev_attr_priv_sess_recovery_tmo.attr) return S_IRUGO | S_IWUSR; else if (attr == &dev_attr_priv_sess_state.attr) @@ -2981,20 +4537,28 @@ static __init int iscsi_transport_init(void) if (err) goto unregister_conn_class; + err = bus_register(&iscsi_flashnode_bus); + if (err) + goto unregister_session_class; + nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg); if (!nls) { err = -ENOBUFS; - goto unregister_session_class; + goto unregister_flashnode_bus; } iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh"); - if (!iscsi_eh_timer_workq) + if (!iscsi_eh_timer_workq) { + err = -ENOMEM; goto release_nls; + } return 0; release_nls: netlink_kernel_release(nls); +unregister_flashnode_bus: + bus_unregister(&iscsi_flashnode_bus); unregister_session_class: transport_class_unregister(&iscsi_session_class); unregister_conn_class: @@ -3014,6 +4578,7 @@ static void __exit iscsi_transport_exit(void) { destroy_workqueue(iscsi_eh_timer_workq); netlink_kernel_release(nls); + bus_unregister(&iscsi_flashnode_bus); transport_class_unregister(&iscsi_connection_class); transport_class_unregister(&iscsi_session_class); transport_class_unregister(&iscsi_host_class); diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 1b681427dde..c341f855fad 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy) list_del(&rphy->list); mutex_unlock(&sas_host->lock); - sas_bsg_remove(shost, rphy); - transport_destroy_device(dev); put_device(dev); @@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy) } sas_rphy_unlink(rphy); + sas_bsg_remove(NULL, rphy); transport_remove_device(dev); device_del(dev); } diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index f379c7f3034..13e898332e4 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -24,12 +24,15 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/delay.h> #include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_srp.h> +#include "scsi_priv.h" #include "scsi_transport_srp_internal.h" struct srp_host_attrs { @@ -38,7 +41,7 @@ struct srp_host_attrs { #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data) #define SRP_HOST_ATTRS 0 -#define SRP_RPORT_ATTRS 3 +#define SRP_RPORT_ATTRS 8 struct srp_internal { struct scsi_transport_template t; @@ -54,6 +57,43 @@ struct srp_internal { #define dev_to_rport(d) container_of(d, struct srp_rport, dev) #define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent) +static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r) +{ + return 
dev_to_shost(r->dev.parent); +} + +/** + * srp_tmo_valid() - check timeout combination validity + * @reconnect_delay: Reconnect delay in seconds. + * @fast_io_fail_tmo: Fast I/O fail timeout in seconds. + * @dev_loss_tmo: Device loss timeout in seconds. + * + * The combination of the timeout parameters must be such that SCSI commands + * are finished in a reasonable time. Hence do not allow the fast I/O fail + * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to + * exceed that limit if failing I/O fast has been disabled. Furthermore, these + * parameters must be such that multipath can detect failed paths timely. + * Hence do not allow all three parameters to be disabled simultaneously. + */ +int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo) +{ + if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0) + return -EINVAL; + if (reconnect_delay == 0) + return -EINVAL; + if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) + return -EINVAL; + if (fast_io_fail_tmo < 0 && + dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) + return -EINVAL; + if (dev_loss_tmo >= LONG_MAX / HZ) + return -EINVAL; + if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 && + fast_io_fail_tmo >= dev_loss_tmo) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL_GPL(srp_tmo_valid); static int srp_host_setup(struct transport_container *tc, struct device *dev, struct device *cdev) @@ -134,6 +174,456 @@ static ssize_t store_srp_rport_delete(struct device *dev, static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete); +static ssize_t show_srp_rport_state(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + static const char *const state_name[] = { + [SRP_RPORT_RUNNING] = "running", + [SRP_RPORT_BLOCKED] = "blocked", + [SRP_RPORT_FAIL_FAST] = "fail-fast", + [SRP_RPORT_LOST] = "lost", + }; + struct srp_rport *rport = transport_class_to_srp_rport(dev); + enum srp_rport_state state = rport->state; + + return sprintf(buf, "%s\n", + (unsigned)state < ARRAY_SIZE(state_name) ? + state_name[state] : "???"); +} + +static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL); + +static ssize_t srp_show_tmo(char *buf, int tmo) +{ + return tmo >= 0 ? 
sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n"); +} + +static int srp_parse_tmo(int *tmo, const char *buf) +{ + int res = 0; + + if (strncmp(buf, "off", 3) != 0) + res = kstrtoint(buf, 0, tmo); + else + *tmo = -1; + + return res; +} + +static ssize_t show_reconnect_delay(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->reconnect_delay); +} + +static ssize_t store_reconnect_delay(struct device *dev, + struct device_attribute *attr, + const char *buf, const size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res, delay; + + res = srp_parse_tmo(&delay, buf); + if (res) + goto out; + res = srp_tmo_valid(delay, rport->fast_io_fail_tmo, + rport->dev_loss_tmo); + if (res) + goto out; + + if (rport->reconnect_delay <= 0 && delay > 0 && + rport->state != SRP_RPORT_RUNNING) { + queue_delayed_work(system_long_wq, &rport->reconnect_work, + delay * HZ); + } else if (delay <= 0) { + cancel_delayed_work(&rport->reconnect_work); + } + rport->reconnect_delay = delay; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay, + store_reconnect_delay); + +static ssize_t show_failed_reconnects(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return sprintf(buf, "%d\n", rport->failed_reconnects); +} + +static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL); + +static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->fast_io_fail_tmo); +} + +static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res; + int fast_io_fail_tmo; + + res = srp_parse_tmo(&fast_io_fail_tmo, buf); + if (res) + goto out; + res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo, + rport->dev_loss_tmo); + if (res) + goto out; + rport->fast_io_fail_tmo = fast_io_fail_tmo; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, + show_srp_rport_fast_io_fail_tmo, + store_srp_rport_fast_io_fail_tmo); + +static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + + return srp_show_tmo(buf, rport->dev_loss_tmo); +} + +static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct srp_rport *rport = transport_class_to_srp_rport(dev); + int res; + int dev_loss_tmo; + + res = srp_parse_tmo(&dev_loss_tmo, buf); + if (res) + goto out; + res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo, + dev_loss_tmo); + if (res) + goto out; + rport->dev_loss_tmo = dev_loss_tmo; + res = count; + +out: + return res; +} + +static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR, + show_srp_rport_dev_loss_tmo, + store_srp_rport_dev_loss_tmo); + +static int srp_rport_set_state(struct srp_rport *rport, + enum srp_rport_state new_state) +{ + enum srp_rport_state old_state = rport->state; + + lockdep_assert_held(&rport->mutex); + + switch (new_state) { + case SRP_RPORT_RUNNING: + switch (old_state) { + 
case SRP_RPORT_LOST: + goto invalid; + default: + break; + } + break; + case SRP_RPORT_BLOCKED: + switch (old_state) { + case SRP_RPORT_RUNNING: + break; + default: + goto invalid; + } + break; + case SRP_RPORT_FAIL_FAST: + switch (old_state) { + case SRP_RPORT_LOST: + goto invalid; + default: + break; + } + break; + case SRP_RPORT_LOST: + break; + } + rport->state = new_state; + return 0; + +invalid: + return -EINVAL; +} + +/** + * srp_reconnect_work() - reconnect and schedule a new attempt if necessary + * @work: Work structure used for scheduling this operation. + */ +static void srp_reconnect_work(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, reconnect_work); + struct Scsi_Host *shost = rport_to_shost(rport); + int delay, res; + + res = srp_reconnect_rport(rport); + if (res != 0) { + shost_printk(KERN_ERR, shost, + "reconnect attempt %d failed (%d)\n", + ++rport->failed_reconnects, res); + delay = rport->reconnect_delay * + min(100, max(1, rport->failed_reconnects - 10)); + if (delay > 0) + queue_delayed_work(system_long_wq, + &rport->reconnect_work, delay * HZ); + } +} + +static void __rport_fail_io_fast(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i; + + lockdep_assert_held(&rport->mutex); + + if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST)) + return; + scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); + + /* Involve the LLD if possible to terminate all I/O on the rport. */ + i = to_srp_internal(shost->transportt); + if (i->f->terminate_rport_io) + i->f->terminate_rport_io(rport); +} + +/** + * rport_fast_io_fail_timedout() - fast I/O failure timeout handler + * @work: Work structure used for scheduling this operation. + */ +static void rport_fast_io_fail_timedout(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, fast_io_fail_work); + struct Scsi_Host *shost = rport_to_shost(rport); + + pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n", + dev_name(&rport->dev), dev_name(&shost->shost_gendev)); + + mutex_lock(&rport->mutex); + if (rport->state == SRP_RPORT_BLOCKED) + __rport_fail_io_fast(rport); + mutex_unlock(&rport->mutex); +} + +/** + * rport_dev_loss_timedout() - device loss timeout handler + * @work: Work structure used for scheduling this operation. 
+ */ +static void rport_dev_loss_timedout(struct work_struct *work) +{ + struct srp_rport *rport = container_of(to_delayed_work(work), + struct srp_rport, dev_loss_work); + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i = to_srp_internal(shost->transportt); + + pr_info("dev_loss_tmo expired for SRP %s / %s.\n", + dev_name(&rport->dev), dev_name(&shost->shost_gendev)); + + mutex_lock(&rport->mutex); + WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0); + scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); + mutex_unlock(&rport->mutex); + + i->f->rport_delete(rport); +} + +static void __srp_start_tl_fail_timers(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + int delay, fast_io_fail_tmo, dev_loss_tmo; + + lockdep_assert_held(&rport->mutex); + + delay = rport->reconnect_delay; + fast_io_fail_tmo = rport->fast_io_fail_tmo; + dev_loss_tmo = rport->dev_loss_tmo; + pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev), + rport->state); + + if (rport->state == SRP_RPORT_LOST) + return; + if (delay > 0) + queue_delayed_work(system_long_wq, &rport->reconnect_work, + 1UL * delay * HZ); + if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { + pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev), + rport->state); + scsi_target_block(&shost->shost_gendev); + if (fast_io_fail_tmo >= 0) + queue_delayed_work(system_long_wq, + &rport->fast_io_fail_work, + 1UL * fast_io_fail_tmo * HZ); + if (dev_loss_tmo >= 0) + queue_delayed_work(system_long_wq, + &rport->dev_loss_work, + 1UL * dev_loss_tmo * HZ); + } +} + +/** + * srp_start_tl_fail_timers() - start the transport layer failure timers + * @rport: SRP target port. + * + * Start the transport layer fast I/O failure and device loss timers. Do not + * modify a timer that was already started. + */ +void srp_start_tl_fail_timers(struct srp_rport *rport) +{ + mutex_lock(&rport->mutex); + __srp_start_tl_fail_timers(rport); + mutex_unlock(&rport->mutex); +} +EXPORT_SYMBOL(srp_start_tl_fail_timers); + +/** + * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn() + * @shost: SCSI host for which to count the number of scsi_request_fn() callers. + */ +static int scsi_request_fn_active(struct Scsi_Host *shost) +{ + struct scsi_device *sdev; + struct request_queue *q; + int request_fn_active = 0; + + shost_for_each_device(sdev, shost) { + q = sdev->request_queue; + + spin_lock_irq(q->queue_lock); + request_fn_active += q->request_fn_active; + spin_unlock_irq(q->queue_lock); + } + + return request_fn_active; +} + +/** + * srp_reconnect_rport() - reconnect to an SRP target port + * @rport: SRP target port. + * + * Blocks SCSI command queueing before invoking reconnect() such that + * queuecommand() won't be invoked concurrently with reconnect() from outside + * the SCSI EH. This is important since a reconnect() implementation may + * reallocate resources needed by queuecommand(). + * + * Notes: + * - This function neither waits until outstanding requests have finished nor + * tries to abort these. It is the responsibility of the reconnect() + * function to finish outstanding commands before reconnecting to the target + * port. + * - It is the responsibility of the caller to ensure that the resources + * reallocated by the reconnect() function won't be used while this function + * is in progress. One possible strategy is to invoke this function from + * the context of the SCSI EH thread only. 
Another possible strategy is to + * lock the rport mutex inside each SCSI LLD callback that can be invoked by + * the SCSI EH (the scsi_host_template.eh_*() functions and also the + * scsi_host_template.queuecommand() function). + */ +int srp_reconnect_rport(struct srp_rport *rport) +{ + struct Scsi_Host *shost = rport_to_shost(rport); + struct srp_internal *i = to_srp_internal(shost->transportt); + struct scsi_device *sdev; + int res; + + pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev)); + + res = mutex_lock_interruptible(&rport->mutex); + if (res) + goto out; + scsi_target_block(&shost->shost_gendev); + while (scsi_request_fn_active(shost)) + msleep(20); + res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV; + pr_debug("%s (state %d): transport.reconnect() returned %d\n", + dev_name(&shost->shost_gendev), rport->state, res); + if (res == 0) { + cancel_delayed_work(&rport->fast_io_fail_work); + cancel_delayed_work(&rport->dev_loss_work); + + rport->failed_reconnects = 0; + srp_rport_set_state(rport, SRP_RPORT_RUNNING); + scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING); + /* + * If the SCSI error handler has offlined one or more devices, + * invoking scsi_target_unblock() won't change the state of + * these devices into running so do that explicitly. + */ + spin_lock_irq(shost->host_lock); + __shost_for_each_device(sdev, shost) + if (sdev->sdev_state == SDEV_OFFLINE) + sdev->sdev_state = SDEV_RUNNING; + spin_unlock_irq(shost->host_lock); + } else if (rport->state == SRP_RPORT_RUNNING) { + /* + * srp_reconnect_rport() has been invoked with fast_io_fail + * and dev_loss off. Mark the port as failed and start the TL + * failure timers if these had not yet been started. + */ + __rport_fail_io_fast(rport); + scsi_target_unblock(&shost->shost_gendev, + SDEV_TRANSPORT_OFFLINE); + __srp_start_tl_fail_timers(rport); + } else if (rport->state != SRP_RPORT_BLOCKED) { + scsi_target_unblock(&shost->shost_gendev, + SDEV_TRANSPORT_OFFLINE); + } + mutex_unlock(&rport->mutex); + +out: + return res; +} +EXPORT_SYMBOL(srp_reconnect_rport); + +/** + * srp_timed_out() - SRP transport intercept of the SCSI timeout EH + * @scmd: SCSI command. + * + * If a timeout occurs while an rport is in the blocked state, ask the SCSI + * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core + * handle the timeout (BLK_EH_NOT_HANDLED). + * + * Note: This function is called from soft-IRQ context and with the request + * queue lock held. + */ +static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) +{ + struct scsi_device *sdev = scmd->device; + struct Scsi_Host *shost = sdev->host; + struct srp_internal *i = to_srp_internal(shost->transportt); + + pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); + return i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? + BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; +} + static void srp_rport_release(struct device *dev) { struct srp_rport *rport = dev_to_rport(dev); @@ -185,6 +675,26 @@ static int srp_host_match(struct attribute_container *cont, struct device *dev) } /** + * srp_rport_get() - increment rport reference count + * @rport: SRP target port. + */ +void srp_rport_get(struct srp_rport *rport) +{ + get_device(&rport->dev); +} +EXPORT_SYMBOL(srp_rport_get); + +/** + * srp_rport_put() - decrement rport reference count + * @rport: SRP target port. 
+ */ +void srp_rport_put(struct srp_rport *rport) +{ + put_device(&rport->dev); +} +EXPORT_SYMBOL(srp_rport_put); + +/** * srp_rport_add - add a SRP remote port to the device hierarchy * @shost: scsi host the remote port is connected to. * @ids: The port id for the remote port. @@ -196,12 +706,15 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, { struct srp_rport *rport; struct device *parent = &shost->shost_gendev; + struct srp_internal *i = to_srp_internal(shost->transportt); int id, ret; rport = kzalloc(sizeof(*rport), GFP_KERNEL); if (!rport) return ERR_PTR(-ENOMEM); + mutex_init(&rport->mutex); + device_initialize(&rport->dev); rport->dev.parent = get_device(parent); @@ -210,6 +723,17 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id)); rport->roles = ids->roles; + if (i->f->reconnect) + rport->reconnect_delay = i->f->reconnect_delay ? + *i->f->reconnect_delay : 10; + INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work); + rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ? + *i->f->fast_io_fail_tmo : 15; + rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60; + INIT_DELAYED_WORK(&rport->fast_io_fail_work, + rport_fast_io_fail_timedout); + INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout); + id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); @@ -259,6 +783,7 @@ void srp_rport_del(struct srp_rport *rport) transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); + put_device(dev); } EXPORT_SYMBOL_GPL(srp_rport_del); @@ -283,6 +808,28 @@ void srp_remove_host(struct Scsi_Host *shost) } EXPORT_SYMBOL_GPL(srp_remove_host); +/** + * srp_stop_rport_timers - stop the transport layer recovery timers + * @rport: SRP remote port for which to stop the timers. + * + * Must be called after srp_remove_host() and scsi_remove_host(). The caller + * must hold a reference on the rport (rport->dev) and on the SCSI host + * (rport->dev.parent). 
+ */ +void srp_stop_rport_timers(struct srp_rport *rport) +{ + mutex_lock(&rport->mutex); + if (rport->state == SRP_RPORT_BLOCKED) + __rport_fail_io_fast(rport); + srp_rport_set_state(rport, SRP_RPORT_LOST); + mutex_unlock(&rport->mutex); + + cancel_delayed_work_sync(&rport->reconnect_work); + cancel_delayed_work_sync(&rport->fast_io_fail_work); + cancel_delayed_work_sync(&rport->dev_loss_work); +} +EXPORT_SYMBOL_GPL(srp_stop_rport_timers); + static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id, int result) { @@ -310,6 +857,8 @@ srp_attach_transport(struct srp_function_template *ft) if (!i) return NULL; + i->t.eh_timed_out = srp_timed_out; + i->t.tsk_mgmt_response = srp_tsk_mgmt_response; i->t.it_nexus_response = srp_it_nexus_response; @@ -327,6 +876,15 @@ srp_attach_transport(struct srp_function_template *ft) count = 0; i->rport_attrs[count++] = &dev_attr_port_id; i->rport_attrs[count++] = &dev_attr_roles; + if (ft->has_rport_state) { + i->rport_attrs[count++] = &dev_attr_state; + i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo; + i->rport_attrs[count++] = &dev_attr_dev_loss_tmo; + } + if (ft->reconnect) { + i->rport_attrs[count++] = &dev_attr_reconnect_delay; + i->rport_attrs[count++] = &dev_attr_failed_reconnects; + } if (ft->rport_delete) i->rport_attrs[count++] = &dev_attr_delete; i->rport_attrs[count++] = NULL; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 7992635d405..6825eda1114 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -105,11 +105,14 @@ static void sd_unlock_native_capacity(struct gendisk *disk); static int sd_probe(struct device *); static int sd_remove(struct device *); static void sd_shutdown(struct device *); -static int sd_suspend(struct device *); +static int sd_suspend_system(struct device *); +static int sd_suspend_runtime(struct device *); static int sd_resume(struct device *); static void sd_rescan(struct device *); +static int sd_init_command(struct scsi_cmnd *SCpnt); +static void sd_uninit_command(struct scsi_cmnd *SCpnt); static int sd_done(struct scsi_cmnd *); -static int sd_eh_action(struct scsi_cmnd *, unsigned char *, int, int); +static int sd_eh_action(struct scsi_cmnd *, int); static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); static void scsi_disk_release(struct device *cdev); static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); @@ -132,8 +135,8 @@ static const char *sd_cache_types[] = { }; static ssize_t -sd_store_cache_type(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +cache_type_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { int i, ct = -1, rcd, wce, sp; struct scsi_disk *sdkp = to_scsi_disk(dev); @@ -142,6 +145,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, char *buffer_data; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; + static const char temp[] = "temporary "; int len; if (sdp->type != TYPE_DISK) @@ -150,6 +154,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, * it's not worth the risk */ return -EINVAL; + if (strncmp(buf, temp, sizeof(temp) - 1) == 0) { + buf += sizeof(temp) - 1; + sdkp->cache_override = 1; + } else { + sdkp->cache_override = 0; + } + for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) { len = strlen(sd_cache_types[i]); if (strncmp(sd_cache_types[i], buf, len) == 0 && @@ -162,6 +173,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, return -EINVAL; rcd = ct & 
0x01 ? 1 : 0; wce = ct & 0x02 ? 1 : 0; + + if (sdkp->cache_override) { + sdkp->WCE = wce; + sdkp->RCD = rcd; + return count; + } + if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT, SD_MAX_RETRIES, &data, NULL)) return -EINVAL; @@ -184,8 +202,18 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, } static ssize_t -sd_store_manage_start_stop(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +manage_start_stop_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + + return snprintf(buf, 20, "%u\n", sdp->manage_start_stop); +} + +static ssize_t +manage_start_stop_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -197,10 +225,19 @@ sd_store_manage_start_stop(struct device *dev, struct device_attribute *attr, return count; } +static DEVICE_ATTR_RW(manage_start_stop); + +static ssize_t +allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart); +} static ssize_t -sd_store_allow_restart(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +allow_restart_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -215,47 +252,30 @@ sd_store_allow_restart(struct device *dev, struct device_attribute *attr, return count; } +static DEVICE_ATTR_RW(allow_restart); static ssize_t -sd_show_cache_type(struct device *dev, struct device_attribute *attr, - char *buf) +cache_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); int ct = sdkp->RCD + 2*sdkp->WCE; return snprintf(buf, 40, "%s\n", sd_cache_types[ct]); } +static DEVICE_ATTR_RW(cache_type); static ssize_t -sd_show_fua(struct device *dev, struct device_attribute *attr, char *buf) +FUA_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return snprintf(buf, 20, "%u\n", sdkp->DPOFUA); } +static DEVICE_ATTR_RO(FUA); static ssize_t -sd_show_manage_start_stop(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct scsi_disk *sdkp = to_scsi_disk(dev); - struct scsi_device *sdp = sdkp->device; - - return snprintf(buf, 20, "%u\n", sdp->manage_start_stop); -} - -static ssize_t -sd_show_allow_restart(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct scsi_disk *sdkp = to_scsi_disk(dev); - - return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart); -} - -static ssize_t -sd_show_protection_type(struct device *dev, struct device_attribute *attr, - char *buf) +protection_type_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); @@ -263,8 +283,8 @@ sd_show_protection_type(struct device *dev, struct device_attribute *attr, } static ssize_t -sd_store_protection_type(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +protection_type_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); unsigned int val; @@ -283,10 +303,11 @@ 
sd_store_protection_type(struct device *dev, struct device_attribute *attr, return count; } +static DEVICE_ATTR_RW(protection_type); static ssize_t -sd_show_protection_mode(struct device *dev, struct device_attribute *attr, - char *buf) +protection_mode_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -305,24 +326,26 @@ sd_show_protection_mode(struct device *dev, struct device_attribute *attr, return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif); } +static DEVICE_ATTR_RO(protection_mode); static ssize_t -sd_show_app_tag_own(struct device *dev, struct device_attribute *attr, - char *buf) +app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return snprintf(buf, 20, "%u\n", sdkp->ATO); } +static DEVICE_ATTR_RO(app_tag_own); static ssize_t -sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr, - char *buf) +thin_provisioning_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); return snprintf(buf, 20, "%u\n", sdkp->lbpme); } +static DEVICE_ATTR_RO(thin_provisioning); static const char *lbp_mode[] = { [SD_LBP_FULL] = "full", @@ -334,8 +357,8 @@ static const char *lbp_mode[] = { }; static ssize_t -sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr, - char *buf) +provisioning_mode_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); @@ -343,8 +366,8 @@ sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr, } static ssize_t -sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +provisioning_mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -370,10 +393,11 @@ sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr, return count; } +static DEVICE_ATTR_RW(provisioning_mode); static ssize_t -sd_show_max_medium_access_timeouts(struct device *dev, - struct device_attribute *attr, char *buf) +max_medium_access_timeouts_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); @@ -381,9 +405,9 @@ sd_show_max_medium_access_timeouts(struct device *dev, } static ssize_t -sd_store_max_medium_access_timeouts(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +max_medium_access_timeouts_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); int err; @@ -395,10 +419,11 @@ sd_store_max_medium_access_timeouts(struct device *dev, return err ? 
err : count; } +static DEVICE_ATTR_RW(max_medium_access_timeouts); static ssize_t -sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr, - char *buf) +max_write_same_blocks_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct scsi_disk *sdkp = to_scsi_disk(dev); @@ -406,8 +431,8 @@ sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr, } static ssize_t -sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +max_write_same_blocks_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct scsi_disk *sdkp = to_scsi_disk(dev); struct scsi_device *sdp = sdkp->device; @@ -427,50 +452,46 @@ sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr, if (max == 0) sdp->no_write_same = 1; - else if (max <= SD_MAX_WS16_BLOCKS) + else if (max <= SD_MAX_WS16_BLOCKS) { + sdp->no_write_same = 0; sdkp->max_ws_blocks = max; + } sd_config_write_same(sdkp); return count; } - -static struct device_attribute sd_disk_attrs[] = { - __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, - sd_store_cache_type), - __ATTR(FUA, S_IRUGO, sd_show_fua, NULL), - __ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart, - sd_store_allow_restart), - __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop, - sd_store_manage_start_stop), - __ATTR(protection_type, S_IRUGO|S_IWUSR, sd_show_protection_type, - sd_store_protection_type), - __ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL), - __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL), - __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL), - __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode, - sd_store_provisioning_mode), - __ATTR(max_write_same_blocks, S_IRUGO|S_IWUSR, - sd_show_write_same_blocks, sd_store_write_same_blocks), - __ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR, - sd_show_max_medium_access_timeouts, - sd_store_max_medium_access_timeouts), - __ATTR_NULL, +static DEVICE_ATTR_RW(max_write_same_blocks); + +static struct attribute *sd_disk_attrs[] = { + &dev_attr_cache_type.attr, + &dev_attr_FUA.attr, + &dev_attr_allow_restart.attr, + &dev_attr_manage_start_stop.attr, + &dev_attr_protection_type.attr, + &dev_attr_protection_mode.attr, + &dev_attr_app_tag_own.attr, + &dev_attr_thin_provisioning.attr, + &dev_attr_provisioning_mode.attr, + &dev_attr_max_write_same_blocks.attr, + &dev_attr_max_medium_access_timeouts.attr, + NULL, }; +ATTRIBUTE_GROUPS(sd_disk); static struct class sd_disk_class = { .name = "scsi_disk", .owner = THIS_MODULE, .dev_release = scsi_disk_release, - .dev_attrs = sd_disk_attrs, + .dev_groups = sd_disk_groups, }; static const struct dev_pm_ops sd_pm_ops = { - .suspend = sd_suspend, + .suspend = sd_suspend_system, .resume = sd_resume, - .poweroff = sd_suspend, + .poweroff = sd_suspend_system, .restore = sd_resume, - .runtime_suspend = sd_suspend, + .runtime_suspend = sd_suspend_runtime, .runtime_resume = sd_resume, }; @@ -484,11 +505,23 @@ static struct scsi_driver sd_template = { .pm = &sd_pm_ops, }, .rescan = sd_rescan, + .init_command = sd_init_command, + .uninit_command = sd_uninit_command, .done = sd_done, .eh_action = sd_eh_action, }; /* + * Dummy kobj_map->probe function. + * The default ->probe function will call modprobe, which is + * pointless as this module is already loaded. 
+ */ +static struct kobject *sd_default_probe(dev_t devt, int *partno, void *data) +{ + return NULL; +} + +/* * Device no to disk mapping: * * major disc2 disc p1 @@ -708,16 +741,14 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) goto out; } + rq->completion_data = page; blk_add_request_payload(rq, page, len); ret = scsi_setup_blk_pc_cmnd(sdp, rq); - rq->buffer = page_address(page); rq->__data_len = nr_bytes; out: - if (ret != BLKPREP_OK) { + if (ret != BLKPREP_OK) __free_page(page); - rq->buffer = NULL; - } return ret; } @@ -725,7 +756,6 @@ static void sd_config_write_same(struct scsi_disk *sdkp) { struct request_queue *q = sdkp->disk->queue; unsigned int logical_block_size = sdkp->device->sector_size; - unsigned int blocks = 0; if (sdkp->device->no_write_same) { sdkp->max_ws_blocks = 0; @@ -737,18 +767,20 @@ static void sd_config_write_same(struct scsi_disk *sdkp) * blocks per I/O unless the device explicitly advertises a * bigger limit. */ - if (sdkp->max_ws_blocks == 0) - sdkp->max_ws_blocks = SD_MAX_WS10_BLOCKS; - - if (sdkp->ws16 || sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS) - blocks = min_not_zero(sdkp->max_ws_blocks, - (u32)SD_MAX_WS16_BLOCKS); - else - blocks = min_not_zero(sdkp->max_ws_blocks, - (u32)SD_MAX_WS10_BLOCKS); + if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS) + sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS16_BLOCKS); + else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes) + sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks, + (u32)SD_MAX_WS10_BLOCKS); + else { + sdkp->device->no_write_same = 1; + sdkp->max_ws_blocks = 0; + } out: - blk_queue_max_write_same_sectors(q, blocks * (logical_block_size >> 9)); + blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks * + (logical_block_size >> 9)); } /** @@ -771,7 +803,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq) if (sdkp->device->no_write_same) return BLKPREP_KILL; - BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size); + BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); sector >>= ilog2(sdp->sector_size) - 9; nr_sectors >>= ilog2(sdp->sector_size) - 9; @@ -800,7 +832,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq) static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) { - rq->timeout = SD_FLUSH_TIMEOUT; + rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER; rq->retries = SD_MAX_RETRIES; rq->cmd[0] = SYNCHRONIZE_CACHE; rq->cmd_len = 10; @@ -808,26 +840,24 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) return scsi_setup_blk_pc_cmnd(sdp, rq); } -static void sd_unprep_fn(struct request_queue *q, struct request *rq) +static void sd_uninit_command(struct scsi_cmnd *SCpnt) { - if (rq->cmd_flags & REQ_DISCARD) { - free_page((unsigned long)rq->buffer); - rq->buffer = NULL; + struct request *rq = SCpnt->request; + + if (rq->cmd_flags & REQ_DISCARD) + __free_page(rq->completion_data); + + if (SCpnt->cmnd != rq->cmd) { + mempool_free(SCpnt->cmnd, sd_cdb_pool); + SCpnt->cmnd = NULL; + SCpnt->cmd_len = 0; } } -/** - * sd_prep_fn - build a scsi (read or write) command from - * information in the request structure. - * @SCpnt: pointer to mid-level's per scsi command structure that - * contains request and into which the scsi command is written - * - * Returns 1 if successful and 0 if error (or cannot be done now). 
- **/ -static int sd_prep_fn(struct request_queue *q, struct request *rq) +static int sd_init_command(struct scsi_cmnd *SCpnt) { - struct scsi_cmnd *SCpnt; - struct scsi_device *sdp = q->queuedata; + struct request *rq = SCpnt->request; + struct scsi_device *sdp = SCpnt->device; struct gendisk *disk = rq->rq_disk; struct scsi_disk *sdkp; sector_t block = blk_rq_pos(rq); @@ -849,12 +879,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) } else if (rq->cmd_flags & REQ_FLUSH) { ret = scsi_setup_flush_cmnd(sdp, rq); goto out; - } else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { - ret = scsi_setup_blk_pc_cmnd(sdp, rq); - goto out; - } else if (rq->cmd_type != REQ_TYPE_FS) { - ret = BLKPREP_KILL; - goto out; } ret = scsi_setup_fs_cmnd(sdp, rq); if (ret != BLKPREP_OK) @@ -866,11 +890,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) * is used for a killable error condition */ ret = BLKPREP_KILL; - SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt, - "sd_prep_fn: block=%llu, " - "count=%d\n", - (unsigned long long)block, - this_count)); + SCSI_LOG_HLQUEUE(1, + scmd_printk(KERN_INFO, SCpnt, + "%s: block=%llu, count=%d\n", + __func__, (unsigned long long)block, this_count)); if (!sdp || !scsi_device_online(sdp) || block + blk_rq_sectors(rq) > get_capacity(disk)) { @@ -966,7 +989,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) SCpnt->cmnd[0] = READ_6; SCpnt->sc_data_direction = DMA_FROM_DEVICE; } else { - scmd_printk(KERN_ERR, SCpnt, "Unknown command %x\n", rq->cmd_flags); + scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags); goto out; } @@ -1090,7 +1113,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) */ ret = BLKPREP_OK; out: - return scsi_prep_return(q, rq, ret); + return ret; } /** @@ -1121,10 +1144,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode) sdev = sdkp->device; - retval = scsi_autopm_get_device(sdev); - if (retval) - goto error_autopm; - /* * If the device is in error recovery, wait until it is done. * If the device is offline, then disallow any access to it. @@ -1169,8 +1188,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode) return 0; error_out: - scsi_autopm_put_device(sdev); -error_autopm: scsi_disk_put(sdkp); return retval; } @@ -1188,7 +1205,7 @@ error_autopm: * * Locking: called with bdev->bd_mutex held. **/ -static int sd_release(struct gendisk *disk, fmode_t mode) +static void sd_release(struct gendisk *disk, fmode_t mode) { struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdev = sdkp->device; @@ -1205,9 +1222,7 @@ static int sd_release(struct gendisk *disk, fmode_t mode) * XXX is followed by a "rmmod sd_mod"? 
*/ - scsi_autopm_put_device(sdev); scsi_disk_put(sdkp); - return 0; } static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) @@ -1367,14 +1382,9 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) retval = -ENODEV; if (scsi_block_when_processing_errors(sdp)) { - retval = scsi_autopm_get_device(sdp); - if (retval) - goto out; - sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, sshdr); - scsi_autopm_put_device(sdp); } /* failed to execute TUR, assume media not present */ @@ -1410,12 +1420,13 @@ static int sd_sync_cache(struct scsi_disk *sdkp) { int retries, res; struct scsi_device *sdp = sdkp->device; + const int timeout = sdp->request_queue->rq_timeout + * SD_FLUSH_TIMEOUT_MULTIPLIER; struct scsi_sense_hdr sshdr; if (!scsi_device_online(sdp)) return -ENODEV; - for (retries = 3; retries > 0; --retries) { unsigned char cmd[10] = { 0 }; @@ -1424,20 +1435,40 @@ static int sd_sync_cache(struct scsi_disk *sdkp) * Leave the rest of the command zero to indicate * flush everything. */ - res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL); + res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, + &sshdr, timeout, SD_MAX_RETRIES, + NULL, REQ_PM); if (res == 0) break; } if (res) { sd_print_result(sdkp, res); + if (driver_byte(res) & DRIVER_SENSE) sd_print_sense_hdr(sdkp, &sshdr); + /* we need to evaluate the error return */ + if (scsi_sense_valid(&sshdr) && + (sshdr.asc == 0x3a || /* medium not present */ + sshdr.asc == 0x20)) /* invalid command */ + /* this is no error here */ + return 0; + + switch (host_byte(res)) { + /* ignore errors due to racing a disconnection */ + case DID_BAD_TARGET: + case DID_NO_CONNECT: + return 0; + /* signal the upper layer it might try again */ + case DID_BUS_BUSY: + case DID_IMM_RETRY: + case DID_REQUEUE: + case DID_SOFT_ERROR: + return -EBUSY; + default: + return -EIO; + } } - - if (res) - return -EIO; return 0; } @@ -1506,23 +1537,23 @@ static const struct block_device_operations sd_fops = { /** * sd_eh_action - error handling callback * @scmd: sd-issued command that has failed - * @eh_cmnd: The command that was sent during error handling - * @eh_cmnd_len: Length of eh_cmnd in bytes * @eh_disp: The recovery disposition suggested by the midlayer * - * This function is called by the SCSI midlayer upon completion of - * an error handling command (TEST UNIT READY, START STOP UNIT, - * etc.) The command sent to the device by the error handler is - * stored in eh_cmnd. The result of sending the eh command is - * passed in eh_disp. + * This function is called by the SCSI midlayer upon completion of an + * error test command (currently TEST UNIT READY). The result of sending + * the eh command is passed in eh_disp. 
We're looking for devices that
+ * fail medium access commands but are OK with non-access commands like
+ * TEST UNIT READY (and so are wrongly seen as having recovered
+ * successfully).
  **/
-static int sd_eh_action(struct scsi_cmnd *scmd, unsigned char *eh_cmnd,
-			int eh_cmnd_len, int eh_disp)
+static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
 {
 	struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
 
 	if (!scsi_device_online(scmd->device) ||
-	    !scsi_medium_access_command(scmd))
+	    !scsi_medium_access_command(scmd) ||
+	    host_byte(scmd->result) != DID_TIME_OUT ||
+	    eh_disp != SUCCESS)
 		return eh_disp;
 
 	/*
@@ -1532,9 +1563,7 @@ static int sd_eh_action(struct scsi_cmnd *scmd, unsigned char *eh_cmnd,
 	 * process of recovering or has it suffered an internal failure
 	 * that prevents access to the storage medium.
 	 */
-	if (host_byte(scmd->result) == DID_TIME_OUT && eh_disp == SUCCESS &&
-	    eh_cmnd_len && eh_cmnd[0] == TEST_UNIT_READY)
-		sdkp->medium_access_timed_out++;
+	sdkp->medium_access_timed_out++;
 
 	/*
 	 * If the device keeps failing read/write commands but TEST UNIT
@@ -1583,7 +1612,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
 		end_lba <<= 1;
 	} else {
 		/* be careful ... don't want any overflows */
-		u64 factor = scmd->device->sector_size / 512;
+		unsigned int factor = scmd->device->sector_size / 512;
 		do_div(start_lba, factor);
 		do_div(end_lba, factor);
 	}
@@ -1646,12 +1675,12 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 					   sshdr.ascq));
 	}
 #endif
+	sdkp->medium_access_timed_out = 0;
+
 	if (driver_byte(result) != DRIVER_SENSE &&
 	    (!sense_valid || sense_deferred))
 		goto out;
 
-	sdkp->medium_access_timed_out = 0;
-
 	switch (sshdr.sense_key) {
 	case HARDWARE_ERROR:
 	case MEDIUM_ERROR:
@@ -1704,21 +1733,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 	if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
 		sd_dif_complete(SCpnt, good_bytes);
 
-	if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
-	    == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
-
-		/* We have to print a failed command here as the
-		 * extended CDB gets freed before scsi_io_completion()
-		 * is called.
- */ - if (result) - scsi_print_command(SCpnt); - - mempool_free(SCpnt->cmnd, sd_cdb_pool); - SCpnt->cmnd = NULL; - SCpnt->cmd_len = 0; - } - return good_bytes; } @@ -2253,7 +2267,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) set_disk_ro(sdkp->disk, 0); if (sdp->skip_ms_page_3f) { - sd_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n"); + sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n"); return; } @@ -2285,7 +2299,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) } if (!scsi_status_is_good(res)) { - sd_printk(KERN_WARNING, sdkp, + sd_first_printk(KERN_WARNING, sdkp, "Test WP failed, assume Write Enabled\n"); } else { sdkp->write_prot = ((data.device_specific & 0x80) != 0); @@ -2319,6 +2333,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) int old_rcd = sdkp->RCD; int old_dpofua = sdkp->DPOFUA; + + if (sdkp->cache_override) + return; + first_len = 4; if (sdp->skip_ms_page_8) { if (sdp->type == TYPE_RBC) @@ -2349,7 +2367,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) if (!data.header_length) { modepage = 6; first_len = 0; - sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n"); + sd_first_printk(KERN_ERR, sdkp, + "Missing header in MODE_SENSE response\n"); } /* that went OK, now ask for the proper length */ @@ -2362,7 +2381,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) if (len < 3) goto bad_sense; else if (len > SD_BUF_SIZE) { - sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " + sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " "data from %d to %d bytes\n", len, SD_BUF_SIZE); len = SD_BUF_SIZE; } @@ -2385,8 +2404,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) /* We're interested only in the first 3 bytes. 
*/ if (len - offset <= 2) { - sd_printk(KERN_ERR, sdkp, "Incomplete " - "mode parameter data\n"); + sd_first_printk(KERN_ERR, sdkp, + "Incomplete mode parameter " + "data\n"); goto defaults; } else { modepage = page_code; @@ -2400,21 +2420,17 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) else if (!spf && len - offset > 1) offset += 2 + buffer[offset+1]; else { - sd_printk(KERN_ERR, sdkp, "Incomplete " - "mode parameter data\n"); + sd_first_printk(KERN_ERR, sdkp, + "Incomplete mode " + "parameter data\n"); goto defaults; } } } - if (modepage == 0x3F) { - sd_printk(KERN_ERR, sdkp, "No Caching mode page " - "present\n"); - goto defaults; - } else if ((buffer[offset] & 0x3f) != modepage) { - sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); - goto defaults; - } + sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n"); + goto defaults; + Page_found: if (modepage == 8) { sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); @@ -2425,8 +2441,11 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) } sdkp->DPOFUA = (data.device_specific & 0x10) != 0; - if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { - sd_printk(KERN_NOTICE, sdkp, + if (sdp->broken_fua) { + sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); + sdkp->DPOFUA = 0; + } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { + sd_first_printk(KERN_NOTICE, sdkp, "Uses READ/WRITE(6), disabling FUA\n"); sdkp->DPOFUA = 0; } @@ -2448,16 +2467,19 @@ bad_sense: sshdr.sense_key == ILLEGAL_REQUEST && sshdr.asc == 0x24 && sshdr.ascq == 0x0) /* Invalid field in CDB */ - sd_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n"); + sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n"); else - sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n"); + sd_first_printk(KERN_ERR, sdkp, + "Asking for cache data failed\n"); defaults: if (sdp->wce_default_on) { - sd_printk(KERN_NOTICE, sdkp, "Assuming drive cache: write back\n"); + sd_first_printk(KERN_NOTICE, sdkp, + "Assuming drive cache: write back\n"); sdkp->WCE = 1; } else { - sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n"); + sd_first_printk(KERN_ERR, sdkp, + "Assuming drive cache: write through\n"); sdkp->WCE = 0; } sdkp->RCD = 0; @@ -2486,7 +2508,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) if (!scsi_status_is_good(res) || !data.header_length || data.length < 6) { - sd_printk(KERN_WARNING, sdkp, + sd_first_printk(KERN_WARNING, sdkp, "getting Control mode page failed, assume no ATO\n"); if (scsi_sense_valid(&sshdr)) @@ -2498,7 +2520,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) offset = data.header_length + data.block_descriptor_length; if ((buffer[offset] & 0x3f) != 0x0a) { - sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n"); + sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n"); return; } @@ -2628,9 +2650,33 @@ static void sd_read_block_provisioning(struct scsi_disk *sdkp) static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) { - if (scsi_report_opcode(sdkp->device, buffer, SD_BUF_SIZE, - WRITE_SAME_16)) + struct scsi_device *sdev = sdkp->device; + + if (sdev->host->no_write_same) { + sdev->no_write_same = 1; + + return; + } + + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) { + /* too large values might cause issues with arcmsr */ + int vpd_buf_len = 64; + + sdev->no_report_opcodes = 1; + + /* Disable WRITE SAME if REPORT SUPPORTED OPERATION + * CODES is unsupported and the device has an ATA + * 
Information VPD page (SAT). + */ + if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len)) + sdev->no_write_same = 1; + } + + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1) sdkp->ws16 = 1; + + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1) + sdkp->ws10 = 1; } static int sd_try_extended_inquiry(struct scsi_device *sdp) @@ -2812,6 +2858,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sdkp->capacity = 0; sdkp->media_present = 1; sdkp->write_prot = 0; + sdkp->cache_override = 0; sdkp->WCE = 0; sdkp->RCD = 0; sdkp->ATO = 0; @@ -2820,9 +2867,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sd_revalidate_disk(gd); - blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); - blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn); - gd->driverfs_dev = &sdp->sdev_gendev; gd->flags = GENHD_FL_EXT_DEVT; if (sdp->removable) { @@ -2830,6 +2874,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) gd->events |= DISK_EVENT_MEDIA_CHANGE; } + blk_pm_runtime_init(sdp->request_queue, dev); add_disk(gd); if (sdkp->capacity) sd_dif_config_host(sdkp); @@ -2922,7 +2967,7 @@ static int sd_probe(struct device *dev) device_initialize(&sdkp->dev); sdkp->dev.parent = dev; sdkp->dev.class = &sd_disk_class; - dev_set_name(&sdkp->dev, dev_name(dev)); + dev_set_name(&sdkp->dev, "%s", dev_name(dev)); if (device_add(&sdkp->dev)) goto out_free_index; @@ -2961,17 +3006,21 @@ static int sd_probe(struct device *dev) static int sd_remove(struct device *dev) { struct scsi_disk *sdkp; + dev_t devt; sdkp = dev_get_drvdata(dev); + devt = disk_devt(sdkp->disk); scsi_autopm_get_device(sdkp->device); + async_synchronize_full_domain(&scsi_sd_pm_domain); async_synchronize_full_domain(&scsi_sd_probe_domain); - blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn); - blk_queue_unprep_rq(sdkp->device->request_queue, NULL); device_del(&sdkp->dev); del_gendisk(sdkp->disk); sd_shutdown(dev); + blk_register_region(devt, SD_MINORS, NULL, + sd_default_probe, NULL, NULL); + mutex_lock(&sd_ref_mutex); dev_set_drvdata(dev, NULL); put_device(&sdkp->dev); @@ -3021,16 +3070,24 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) if (!scsi_device_online(sdp)) return -ENODEV; - res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_TIMEOUT, SD_MAX_RETRIES, NULL); + res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, + SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM); if (res) { sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n"); sd_print_result(sdkp, res); if (driver_byte(res) & DRIVER_SENSE) sd_print_sense_hdr(sdkp, &sshdr); + if (scsi_sense_valid(&sshdr) && + /* 0x3a is medium not present */ + sshdr.asc == 0x3a) + res = 0; } - return res; + /* SCSI error codes must not go to the generic layer */ + if (res) + return -EIO; + + return 0; } /* @@ -3048,7 +3105,7 @@ static void sd_shutdown(struct device *dev) if (pm_runtime_suspended(dev)) goto exit; - if (sdkp->WCE) { + if (sdkp->WCE && sdkp->media_present) { sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); sd_sync_cache(sdkp); } @@ -3062,7 +3119,7 @@ exit: scsi_disk_put(sdkp); } -static int sd_suspend(struct device *dev) +static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) { struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); int ret = 0; @@ -3070,16 +3127,23 @@ static int sd_suspend(struct device *dev) if (!sdkp) return 0; /* this can happen */ - if (sdkp->WCE) { + if (sdkp->WCE && sdkp->media_present) { sd_printk(KERN_NOTICE, sdkp, 
"Synchronizing SCSI cache\n"); ret = sd_sync_cache(sdkp); - if (ret) + if (ret) { + /* ignore OFFLINE device */ + if (ret == -ENODEV) + ret = 0; goto done; + } } if (sdkp->device->manage_start_stop) { sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); + /* an error is not worth aborting a system sleep */ ret = sd_start_stop_device(sdkp, 0); + if (ignore_stop_errors) + ret = 0; } done: @@ -3087,6 +3151,16 @@ done: return ret; } +static int sd_suspend_system(struct device *dev) +{ + return sd_suspend_common(dev, true); +} + +static int sd_suspend_runtime(struct device *dev) +{ + return sd_suspend_common(dev, false); +} + static int sd_resume(struct device *dev) { struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); @@ -3115,9 +3189,13 @@ static int __init init_sd(void) SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n")); - for (i = 0; i < SD_MAJORS; i++) - if (register_blkdev(sd_major(i), "sd") == 0) - majors++; + for (i = 0; i < SD_MAJORS; i++) { + if (register_blkdev(sd_major(i), "sd") != 0) + continue; + majors++; + blk_register_region(sd_major(i), SD_MINORS, NULL, + sd_default_probe, NULL, NULL); + } if (!majors) return -ENODEV; @@ -3176,8 +3254,10 @@ static void __exit exit_sd(void) class_unregister(&sd_disk_class); - for (i = 0; i < SD_MAJORS; i++) + for (i = 0; i < SD_MAJORS; i++) { + blk_unregister_region(sd_major(i), SD_MINORS); unregister_blkdev(sd_major(i), "sd"); + } } module_init(init_sd); diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 74a1e4ca540..620871efbf0 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -13,7 +13,11 @@ */ #define SD_TIMEOUT (30 * HZ) #define SD_MOD_TIMEOUT (75 * HZ) -#define SD_FLUSH_TIMEOUT (60 * HZ) +/* + * Flush timeout is a multiplier over the standard device timeout which is + * user modifiable via sysfs but initially set to SD_TIMEOUT + */ +#define SD_FLUSH_TIMEOUT_MULTIPLIER 2 #define SD_WRITE_SAME_TIMEOUT (120 * HZ) /* @@ -73,6 +77,7 @@ struct scsi_disk { u8 protection_type;/* Data Integrity Field */ u8 provisioning_mode; unsigned ATO : 1; /* state of disk ATO bit */ + unsigned cache_override : 1; /* temp override of WCE,RCD */ unsigned WCE : 1; /* state of disk WCE bit */ unsigned RCD : 1; /* state of disk RCD bit, unused */ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ @@ -83,6 +88,7 @@ struct scsi_disk { unsigned lbpws : 1; unsigned lbpws10 : 1; unsigned lbpvpd : 1; + unsigned ws10 : 1; unsigned ws16 : 1; }; #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) @@ -98,6 +104,12 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk) (sdsk)->disk->disk_name, ##a) : \ sdev_printk(prefix, (sdsk)->device, fmt, ##a) +#define sd_first_printk(prefix, sdsk, fmt, a...) 
\
+	do {								\
+		if ((sdsk)->first_scan)					\
+			sd_printk(prefix, sdsk, fmt, ##a);		\
+	} while (0)
+
 static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
 {
 	switch (scmd->cmnd[0]) {
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 04998f36e50..a7a691d0af7 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -93,14 +93,6 @@ static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
 		if (sdt->app_tag == 0xffff)
 			return 0;
 
-		/* Bad ref tag received from disk */
-		if (sdt->ref_tag == 0xffffffff) {
-			printk(KERN_ERR
-			       "%s: bad phys ref tag on sector %lu\n",
-			       bix->disk_name, (unsigned long)sector);
-			return -EIO;
-		}
-
 		if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
 			printk(KERN_ERR
 			       "%s: ref tag error on sector %lu (rcvd %u)\n",
@@ -373,7 +365,6 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
 	struct bio *bio;
 	struct scsi_disk *sdkp;
 	struct sd_dif_tuple *sdt;
-	unsigned int i, j;
 	u32 phys, virt;
 
 	sdkp = rq->bio->bi_bdev->bd_disk->private_data;
@@ -384,19 +375,21 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
 	phys = hw_sector & 0xffffffff;
 
 	__rq_for_each_bio(bio, rq) {
-		struct bio_vec *iv;
+		struct bio_vec iv;
+		struct bvec_iter iter;
+		unsigned int j;
 
 		/* Already remapped? */
 		if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
 			break;
 
-		virt = bio->bi_integrity->bip_sector & 0xffffffff;
+		virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
 
-		bip_for_each_vec(iv, bio->bi_integrity, i) {
-			sdt = kmap_atomic(iv->bv_page)
-				+ iv->bv_offset;
+		bip_for_each_vec(iv, bio->bi_integrity, iter) {
+			sdt = kmap_atomic(iv.bv_page)
+				+ iv.bv_offset;
 
-			for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+			for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
 
 				if (be32_to_cpu(sdt->ref_tag) == virt)
 					sdt->ref_tag = cpu_to_be32(phys);
@@ -422,7 +415,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
 	struct scsi_disk *sdkp;
 	struct bio *bio;
 	struct sd_dif_tuple *sdt;
-	unsigned int i, j, sectors, sector_sz;
+	unsigned int j, sectors, sector_sz;
 	u32 phys, virt;
 
 	sdkp = scsi_disk(scmd->request->rq_disk);
@@ -438,15 +431,16 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
 	phys >>= 3;
 
 	__rq_for_each_bio(bio, scmd->request) {
-		struct bio_vec *iv;
+		struct bio_vec iv;
+		struct bvec_iter iter;
 
-		virt = bio->bi_integrity->bip_sector & 0xffffffff;
+		virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
 
-		bip_for_each_vec(iv, bio->bi_integrity, i) {
-			sdt = kmap_atomic(iv->bv_page)
-				+ iv->bv_offset;
+		bip_for_each_vec(iv, bio->bi_integrity, iter) {
+			sdt = kmap_atomic(iv.bv_page)
+				+ iv.bv_offset;
 
-			for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+			for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
 
 				if (sectors == 0) {
 					kunmap_atomic(sdt);
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index eba183c428c..80bfece1a2d 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/enclosure.h>
+#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -448,27 +449,18 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
 static void ses_match_to_enclosure(struct enclosure_device *edev,
 				   struct scsi_device *sdev)
 {
-	unsigned char *buf;
 	unsigned char *desc;
-	unsigned int vpd_len;
 	struct efd efd = {
 		.addr = 0,
 	};
 
-	buf = kmalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
-	if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, INIT_ALLOC_SIZE))
-		goto free;
-
ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); - vpd_len = ((buf[2] << 8) | buf[3]) + 4; - kfree(buf); - buf = kmalloc(vpd_len, GFP_KERNEL); - if (!buf ||scsi_get_vpd_page(sdev, 0x83, buf, vpd_len)) - goto free; + if (!sdev->vpd_pg83_len) + return; - desc = buf + 4; - while (desc < buf + vpd_len) { + desc = sdev->vpd_pg83 + 4; + while (desc < sdev->vpd_pg83 + sdev->vpd_pg83_len) { enum scsi_protocol proto = desc[0] >> 4; u8 code_set = desc[0] & 0x0f; u8 piv = desc[1] & 0x80; @@ -478,25 +470,15 @@ static void ses_match_to_enclosure(struct enclosure_device *edev, if (piv && code_set == 1 && assoc == 1 && proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8) - efd.addr = (u64)desc[4] << 56 | - (u64)desc[5] << 48 | - (u64)desc[6] << 40 | - (u64)desc[7] << 32 | - (u64)desc[8] << 24 | - (u64)desc[9] << 16 | - (u64)desc[10] << 8 | - (u64)desc[11]; + efd.addr = get_unaligned_be64(&desc[4]); desc += len + 4; } - if (!efd.addr) - goto free; + if (efd.addr) { + efd.dev = &sdev->sdev_gendev; - efd.dev = &sdev->sdev_gendev; - - enclosure_for_each_device(ses_enclosure_find_by_addr, &efd); - free: - kfree(buf); + enclosure_for_each_device(ses_enclosure_find_by_addr, &efd); + } } static int ses_intf_add(struct device *cdev, diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 9f0c4654745..53268aaba55 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -35,6 +35,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */ #include <linux/sched.h> #include <linux/string.h> #include <linux/mm.h> +#include <linux/aio.h> #include <linux/errno.h> #include <linux/mtio.h> #include <linux/ioctl.h> @@ -1652,10 +1653,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd) if (!rq) return -ENOMEM; + blk_rq_set_block_pc(rq); memcpy(rq->cmd, cmd, hp->cmd_len); - rq->cmd_len = hp->cmd_len; - rq->cmd_type = REQ_TYPE_BLOCK_PC; srp->rq = rq; rq->end_io_data = srp; diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index f2884ee9071..93cbd36c990 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -79,6 +79,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM); static DEFINE_MUTEX(sr_mutex); static int sr_probe(struct device *); static int sr_remove(struct device *); +static int sr_init_command(struct scsi_cmnd *SCpnt); static int sr_done(struct scsi_cmnd *); static int sr_runtime_suspend(struct device *dev); @@ -94,6 +95,7 @@ static struct scsi_driver sr_template = { .remove = sr_remove, .pm = &sr_pm_ops, }, + .init_command = sr_init_command, .done = sr_done, }; @@ -161,14 +163,10 @@ static inline struct scsi_cd *scsi_cd_get(struct gendisk *disk) goto out; cd = scsi_cd(disk); kref_get(&cd->kref); - if (scsi_device_get(cd->device)) - goto out_put; - if (!scsi_autopm_get_device(cd->device)) - goto out; - - out_put: - kref_put(&cd->kref, sr_kref_release); - cd = NULL; + if (scsi_device_get(cd->device)) { + kref_put(&cd->kref, sr_kref_release); + cd = NULL; + } out: mutex_unlock(&sr_ref_mutex); return cd; @@ -180,7 +178,6 @@ static void scsi_cd_put(struct scsi_cd *cd) mutex_lock(&sr_ref_mutex); kref_put(&cd->kref, sr_kref_release); - scsi_autopm_put_device(sdev); scsi_device_put(sdev); mutex_unlock(&sr_ref_mutex); } @@ -383,21 +380,14 @@ static int sr_done(struct scsi_cmnd *SCpnt) return good_bytes; } -static int sr_prep_fn(struct request_queue *q, struct request *rq) +static int sr_init_command(struct scsi_cmnd *SCpnt) { int block = 0, this_count, s_size; struct scsi_cd *cd; - struct scsi_cmnd *SCpnt; - struct scsi_device *sdp = q->queuedata; + struct request *rq = 
SCpnt->request; + struct scsi_device *sdp = SCpnt->device; int ret; - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { - ret = scsi_setup_blk_pc_cmnd(sdp, rq); - goto out; - } else if (rq->cmd_type != REQ_TYPE_FS) { - ret = BLKPREP_KILL; - goto out; - } ret = scsi_setup_fs_cmnd(sdp, rq); if (ret != BLKPREP_OK) goto out; @@ -522,7 +512,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq) */ ret = BLKPREP_OK; out: - return scsi_prep_return(q, rq, ret); + return ret; } static int sr_block_open(struct block_device *bdev, fmode_t mode) @@ -541,14 +531,13 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode) return ret; } -static int sr_block_release(struct gendisk *disk, fmode_t mode) +static void sr_block_release(struct gendisk *disk, fmode_t mode) { struct scsi_cd *cd = scsi_cd(disk); mutex_lock(&sr_mutex); cdrom_release(&cd->cdi, mode); scsi_cd_put(cd); mutex_unlock(&sr_mutex); - return 0; } static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, @@ -559,8 +548,6 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, void __user *argp = (void __user *)arg; int ret; - scsi_autopm_get_device(cd->device); - mutex_lock(&sr_mutex); /* @@ -592,7 +579,6 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, out: mutex_unlock(&sr_mutex); - scsi_autopm_put_device(cd->device); return ret; } @@ -600,17 +586,11 @@ static unsigned int sr_block_check_events(struct gendisk *disk, unsigned int clearing) { struct scsi_cd *cd = scsi_cd(disk); - unsigned int ret; - if (atomic_read(&cd->device->disk_events_disable_depth) == 0) { - scsi_autopm_get_device(cd->device); - ret = cdrom_check_events(&cd->cdi, clearing); - scsi_autopm_put_device(cd->device); - } else { - ret = 0; - } + if (atomic_read(&cd->device->disk_events_disable_depth)) + return 0; - return ret; + return cdrom_check_events(&cd->cdi, clearing); } static int sr_block_revalidate_disk(struct gendisk *disk) @@ -618,8 +598,6 @@ static int sr_block_revalidate_disk(struct gendisk *disk) struct scsi_cd *cd = scsi_cd(disk); struct scsi_sense_hdr sshdr; - scsi_autopm_get_device(cd->device); - /* if the unit is not ready, nothing more to do */ if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr)) goto out; @@ -627,7 +605,6 @@ static int sr_block_revalidate_disk(struct gendisk *disk) sr_cd_check(&cd->cdi); get_sectorsize(cd); out: - scsi_autopm_put_device(cd->device); return 0; } @@ -736,7 +713,6 @@ static int sr_probe(struct device *dev) /* FIXME: need to handle a get_capabilities failure properly ?? */ get_capabilities(cd); - blk_queue_prep_rq(sdev->request_queue, sr_prep_fn); sr_vendor_init(cd); disk->driverfs_dev = &sdev->sdev_gendev; @@ -748,6 +724,12 @@ static int sr_probe(struct device *dev) if (register_cdrom(&cd->cdi)) goto fail_put; + /* + * Initialize block layer runtime PM stuffs before the + * periodic event checking request gets started in add_disk. 
+ */ + blk_pm_runtime_init(sdev->request_queue, dev); + dev_set_drvdata(dev, cd); disk->flags |= GENHD_FL_REMOVABLE; add_disk(disk); @@ -1005,7 +987,6 @@ static int sr_remove(struct device *dev) scsi_autopm_get_device(cd->device); - blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn); del_gendisk(cd->disk); mutex_lock(&sr_ref_mutex); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 86974471af6..14eb4b256a0 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -82,7 +82,7 @@ static int try_rdio = 1; static int try_wdio = 1; static struct class st_sysfs_class; -static struct device_attribute st_dev_attrs[]; +static const struct attribute_group *st_dev_groups[]; MODULE_AUTHOR("Kai Makisara"); MODULE_DESCRIPTION("SCSI tape (st) driver"); @@ -484,7 +484,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd, if (!req) return DRIVER_ERROR << 24; - req->cmd_type = REQ_TYPE_BLOCK_PC; + blk_rq_set_block_pc(req); req->cmd_flags |= REQ_QUIET; mdata->null_mapped = 1; @@ -2198,12 +2198,19 @@ static int st_set_options(struct scsi_tape *STp, long options) struct st_modedef *STm; char *name = tape_name(STp); struct cdev *cd0, *cd1; + struct device *d0, *d1; STm = &(STp->modes[STp->current_mode]); if (!STm->defined) { - cd0 = STm->cdevs[0]; cd1 = STm->cdevs[1]; + cd0 = STm->cdevs[0]; + cd1 = STm->cdevs[1]; + d0 = STm->devs[0]; + d1 = STm->devs[1]; memcpy(STm, &(STp->modes[0]), sizeof(struct st_modedef)); - STm->cdevs[0] = cd0; STm->cdevs[1] = cd1; + STm->cdevs[0] = cd0; + STm->cdevs[1] = cd1; + STm->devs[0] = d0; + STm->devs[1] = d1; modes_defined = 1; DEBC(printk(ST_DEB_MSG "%s: Initialized mode %d definition from mode 0\n", @@ -3719,7 +3726,7 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg) static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma) { - int segs, nbr, max_segs, b_size, order, got; + int segs, max_segs, b_size, order, got; gfp_t priority; if (new_size <= STbuffer->buffer_size) @@ -3729,9 +3736,6 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm normalize_buffer(STbuffer); /* Avoid extra segment */ max_segs = STbuffer->use_sg; - nbr = max_segs - STbuffer->frp_segs; - if (nbr <= 0) - return 0; priority = GFP_KERNEL | __GFP_NOWARN; if (need_dma) @@ -4112,6 +4116,10 @@ static int st_probe(struct device *dev) tpnt->disk = disk; disk->private_data = &tpnt->driver; disk->queue = SDp->request_queue; + /* SCSI tape doesn't register this gendisk via add_disk(). Manually + * take queue reference that release_disk() expects. 
*/ + if (!blk_get_queue(disk->queue)) + goto out_put_disk; tpnt->driver = &st_template; tpnt->device = SDp; @@ -4185,7 +4193,7 @@ static int st_probe(struct device *dev) idr_preload_end(); if (error < 0) { pr_warn("st: idr allocation failed: %d\n", error); - goto out_put_disk; + goto out_put_queue; } tpnt->index = error; sprintf(disk->disk_name, "st%d", tpnt->index); @@ -4211,6 +4219,8 @@ out_remove_devs: spin_lock(&st_index_lock); idr_remove(&st_index_idr, tpnt->index); spin_unlock(&st_index_lock); +out_put_queue: + blk_put_queue(disk->queue); out_put_disk: put_disk(disk); kfree(tpnt); @@ -4268,7 +4278,7 @@ static void scsi_tape_release(struct kref *kref) static struct class st_sysfs_class = { .name = "scsi_tape", - .dev_attrs = st_dev_attrs, + .dev_groups = st_dev_groups, }; static int __init init_st(void) @@ -4402,6 +4412,7 @@ defined_show(struct device *dev, struct device_attribute *attr, char *buf) l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined); return l; } +static DEVICE_ATTR_RO(defined); static ssize_t default_blksize_show(struct device *dev, struct device_attribute *attr, @@ -4413,7 +4424,7 @@ default_blksize_show(struct device *dev, struct device_attribute *attr, l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize); return l; } - +static DEVICE_ATTR_RO(default_blksize); static ssize_t default_density_show(struct device *dev, struct device_attribute *attr, @@ -4427,6 +4438,7 @@ default_density_show(struct device *dev, struct device_attribute *attr, l = snprintf(buf, PAGE_SIZE, fmt, STm->default_density); return l; } +static DEVICE_ATTR_RO(default_density); static ssize_t default_compression_show(struct device *dev, struct device_attribute *attr, @@ -4438,6 +4450,7 @@ default_compression_show(struct device *dev, struct device_attribute *attr, l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1); return l; } +static DEVICE_ATTR_RO(default_compression); static ssize_t options_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -4466,15 +4479,17 @@ options_show(struct device *dev, struct device_attribute *attr, char *buf) l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options); return l; } - -static struct device_attribute st_dev_attrs[] = { - __ATTR_RO(defined), - __ATTR_RO(default_blksize), - __ATTR_RO(default_density), - __ATTR_RO(default_compression), - __ATTR_RO(options), - __ATTR_NULL, +static DEVICE_ATTR_RO(options); + +static struct attribute *st_dev_attrs[] = { + &dev_attr_defined.attr, + &dev_attr_default_blksize.attr, + &dev_attr_default_density.attr, + &dev_attr_default_compression.attr, + &dev_attr_options.attr, + NULL, }; +ATTRIBUTE_GROUPS(st_dev); /* The following functions may be useful for a larger audience. 
*/ static int sgl_map_user_pages(struct st_buffer *STbp, diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 325c31caa6e..1aa4befcfbd 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -1790,8 +1790,6 @@ static void stex_remove(struct pci_dev *pdev) scsi_remove_host(hba->host); - pci_set_drvdata(pdev, NULL); - stex_hba_stop(hba); stex_hba_free(hba); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 16a3a0cc967..9969fa1ef7c 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -55,10 +55,15 @@ * V1 RC < 2008/1/31: 1.0 * V1 RC > 2008/1/31: 2.0 * Win7: 4.2 + * Win8: 5.1 */ -#define VMSTOR_CURRENT_MAJOR 4 -#define VMSTOR_CURRENT_MINOR 2 + +#define VMSTOR_WIN7_MAJOR 4 +#define VMSTOR_WIN7_MINOR 2 + +#define VMSTOR_WIN8_MAJOR 5 +#define VMSTOR_WIN8_MINOR 1 /* Packet structure describing virtual storage requests. */ @@ -74,18 +79,103 @@ enum vstor_packet_operation { VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9, VSTOR_OPERATION_QUERY_PROPERTIES = 10, VSTOR_OPERATION_ENUMERATE_BUS = 11, - VSTOR_OPERATION_MAXIMUM = 11 + VSTOR_OPERATION_FCHBA_DATA = 12, + VSTOR_OPERATION_CREATE_SUB_CHANNELS = 13, + VSTOR_OPERATION_MAXIMUM = 13 +}; + +/* + * WWN packet for Fibre Channel HBA + */ + +struct hv_fc_wwn_packet { + bool primary_active; + u8 reserved1; + u8 reserved2; + u8 primary_port_wwn[8]; + u8 primary_node_wwn[8]; + u8 secondary_port_wwn[8]; + u8 secondary_node_wwn[8]; }; + + +/* + * SRB Flag Bits + */ + +#define SRB_FLAGS_QUEUE_ACTION_ENABLE 0x00000002 +#define SRB_FLAGS_DISABLE_DISCONNECT 0x00000004 +#define SRB_FLAGS_DISABLE_SYNCH_TRANSFER 0x00000008 +#define SRB_FLAGS_BYPASS_FROZEN_QUEUE 0x00000010 +#define SRB_FLAGS_DISABLE_AUTOSENSE 0x00000020 +#define SRB_FLAGS_DATA_IN 0x00000040 +#define SRB_FLAGS_DATA_OUT 0x00000080 +#define SRB_FLAGS_NO_DATA_TRANSFER 0x00000000 +#define SRB_FLAGS_UNSPECIFIED_DIRECTION (SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT) +#define SRB_FLAGS_NO_QUEUE_FREEZE 0x00000100 +#define SRB_FLAGS_ADAPTER_CACHE_ENABLE 0x00000200 +#define SRB_FLAGS_FREE_SENSE_BUFFER 0x00000400 + +/* + * This flag indicates the request is part of the workflow for processing a D3. + */ +#define SRB_FLAGS_D3_PROCESSING 0x00000800 +#define SRB_FLAGS_IS_ACTIVE 0x00010000 +#define SRB_FLAGS_ALLOCATED_FROM_ZONE 0x00020000 +#define SRB_FLAGS_SGLIST_FROM_POOL 0x00040000 +#define SRB_FLAGS_BYPASS_LOCKED_QUEUE 0x00080000 +#define SRB_FLAGS_NO_KEEP_AWAKE 0x00100000 +#define SRB_FLAGS_PORT_DRIVER_ALLOCSENSE 0x00200000 +#define SRB_FLAGS_PORT_DRIVER_SENSEHASPORT 0x00400000 +#define SRB_FLAGS_DONT_START_NEXT_PACKET 0x00800000 +#define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000 +#define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000 + + /* * Platform neutral description of a scsi request - * this remains the same across the write regardless of 32/64 bit * note: it's patterned off the SCSI_PASS_THROUGH structure */ #define STORVSC_MAX_CMD_LEN 0x10 -#define STORVSC_SENSE_BUFFER_SIZE 0x12 + +#define POST_WIN7_STORVSC_SENSE_BUFFER_SIZE 0x14 +#define PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE 0x12 + +#define STORVSC_SENSE_BUFFER_SIZE 0x14 #define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14 +/* + * Sense buffer size changed in win8; have a run-time + * variable to track the size we should use. + */ +static int sense_buffer_size; + +/* + * The size of the vmscsi_request has changed in win8. The + * additional size is because of new elements added to the + * structure. These elements are valid only when we are talking + * to a win8 host. 
+ * Track the correction to size we need to apply. + */ + +static int vmscsi_size_delta; +static int vmstor_current_major; +static int vmstor_current_minor; + +struct vmscsi_win8_extension { + /* + * The following were added in Windows 8 + */ + u16 reserve; + u8 queue_tag; + u8 queue_action; + u32 srb_flags; + u32 time_out_value; + u32 queue_sort_ey; +} __packed; + struct vmscsi_request { u16 length; u8 srb_status; @@ -108,6 +198,11 @@ struct vmscsi_request { u8 sense_data[STORVSC_SENSE_BUFFER_SIZE]; u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING]; }; + /* + * The following was added in win8. + */ + struct vmscsi_win8_extension win8_extension; + } __attribute((packed)); @@ -115,22 +210,18 @@ struct vmscsi_request { * This structure is sent during the intialization phase to get the different * properties of the channel. */ + +#define STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL 0x1 + struct vmstorage_channel_properties { - u16 protocol_version; - u8 path_id; - u8 target_id; + u32 reserved; + u16 max_channel_cnt; + u16 reserved1; - /* Note: port number is only really known on the client side */ - u32 port_number; - u32 flags; + u32 flags; u32 max_transfer_bytes; - /* - * This id is unique for each channel and will correspond with - * vendor specific data in the inquiry data. - */ - - u64 unique_id; + u64 reserved2; } __packed; /* This structure is sent during the storage protocol negotiations. */ @@ -175,6 +266,15 @@ struct vstor_packet { /* Used during version negotiations. */ struct vmstorage_protocol_version version; + + /* Fibre channel address packet */ + struct hv_fc_wwn_packet wwn_packet; + + /* Number of sub-channels to create */ + u16 sub_channel_count; + + /* This will be the maximum of the union members */ + u8 buffer[0x34]; }; } __packed; @@ -221,7 +321,14 @@ static int storvsc_ringbuffer_size = (20 * PAGE_SIZE); module_param(storvsc_ringbuffer_size, int, S_IRUGO); MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)"); -#define STORVSC_MAX_IO_REQUESTS 128 +/* + * Timeout in seconds for all devices managed by this driver. + */ +static int storvsc_timeout = 180; + +#define STORVSC_MAX_IO_REQUESTS 200 + +static void storvsc_on_channel_callback(void *context); /* * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In @@ -259,6 +366,7 @@ struct storvsc_device { bool destroy; bool drain_notify; + bool open_sub_channel; atomic_t num_outstanding_req; struct Scsi_Host *host; @@ -650,12 +758,104 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, return total_copied; } +static void handle_sc_creation(struct vmbus_channel *new_sc) +{ + struct hv_device *device = new_sc->primary_channel->device_obj; + struct storvsc_device *stor_device; + struct vmstorage_channel_properties props; + + stor_device = get_out_stor_device(device); + if (!stor_device) + return; + + if (stor_device->open_sub_channel == false) + return; + + memset(&props, 0, sizeof(struct vmstorage_channel_properties)); + + vmbus_open(new_sc, + storvsc_ringbuffer_size, + storvsc_ringbuffer_size, + (void *)&props, + sizeof(struct vmstorage_channel_properties), + storvsc_on_channel_callback, new_sc); +} + +static void handle_multichannel_storage(struct hv_device *device, int max_chns) +{ + struct storvsc_device *stor_device; + int num_cpus = num_online_cpus(); + int num_sc; + struct storvsc_cmd_request *request; + struct vstor_packet *vstor_packet; + int ret, t; + + num_sc = ((max_chns > num_cpus) ? 
num_cpus : max_chns); + stor_device = get_out_stor_device(device); + if (!stor_device) + return; + + request = &stor_device->init_request; + vstor_packet = &request->vstor_packet; + + stor_device->open_sub_channel = true; + /* + * Establish a handler for dealing with subchannels. + */ + vmbus_set_sc_create_callback(device->channel, handle_sc_creation); + + /* + * Check to see if sub-channels have already been created. This + * can happen when this driver is re-loaded after unloading. + */ + + if (vmbus_are_subchannels_present(device->channel)) + return; + + stor_device->open_sub_channel = false; + /* + * Request the host to create sub-channels. + */ + memset(request, 0, sizeof(struct storvsc_cmd_request)); + init_completion(&request->wait_event); + vstor_packet->operation = VSTOR_OPERATION_CREATE_SUB_CHANNELS; + vstor_packet->flags = REQUEST_COMPLETION_FLAG; + vstor_packet->sub_channel_count = num_sc; + + ret = vmbus_sendpacket(device->channel, vstor_packet, + (sizeof(struct vstor_packet) - + vmscsi_size_delta), + (unsigned long)request, + VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + + if (ret != 0) + return; + + t = wait_for_completion_timeout(&request->wait_event, 10*HZ); + if (t == 0) + return; + + if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO || + vstor_packet->status != 0) + return; + + /* + * Now that we created the sub-channels, invoke the check; this + * may trigger the callback. + */ + stor_device->open_sub_channel = true; + vmbus_are_subchannels_present(device->channel); +} + static int storvsc_channel_init(struct hv_device *device) { struct storvsc_device *stor_device; struct storvsc_cmd_request *request; struct vstor_packet *vstor_packet; int ret, t; + int max_chns; + bool process_sub_channels = false; stor_device = get_out_stor_device(device); if (!stor_device) @@ -674,7 +874,8 @@ static int storvsc_channel_init(struct hv_device *device) vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = vmbus_sendpacket(device->channel, vstor_packet, - sizeof(struct vstor_packet), + (sizeof(struct vstor_packet) - + vmscsi_size_delta), (unsigned long)request, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); @@ -698,7 +899,7 @@ static int storvsc_channel_init(struct hv_device *device) vstor_packet->flags = REQUEST_COMPLETION_FLAG; vstor_packet->version.major_minor = - storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR); + storvsc_get_version(vmstor_current_major, vmstor_current_minor); /* * The revision number is only used in Windows; set it to 0. 
@@ -706,7 +907,8 @@ static int storvsc_channel_init(struct hv_device *device) vstor_packet->version.revision = 0; ret = vmbus_sendpacket(device->channel, vstor_packet, - sizeof(struct vstor_packet), + (sizeof(struct vstor_packet) - + vmscsi_size_delta), (unsigned long)request, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); @@ -727,11 +929,10 @@ static int storvsc_channel_init(struct hv_device *device) memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES; vstor_packet->flags = REQUEST_COMPLETION_FLAG; - vstor_packet->storage_channel_properties.port_number = - stor_device->port_number; ret = vmbus_sendpacket(device->channel, vstor_packet, - sizeof(struct vstor_packet), + (sizeof(struct vstor_packet) - + vmscsi_size_delta), (unsigned long)request, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); @@ -749,16 +950,26 @@ static int storvsc_channel_init(struct hv_device *device) vstor_packet->status != 0) goto cleanup; - stor_device->path_id = vstor_packet->storage_channel_properties.path_id; - stor_device->target_id - = vstor_packet->storage_channel_properties.target_id; + /* + * Check to see if multi-channel support is there. + * Hosts that implement protocol version of 5.1 and above + * support multi-channel. + */ + max_chns = vstor_packet->storage_channel_properties.max_channel_cnt; + if ((vmbus_proto_version != VERSION_WIN7) && + (vmbus_proto_version != VERSION_WS2008)) { + if (vstor_packet->storage_channel_properties.flags & + STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL) + process_sub_channels = true; + } memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION; vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = vmbus_sendpacket(device->channel, vstor_packet, - sizeof(struct vstor_packet), + (sizeof(struct vstor_packet) - + vmscsi_size_delta), (unsigned long)request, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); @@ -776,6 +987,9 @@ static int storvsc_channel_init(struct hv_device *device) vstor_packet->status != 0) goto cleanup; + if (process_sub_channels) + handle_multichannel_storage(device, max_chns); + cleanup: return ret; @@ -997,7 +1211,8 @@ static void storvsc_on_receive(struct hv_device *device, static void storvsc_on_channel_callback(void *context) { - struct hv_device *device = (struct hv_device *)context; + struct vmbus_channel *channel = (struct vmbus_channel *)context; + struct hv_device *device; struct storvsc_device *stor_device; u32 bytes_recvd; u64 request_id; @@ -1005,14 +1220,19 @@ static void storvsc_on_channel_callback(void *context) struct storvsc_cmd_request *request; int ret; + if (channel->primary_channel != NULL) + device = channel->primary_channel->device_obj; + else + device = channel->device_obj; stor_device = get_in_stor_device(device); if (!stor_device) return; do { - ret = vmbus_recvpacket(device->channel, packet, - ALIGN(sizeof(struct vstor_packet), 8), + ret = vmbus_recvpacket(channel, packet, + ALIGN((sizeof(struct vstor_packet) - + vmscsi_size_delta), 8), &bytes_recvd, &request_id); if (ret == 0 && bytes_recvd > 0) { @@ -1023,7 +1243,8 @@ static void storvsc_on_channel_callback(void *context) (request == &stor_device->reset_request)) { memcpy(&request->vstor_packet, packet, - sizeof(struct vstor_packet)); + (sizeof(struct vstor_packet) - + vmscsi_size_delta)); complete(&request->wait_event); } else { storvsc_on_receive(device, @@ -1050,7 +1271,7 @@ static int storvsc_connect_to_vsp(struct 
hv_device *device, u32 ring_size)
			 ring_size,
			 (void *)&props,
			 sizeof(struct vmstorage_channel_properties),
-			 storvsc_on_channel_callback, device);
+			 storvsc_on_channel_callback, device->channel);
 
 	if (ret != 0)
 		return ret;
@@ -1102,6 +1323,7 @@ static int storvsc_do_io(struct hv_device *device,
 {
 	struct storvsc_device *stor_device;
 	struct vstor_packet *vstor_packet;
+	struct vmbus_channel *outgoing_channel;
 	int ret = 0;
 
 	vstor_packet = &request->vstor_packet;
@@ -1112,14 +1334,20 @@ static int storvsc_do_io(struct hv_device *device,
 
 	request->device = device;
+	/*
+	 * Select an appropriate channel to send the request out.
+	 */
+
+	outgoing_channel = vmbus_get_outgoing_channel(device->channel);
 
 	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
 
-	vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
+	vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
+					vmscsi_size_delta);
 
-	vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;
+	vstor_packet->vm_srb.sense_info_length = sense_buffer_size;
 
 	vstor_packet->vm_srb.data_transfer_length =
@@ -1128,14 +1356,16 @@ static int storvsc_do_io(struct hv_device *device,
 	vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
 
 	if (request->data_buffer.len) {
-		ret = vmbus_sendpacket_multipagebuffer(device->channel,
+		ret = vmbus_sendpacket_multipagebuffer(outgoing_channel,
 				&request->data_buffer,
 				vstor_packet,
-				sizeof(struct vstor_packet),
+				(sizeof(struct vstor_packet) -
+				vmscsi_size_delta),
 				(unsigned long)request);
 	} else {
 		ret = vmbus_sendpacket(device->channel, vstor_packet,
-			       sizeof(struct vstor_packet),
+			       (sizeof(struct vstor_packet) -
+			       vmscsi_size_delta),
 			       (unsigned long)request,
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -1189,6 +1419,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)
 {
 	struct stor_mem_pools *memp = sdevice->hostdata;
 
+	if (!memp)
+		return;
+
 	mempool_destroy(memp->request_mempool);
 	kmem_cache_destroy(memp->request_pool);
 	kfree(memp);
@@ -1204,6 +1437,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
 
 	blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
 
+	blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
+
 	sdevice->no_write_same = 1;
 
 	return 0;
@@ -1257,7 +1492,8 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
 	vstor_packet->vm_srb.path_id = stor_device->path_id;
 
 	ret = vmbus_sendpacket(device->channel, vstor_packet,
-			       sizeof(struct vstor_packet),
+			       (sizeof(struct vstor_packet) -
+				vmscsi_size_delta),
 			       (unsigned long)&stor_device->reset_request,
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -1342,18 +1578,28 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	scmnd->host_scribble = (unsigned char *)cmd_request;
 
 	vm_srb = &cmd_request->vstor_packet.vm_srb;
+	vm_srb->win8_extension.time_out_value = 60;
 
 	/* Build the SRB */
 	switch (scmnd->sc_data_direction) {
 	case DMA_TO_DEVICE:
 		vm_srb->data_in = WRITE_TYPE;
+		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
+		vm_srb->win8_extension.srb_flags |=
+			(SRB_FLAGS_QUEUE_ACTION_ENABLE |
+			SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
 		break;
 	case DMA_FROM_DEVICE:
 		vm_srb->data_in = READ_TYPE;
+		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
+		vm_srb->win8_extension.srb_flags |=
+			(SRB_FLAGS_QUEUE_ACTION_ENABLE |
+			SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
 		break;
 	default:
 		vm_srb->data_in = UNKNOWN_TYPE;
+		vm_srb->win8_extension.srb_flags = 0;
 		break;
 	}
 
@@ -1454,11 +1700,13 @@ static struct scsi_host_template
scsi_driver = { .use_clustering = DISABLE_CLUSTERING, /* Make sure we dont get a sg segment crosses a page boundary */ .dma_boundary = PAGE_SIZE-1, + .no_write_same = 1, }; enum { SCSI_GUID, IDE_GUID, + SFC_GUID, }; static const struct hv_vmbus_device_id id_table[] = { @@ -1470,6 +1718,11 @@ static const struct hv_vmbus_device_id id_table[] = { { HV_IDE_GUID, .driver_data = IDE_GUID }, + /* Fibre Channel GUID */ + { + HV_SYNTHFC_GUID, + .driver_data = SFC_GUID + }, { }, }; @@ -1485,6 +1738,24 @@ static int storvsc_probe(struct hv_device *device, int target = 0; struct storvsc_device *stor_device; + /* + * Based on the windows host we are running on, + * set state to properly communicate with the host. + */ + + if (vmbus_proto_version == VERSION_WIN8) { + sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE; + vmscsi_size_delta = 0; + vmstor_current_major = VMSTOR_WIN8_MAJOR; + vmstor_current_minor = VMSTOR_WIN8_MINOR; + } else { + sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE; + vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); + vmstor_current_major = VMSTOR_WIN7_MAJOR; + vmstor_current_minor = VMSTOR_WIN7_MINOR; + } + + host = scsi_host_alloc(&scsi_driver, sizeof(struct hv_host_device)); if (!host) @@ -1504,6 +1775,7 @@ static int storvsc_probe(struct hv_device *device, } stor_device->destroy = false; + stor_device->open_sub_channel = false; init_waitqueue_head(&stor_device->waiting_to_drain); stor_device->device = device; stor_device->host = host; @@ -1594,7 +1866,8 @@ static int __init storvsc_drv_init(void) max_outstanding_req_per_channel = ((storvsc_ringbuffer_size - PAGE_SIZE) / ALIGN(MAX_MULTIPAGE_BUFFER_PACKET + - sizeof(struct vstor_packet) + sizeof(u64), + sizeof(struct vstor_packet) + sizeof(u64) - + vmscsi_size_delta, sizeof(u64))); if (max_outstanding_req_per_channel < @@ -1610,7 +1883,6 @@ static void __exit storvsc_drv_exit(void) } MODULE_LICENSE("GPL"); -MODULE_VERSION(HV_DRV_VERSION); MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver"); module_init(storvsc_drv_init); module_exit(storvsc_drv_exit); diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index 7e12a2e4e0a..88220794cc9 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c @@ -364,7 +364,7 @@ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) return( 0 ); if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >= TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) { - TAG_PRINTK( "scsi%d: target %d lun %d: no free tags\n", + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); return( 1 ); } @@ -388,7 +388,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) !setup_use_tagged_queuing || !cmd->device->tagged_supported) { cmd->tag = TAG_NONE; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); - TAG_PRINTK( "scsi%d: target %d lun %d now allocated by untagged " + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); } else { @@ -397,7 +397,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS ); set_bit( cmd->tag, &ta->allocated ); ta->nr_allocated++; - TAG_PRINTK( "scsi%d: using tag %d for target %d lun %d " + dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d " "(now %d tags in use)\n", H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun, ta->nr_allocated ); 
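The tagged-queueing bookkeeping instrumented in the hunks above and below is, at bottom, a per-target/LUN bitmap: is_lun_busy() refuses new work once queue_size tags are outstanding, cmd_get_tag() claims the first clear bit, and cmd_free_tag() returns it. A minimal user-space sketch of that scheme follows; the tag_get()/tag_free() names and the test harness are illustrative, MAX_TAGS is assumed to be 32, and struct tag_alloc keeps only the TAG_ALLOC fields the driver actually touches.

/* Per-LUN tag bitmap, mirroring the driver's TAG_ALLOC bookkeeping. */
#include <stdio.h>

#define MAX_TAGS 32	/* assumed here; the driver's value may differ */

struct tag_alloc {
	unsigned long allocated;	/* one bit per outstanding tag */
	int nr_allocated;
	int queue_size;			/* negotiated queue depth */
};

static int tag_get(struct tag_alloc *ta)
{
	int tag;

	if (ta->nr_allocated >= ta->queue_size)
		return -1;				/* "no free tags" */
	for (tag = 0; tag < MAX_TAGS; tag++) {
		if (!(ta->allocated & (1UL << tag))) {	/* find_first_zero_bit() */
			ta->allocated |= 1UL << tag;	/* set_bit() */
			ta->nr_allocated++;
			return tag;
		}
	}
	return -1;
}

static void tag_free(struct tag_alloc *ta, int tag)
{
	ta->allocated &= ~(1UL << tag);			/* clear_bit() */
	ta->nr_allocated--;
}

int main(void)
{
	struct tag_alloc ta = { 0, 0, 8 };
	int t0 = tag_get(&ta);
	int t1 = tag_get(&ta);

	printf("got tags %d and %d, %d in use\n", t0, t1, ta.nr_allocated);
	tag_free(&ta, t0);
	printf("%d in use after free\n", ta.nr_allocated);
	return 0;
}

The kernel version does the same thing with find_first_zero_bit(), set_bit() and clear_bit() on ta->allocated; the sketch just makes the invariant visible: nr_allocated never exceeds queue_size, and a tag is reused only after it is freed.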
@@ -415,7 +415,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd) if (cmd->tag == TAG_NONE) { hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); - TAG_PRINTK( "scsi%d: target %d lun %d untagged cmd finished\n", + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); } else if (cmd->tag >= MAX_TAGS) { @@ -426,7 +426,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd) TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; clear_bit( cmd->tag, &ta->allocated ); ta->nr_allocated--; - TAG_PRINTK( "scsi%d: freed tag %d for target %d lun %d\n", + dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun ); } } @@ -484,7 +484,7 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd) #include <linux/delay.h> -#if 1 +#if NDEBUG static struct { unsigned char mask; const char * name;} @@ -572,12 +572,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance) } } -#else /* !NDEBUG */ - -/* dummies... */ -__inline__ void NCR5380_print(struct Scsi_Host *instance) { }; -__inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { }; - #endif /* @@ -618,7 +612,7 @@ static inline void NCR5380_all_init (void) { static int done = 0; if (!done) { - INI_PRINTK("scsi : NCR5380_all_init()\n"); + dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n"); done = 1; } } @@ -661,121 +655,94 @@ static void __init NCR5380_print_options (struct Scsi_Host *instance) * Inputs : instance, pointer to this instance. */ -static void NCR5380_print_status (struct Scsi_Host *instance) +static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd) { - char *pr_bfr; - char *start; - int len; - - NCR_PRINT(NDEBUG_ANY); - NCR_PRINT_PHASE(NDEBUG_ANY); - - pr_bfr = (char *) __get_free_page(GFP_ATOMIC); - if (!pr_bfr) { - printk("NCR5380_print_status: no memory for print buffer\n"); - return; - } - len = NCR5380_proc_info(instance, pr_bfr, &start, 0, PAGE_SIZE, 0); - pr_bfr[len] = 0; - printk("\n%s\n", pr_bfr); - free_page((unsigned long) pr_bfr); + int i, s; + unsigned char *command; + printk("scsi%d: destination target %d, lun %d\n", + H_NO(cmd), cmd->device->id, cmd->device->lun); + printk(KERN_CONT " command = "); + command = cmd->cmnd; + printk(KERN_CONT "%2d (0x%02x)", command[0], command[0]); + for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) + printk(KERN_CONT " %02x", command[i]); + printk("\n"); } - -/******************************************/ -/* - * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED] - * - * *buffer: I/O buffer - * **start: if inout == FALSE pointer into buffer where user read should start - * offset: current offset - * length: length of buffer - * hostno: Scsi_Host host_no - * inout: TRUE - user is writing; FALSE - user is reading - * - * Return the number of bytes read from or written -*/ - -#undef SPRINTF -#define SPRINTF(fmt,args...) 
\ - do { if (pos + strlen(fmt) + 20 /* slop */ < buffer + length) \ - pos += sprintf(pos, fmt , ## args); } while(0) -static -char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer, - int length); - -static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer, - char **start, off_t offset, int length, int inout) +static void NCR5380_print_status(struct Scsi_Host *instance) { - char *pos = buffer; - struct NCR5380_hostdata *hostdata; - struct scsi_cmnd *ptr; - unsigned long flags; - off_t begin = 0; -#define check_offset() \ - do { \ - if (pos - buffer < offset - begin) { \ - begin += pos - buffer; \ - pos = buffer; \ - } \ - } while (0) - - hostdata = (struct NCR5380_hostdata *)instance->hostdata; - - if (inout) { /* Has data been written to the file ? */ - return(-ENOSYS); /* Currently this is a no-op */ - } - SPRINTF("NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE); - check_offset(); - local_irq_save(flags); - SPRINTF("NCR5380: coroutine is%s running.\n", main_running ? "" : "n't"); - check_offset(); - if (!hostdata->connected) - SPRINTF("scsi%d: no currently connected command\n", HOSTNO); - else - pos = lprint_Scsi_Cmnd ((struct scsi_cmnd *) hostdata->connected, - pos, buffer, length); - SPRINTF("scsi%d: issue_queue\n", HOSTNO); - check_offset(); - for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = NEXT(ptr)) - { - pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length); - check_offset(); - } + struct NCR5380_hostdata *hostdata; + Scsi_Cmnd *ptr; + unsigned long flags; + + NCR5380_dprint(NDEBUG_ANY, instance); + NCR5380_dprint_phase(NDEBUG_ANY, instance); + + hostdata = (struct NCR5380_hostdata *)instance->hostdata; + + printk("\nNCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE); + local_irq_save(flags); + printk("NCR5380: coroutine is%s running.\n", + main_running ? 
"" : "n't"); + if (!hostdata->connected) + printk("scsi%d: no currently connected command\n", HOSTNO); + else + lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected); + printk("scsi%d: issue_queue\n", HOSTNO); + for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) + lprint_Scsi_Cmnd(ptr); + + printk("scsi%d: disconnected_queue\n", HOSTNO); + for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; + ptr = NEXT(ptr)) + lprint_Scsi_Cmnd(ptr); - SPRINTF("scsi%d: disconnected_queue\n", HOSTNO); - check_offset(); - for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; - ptr = NEXT(ptr)) { - pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length); - check_offset(); - } - - local_irq_restore(flags); - *start = buffer + (offset - begin); - if (pos - buffer < offset - begin) - return 0; - else if (pos - buffer - (offset - begin) < length) - return pos - buffer - (offset - begin); - return length; + local_irq_restore(flags); + printk("\n"); } -static char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer, - int length) +static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m) { - int i, s; - unsigned char *command; - SPRINTF("scsi%d: destination target %d, lun %d\n", - H_NO(cmd), cmd->device->id, cmd->device->lun); - SPRINTF(" command = "); - command = cmd->cmnd; - SPRINTF("%2d (0x%02x)", command[0], command[0]); - for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) - SPRINTF(" %02x", command[i]); - SPRINTF("\n"); - return pos; + int i, s; + unsigned char *command; + seq_printf(m, "scsi%d: destination target %d, lun %d\n", + H_NO(cmd), cmd->device->id, cmd->device->lun); + seq_printf(m, " command = "); + command = cmd->cmnd; + seq_printf(m, "%2d (0x%02x)", command[0], command[0]); + for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) + seq_printf(m, " %02x", command[i]); + seq_printf(m, "\n"); } +static int NCR5380_show_info(struct seq_file *m, struct Scsi_Host *instance) +{ + struct NCR5380_hostdata *hostdata; + Scsi_Cmnd *ptr; + unsigned long flags; + + hostdata = (struct NCR5380_hostdata *)instance->hostdata; + + seq_printf(m, "NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE); + local_irq_save(flags); + seq_printf(m, "NCR5380: coroutine is%s running.\n", + main_running ? "" : "n't"); + if (!hostdata->connected) + seq_printf(m, "scsi%d: no currently connected command\n", HOSTNO); + else + show_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, m); + seq_printf(m, "scsi%d: issue_queue\n", HOSTNO); + for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) + show_Scsi_Cmnd(ptr, m); + + seq_printf(m, "scsi%d: disconnected_queue\n", HOSTNO); + for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; + ptr = NEXT(ptr)) + show_Scsi_Cmnd(ptr, m); + + local_irq_restore(flags); + return 0; +} /* * Function : void NCR5380_init (struct Scsi_Host *instance) @@ -955,7 +922,7 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, local_irq_restore(flags); - QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd), + dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd), (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); /* If queue_command() is called from an interrupt (real one or bottom @@ -1025,7 +992,7 @@ static void NCR5380_main (struct work_struct *bl) done = 1; if (!hostdata->connected) { - MAIN_PRINTK( "scsi%d: not connected\n", HOSTNO ); + dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO ); /* * Search through the issue_queue for a command destined * for a target that's not busy. 
@@ -1039,12 +1006,8 @@ static void NCR5380_main (struct work_struct *bl) for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) { -#if (NDEBUG & NDEBUG_LISTS) if (prev != tmp) - printk("MAIN tmp=%p target=%d busy=%d lun=%d\n", - tmp, tmp->target, hostdata->busy[tmp->target], - tmp->lun); -#endif + dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); /* When we find one, remove it from the issue queue. */ /* ++guenther: possible race with Falcon locking */ if ( @@ -1074,9 +1037,9 @@ static void NCR5380_main (struct work_struct *bl) * On failure, we must add the command back to the * issue queue so we can keep trying. */ - MAIN_PRINTK("scsi%d: main(): command for target %d " + dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d " "lun %d removed from issue_queue\n", - HOSTNO, tmp->target, tmp->lun); + HOSTNO, tmp->device->id, tmp->device->lun); /* * REQUEST SENSE commands are issued without tagged * queueing, even on SCSI-II devices because the @@ -1103,7 +1066,7 @@ static void NCR5380_main (struct work_struct *bl) cmd_free_tag( tmp ); #endif local_irq_restore(flags); - MAIN_PRINTK("scsi%d: main(): select() failed, " + dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, " "returned to issue_queue\n", HOSTNO); if (hostdata->connected) break; @@ -1117,10 +1080,10 @@ static void NCR5380_main (struct work_struct *bl) #endif ) { local_irq_restore(flags); - MAIN_PRINTK("scsi%d: main: performing information transfer\n", + dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n", HOSTNO); NCR5380_information_transfer(instance); - MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO); + dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO); done = 0; } } while (!done); @@ -1157,7 +1120,7 @@ static void NCR5380_dma_complete( struct Scsi_Host *instance ) return; } - DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", + dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); @@ -1216,27 +1179,27 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id) int done = 1, handled = 0; unsigned char basr; - INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO); /* Look for pending interrupts */ basr = NCR5380_read(BUS_AND_STATUS_REG); - INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr); + dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr); /* dispatch to appropriate routine if found and done=0 */ if (basr & BASR_IRQ) { - NCR_PRINT(NDEBUG_INTR); + NCR5380_dprint(NDEBUG_INTR, instance); if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { done = 0; // ENABLE_IRQ(); - INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO); NCR5380_reselect(instance); (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else if (basr & BASR_PARITY_ERROR) { - INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO); (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { - INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO); (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else { @@ -1256,7 +1219,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id) 
((basr & BASR_END_DMA_TRANSFER) || !(basr & BASR_PHASE_MATCH))) { - INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); NCR5380_dma_complete( instance ); done = 0; // ENABLE_IRQ(); @@ -1265,7 +1228,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id) { /* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */ if (basr & BASR_PHASE_MATCH) - INT_PRINTK("scsi%d: unknown interrupt, " + dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, " "BASR 0x%x, MR 0x%x, SR 0x%x\n", HOSTNO, basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); @@ -1289,7 +1252,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id) } if (!done) { - INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO); /* Put a call to NCR5380_main() on the queue... */ queue_main(); } @@ -1365,8 +1328,8 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, unsigned long flags; hostdata->restart_select = 0; - NCR_PRINT(NDEBUG_ARBITRATION); - ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, + NCR5380_dprint(NDEBUG_ARBITRATION, instance); + dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO, instance->this_id); /* @@ -1412,7 +1375,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, && !hostdata->connected); #endif - ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); + dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO); if (hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); @@ -1433,7 +1396,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); - ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", + dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", HOSTNO); return -1; } @@ -1448,7 +1411,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", + dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", HOSTNO); return -1; } @@ -1471,7 +1434,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, return -1; } - ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO); + dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO); /* * Now that we have won arbitration, start Selection process, asserting @@ -1531,7 +1494,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, udelay(1); - SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); + dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); /* * The SCSI specification calls for a 250 ms timeout for the actual @@ -1586,7 +1549,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); if (hostdata->restart_select) printk(KERN_NOTICE "\trestart select\n"); - NCR_PRINT(NDEBUG_ANY); + NCR5380_dprint(NDEBUG_ANY, instance); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return -1; } @@ -1599,7 +1562,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, #endif 
cmd->scsi_done(cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); + dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return 0; } @@ -1624,7 +1587,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, /* Wait for start of REQ/ACK handshake */ while (!(NCR5380_read(STATUS_REG) & SR_REQ)); - SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", + dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n", HOSTNO, cmd->device->id); tmp[0] = IDENTIFY(1, cmd->device->lun); @@ -1644,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, data = tmp; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &data); - SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); + dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO); /* XXX need to handle errors here */ hostdata->connected = cmd; #ifndef SUPPORT_TAGS @@ -1707,12 +1670,12 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance, */ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)); - HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO); + dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO); /* Check for phase mismatch */ if ((tmp & PHASE_MASK) != p) { - PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO); - NCR_PRINT_PHASE(NDEBUG_PIO); + dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO); + NCR5380_dprint_phase(NDEBUG_PIO, instance); break; } @@ -1735,24 +1698,24 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance, if (!((p & SR_MSG) && c > 1)) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); - NCR_PRINT(NDEBUG_PIO); + NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ACK); } else { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN); - NCR_PRINT(NDEBUG_PIO); + NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); } } else { - NCR_PRINT(NDEBUG_PIO); + NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); } while (NCR5380_read(STATUS_REG) & SR_REQ); - HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO); + dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO); /* * We have several special cases to consider during REQ/ACK handshaking : @@ -1773,7 +1736,7 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance, } } while (--c); - PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c); + dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c); *count = c; *data = d; @@ -1881,7 +1844,7 @@ static int NCR5380_transfer_dma( struct Scsi_Host *instance, } hostdata->dma_len = c; - DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n", + dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n", HOSTNO, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? 
"to" : "from", *data); @@ -1958,7 +1921,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) phase = (tmp & PHASE_MASK); if (phase != old_phase) { old_phase = phase; - NCR_PRINT_PHASE(NDEBUG_INFORMATION); + NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); } if(phase == PHASE_CMDOUT) { @@ -2023,7 +1986,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) --cmd->SCp.buffers_residual; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = SGADDR(cmd->SCp.buffer); - INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", + dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n", HOSTNO, cmd->SCp.this_residual, cmd->SCp.buffers_residual); } @@ -2115,7 +2078,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - LNK_PRINTK("scsi%d: target %d lun %d linked command " + dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command " "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); /* Enable reselect interrupts */ @@ -2140,7 +2103,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) * and don't free it! */ cmd->next_link->tag = cmd->tag; cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); - LNK_PRINTK("scsi%d: target %d lun %d linked request " + dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request " "done, calling scsi_done().\n", HOSTNO, cmd->device->id, cmd->device->lun); #ifdef NCR5380_STATS @@ -2155,7 +2118,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); hostdata->connected = NULL; - QU_PRINTK("scsi%d: command for target %d, lun %d " + dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d " "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); #ifdef SUPPORT_TAGS cmd_free_tag( cmd ); @@ -2169,7 +2132,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) /* ++Andreas: the mid level code knows about QUEUE_FULL now. 
*/ TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; - TAG_PRINTK("scsi%d: target %d lun %d returned " + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned " "QUEUE_FULL after %d commands\n", HOSTNO, cmd->device->id, cmd->device->lun, ta->nr_allocated); @@ -2213,7 +2176,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); - ASEN_PRINTK("scsi%d: performing request sense\n", + dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO); /* this is initialized from initialize_SCp cmd->SCp.buffer = NULL; @@ -2225,7 +2188,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) SET_NEXT(cmd, hostdata->issue_queue); hostdata->issue_queue = (struct scsi_cmnd *) cmd; local_irq_restore(flags); - QU_PRINTK("scsi%d: REQUEST SENSE added to head of " + dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of " "issue queue\n", H_NO(cmd)); } else #endif /* def AUTOSENSE */ @@ -2265,7 +2228,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) cmd->device->tagged_supported = 0; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); cmd->tag = TAG_NONE; - TAG_PRINTK("scsi%d: target %d lun %d rejected " + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected " "QUEUE_TAG message; tagged queuing " "disabled\n", HOSTNO, cmd->device->id, cmd->device->lun); @@ -2282,7 +2245,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) hostdata->connected = NULL; hostdata->disconnected_queue = cmd; local_irq_restore(flags); - QU_PRINTK("scsi%d: command for target %d lun %d was " + dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was " "moved from connected to the " "disconnected_queue\n", HOSTNO, cmd->device->id, cmd->device->lun); @@ -2335,13 +2298,13 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) /* Accept first byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO); + dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO); len = 2; data = extended_msg + 1; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); - EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO, + dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO, (int)extended_msg[1], (int)extended_msg[2]); if (!len && extended_msg[1] <= @@ -2353,7 +2316,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); - EXT_PRINTK("scsi%d: message received, residual %d\n", + dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n", HOSTNO, len); switch (extended_msg[2]) { @@ -2443,7 +2406,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) break; default: printk("scsi%d: unknown phase\n", HOSTNO); - NCR_PRINT(NDEBUG_ANY); + NCR5380_dprint(NDEBUG_ANY, instance); } /* switch(phase) */ } /* if (tmp * SR_REQ) */ } /* while (1) */ @@ -2485,7 +2448,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance) target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); - RSL_PRINTK("scsi%d: reselect\n", HOSTNO); + dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO); /* * At this point, we have detected that our SCSI ID is on the bus, @@ -2607,14 +2570,14 @@ static void NCR5380_reselect (struct 
Scsi_Host *instance) if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && msg[1] == SIMPLE_QUEUE_TAG) tag = msg[2]; - TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at " + dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at " "reselection\n", HOSTNO, target_mask, lun, tag); } #endif hostdata->connected = tmp; - RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", - HOSTNO, tmp->target, tmp->lun, tmp->tag); + dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", + HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); } @@ -2649,7 +2612,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) local_irq_save(flags); - ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, + dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); @@ -2662,7 +2625,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) if (hostdata->connected == cmd) { - ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO); + dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO); /* * We should perform BSY checking, and make sure we haven't slipped * into BUS FREE. @@ -2691,11 +2654,11 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) #endif local_irq_restore(flags); cmd->scsi_done(cmd); - return SCSI_ABORT_SUCCESS; + return SUCCESS; } else { /* local_irq_restore(flags); */ printk("scsi%d: abort of connected command failed!\n", HOSTNO); - return SCSI_ABORT_ERROR; + return FAILED; } } #endif @@ -2713,12 +2676,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) SET_NEXT(tmp, NULL); tmp->result = DID_ABORT << 16; local_irq_restore(flags); - ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", + dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n", HOSTNO); /* Tagged queuing note: no tag to free here, hasn't been assigned * yet... */ tmp->scsi_done(tmp); - return SCSI_ABORT_SUCCESS; + return SUCCESS; } /* @@ -2734,8 +2697,8 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) if (hostdata->connected) { local_irq_restore(flags); - ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); - return SCSI_ABORT_SNOOZE; + dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO); + return FAILED; } /* @@ -2767,12 +2730,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) tmp = NEXT(tmp)) if (cmd == tmp) { local_irq_restore(flags); - ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); + dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO); if (NCR5380_select (instance, cmd, (int) cmd->tag)) - return SCSI_ABORT_BUSY; + return FAILED; - ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); + dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO); do_abort (instance); @@ -2796,7 +2759,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) #endif local_irq_restore(flags); tmp->scsi_done(tmp); - return SCSI_ABORT_SUCCESS; + return SUCCESS; } } @@ -2813,7 +2776,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) local_irq_restore(flags); printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); - return SCSI_ABORT_NOT_RUNNING; + return FAILED; } @@ -2822,7 +2785,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) * * Purpose : reset the SCSI bus. 
* - * Returns : SCSI_RESET_WAKEUP + * Returns : SUCCESS or FAILURE * */ @@ -2831,7 +2794,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) SETUP_HOSTDATA(cmd->device->host); int i; unsigned long flags; -#if 1 +#if defined(RESET_RUN_DONE) struct scsi_cmnd *connected, *disconnected_queue; #endif @@ -2853,8 +2816,15 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) * through anymore ... */ (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG ); -#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */ - /* XXX see below XXX */ + /* MSch 20140115 - looking at the generic NCR5380 driver, all of this + * should go. + * Catch-22: if we don't clear all queues, the SCSI driver lock will + * not be released by atari_scsi_reset()! + */ + +#if defined(RESET_RUN_DONE) + /* XXX Should now be done by midlevel code, but it's broken XXX */ + /* XXX see below XXX */ /* MSch: old-style reset: actually abort all command processing here */ @@ -2884,7 +2854,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) */ if ((cmd = connected)) { - ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); + dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); cmd->scsi_done( cmd ); } @@ -2896,14 +2866,14 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) cmd->scsi_done( cmd ); } if (i > 0) - ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i); + dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i); /* since all commands have been explicitly terminated, we need to tell * the midlevel code that the reset was SUCCESSFUL, and there is no * need to 'wake up' the commands by a request_sense */ - return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; + return SUCCESS; #else /* 1 */ /* MSch: new-style reset handling: let the mid-level do what it can */ @@ -2930,11 +2900,11 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) */ if (hostdata->issue_queue) - ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); + dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); if (hostdata->connected) - ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); + dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); if (hostdata->disconnected_queue) - ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); + dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); local_irq_save(flags); hostdata->issue_queue = NULL; @@ -2951,7 +2921,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) local_irq_restore(flags); /* we did no complete reset of all commands, so a wakeup is required */ - return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; + return SUCCESS; #endif /* 1 */ } diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 6e25889db9d..9707b7494a8 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -3,6 +3,10 @@ * * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net) * + * VME support added by Sam Creasey + * + * TODO: modify this driver to support multiple Sun3 SCSI VME boards + * * Adapted from mac_scsinew.c: */ /* @@ -45,10 +49,6 @@ * USLEEP - enable support for devices that don't disconnect. Untested. 
*/ -/* - * $Log: sun3_NCR5380.c,v $ - */ - #define AUTOSENSE #include <linux/types.h> @@ -69,23 +69,15 @@ #include <asm/idprom.h> #include <asm/machines.h> -#define NDEBUG 0 - -#define NDEBUG_ABORT 0x00100000 -#define NDEBUG_TAGS 0x00200000 -#define NDEBUG_MERGING 0x00400000 - /* dma on! */ #define REAL_DMA #include "scsi.h" -#include "initio.h" #include <scsi/scsi_host.h> #include "sun3_scsi.h" +#include "NCR5380.h" -static void NCR5380_print(struct Scsi_Host *instance); - -/* #define OLDDMA */ +extern int sun3_map_test(unsigned long, char *); #define USE_WRAPPER /*#define RESET_BOOT */ @@ -101,7 +93,11 @@ static void NCR5380_print(struct Scsi_Host *instance); /* #define SUPPORT_TAGS */ +#ifdef SUN3_SCSI_VME +#define ENABLE_IRQ() +#else #define ENABLE_IRQ() enable_irq( IRQ_SUN3_SCSI ); +#endif static irqreturn_t scsi_sun3_intr(int irq, void *dummy); @@ -123,6 +119,8 @@ module_param(setup_hostid, int, 0); static struct scsi_cmnd *sun3_dma_setup_done = NULL; +#define RESET_RUN_DONE + #define AFTER_RESET_DELAY (HZ/2) /* ms to wait after hitting dma regs */ @@ -136,10 +134,9 @@ static struct scsi_cmnd *sun3_dma_setup_done = NULL; static volatile unsigned char *sun3_scsi_regp; static volatile struct sun3_dma_regs *dregs; -#ifdef OLDDMA -static unsigned char *dmabuf = NULL; /* dma memory buffer */ -#endif +#ifndef SUN3_SCSI_VME static struct sun3_udc_regs *udc_regs = NULL; +#endif static unsigned char *sun3_dma_orig_addr = NULL; static unsigned long sun3_dma_orig_count = 0; static int sun3_dma_active = 0; @@ -159,6 +156,7 @@ static inline void sun3scsi_write(int reg, int value) sun3_scsi_regp[reg] = value; } +#ifndef SUN3_SCSI_VME /* dma controller register access functions */ static inline unsigned short sun3_udc_read(unsigned char reg) @@ -180,6 +178,7 @@ static inline void sun3_udc_write(unsigned short val, unsigned char reg) dregs->udc_data = val; udelay(SUN3_DMA_DELAY); } +#endif /* * XXX: status debug @@ -198,17 +197,32 @@ static struct Scsi_Host *default_instance; * */ -int __init sun3scsi_detect(struct scsi_host_template * tpnt) +static int __init sun3scsi_detect(struct scsi_host_template *tpnt) { - unsigned long ioaddr; + unsigned long ioaddr, irq; static int called = 0; struct Scsi_Host *instance; +#ifdef SUN3_SCSI_VME + int i; + unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI, + IOBASE_SUN3_VMESCSI + 0x4000, + 0 }; + unsigned long vecs[3] = { SUN3_VEC_VMESCSI0, + SUN3_VEC_VMESCSI1, + 0 }; +#endif /* check that this machine has an onboard 5380 */ switch(idprom->id_machtype) { +#ifdef SUN3_SCSI_VME + case SM_SUN3|SM_3_160: + case SM_SUN3|SM_3_260: + break; +#else case SM_SUN3|SM_3_50: case SM_SUN3|SM_3_60: break; +#endif default: return 0; @@ -217,7 +231,11 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) if(called) return 0; +#ifdef SUN3_SCSI_VME + tpnt->proc_name = "Sun3 5380 VME SCSI"; +#else tpnt->proc_name = "Sun3 5380 SCSI"; +#endif /* setup variables */ tpnt->can_queue = @@ -234,6 +252,38 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) tpnt->this_id = 7; } +#ifdef SUN3_SCSI_VME + ioaddr = 0; + for (i = 0; addrs[i] != 0; i++) { + unsigned char x; + + ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE, + SUN3_PAGE_TYPE_VME16); + irq = vecs[i]; + sun3_scsi_regp = (unsigned char *)ioaddr; + + dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8); + + if (sun3_map_test((unsigned long)dregs, &x)) { + unsigned short oldcsr; + + oldcsr = dregs->csr; + dregs->csr = 0; + udelay(SUN3_DMA_DELAY); + if (dregs->csr == 0x1400) + break; + + 
dregs->csr = oldcsr; + } + + iounmap((void *)ioaddr); + ioaddr = 0; + } + + if (!ioaddr) + return 0; +#else + irq = IRQ_SUN3_SCSI; ioaddr = (unsigned long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE); sun3_scsi_regp = (unsigned char *)ioaddr; @@ -244,11 +294,6 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); return 0; } -#ifdef OLDDMA - if((dmabuf = dvma_malloc_align(SUN3_DVMA_BUFSIZE, 0x10000)) == NULL) { - printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); - return 0; - } #endif #ifdef SUPPORT_TAGS if (setup_use_tagged_queuing < 0) @@ -262,7 +307,7 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) default_instance = instance; instance->io_port = (unsigned long) ioaddr; - instance->irq = IRQ_SUN3_SCSI; + instance->irq = irq; NCR5380_init(instance, 0); @@ -283,7 +328,8 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) #endif } - printk("scsi%d: Sun3 5380 at port %lX irq", instance->host_no, instance->io_port); + pr_info("scsi%d: %s at port %lX irq", instance->host_no, + tpnt->proc_name, instance->io_port); if (instance->irq == SCSI_IRQ_NONE) printk ("s disabled"); else @@ -300,6 +346,15 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; udelay(SUN3_DMA_DELAY); dregs->fifo_count = 0; +#ifdef SUN3_SCSI_VME + dregs->fifo_count_hi = 0; + dregs->dma_addr_hi = 0; + dregs->dma_addr_lo = 0; + dregs->dma_count_hi = 0; + dregs->dma_count_lo = 0; + + dregs->ivect = VME_DATA24 | (instance->irq & 0xff); +#endif called = 1; @@ -367,7 +422,8 @@ static void sun3_scsi_reset_boot(struct Scsi_Host *instance) } #endif -const char * sun3scsi_info (struct Scsi_Host *spnt) { +static const char *sun3scsi_info(struct Scsi_Host *spnt) +{ return ""; } @@ -379,6 +435,10 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dummy) unsigned short csr = dregs->csr; int handled = 0; +#ifdef SUN3_SCSI_VME + dregs->csr &= ~CSR_DMA_ENABLE; +#endif + if(csr & ~CSR_GOOD) { if(csr & CSR_DMA_BUSERR) { printk("scsi%d: bus error in dma\n", default_instance->host_no); @@ -422,31 +482,28 @@ void sun3_sun3_debug (void) /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag) { -#ifdef OLDDMA - if(write_flag) - memcpy(dmabuf, data, count); - else { - sun3_dma_orig_addr = data; - sun3_dma_orig_count = count; - } -#else void *addr; if(sun3_dma_orig_addr != NULL) dvma_unmap(sun3_dma_orig_addr); -// addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf); +#ifdef SUN3_SCSI_VME + addr = (void *)dvma_map_vme((unsigned long) data, count); +#else addr = (void *)dvma_map((unsigned long) data, count); +#endif sun3_dma_orig_addr = addr; sun3_dma_orig_count = count; -#endif + +#ifndef SUN3_SCSI_VME dregs->fifo_count = 0; sun3_udc_write(UDC_RESET, UDC_CSR); /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; +#endif /* set direction */ if(write_flag) @@ -454,6 +511,17 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri else dregs->csr &= ~CSR_SEND; +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_PACK_ENABLE; + + dregs->dma_addr_hi = ((unsigned long)addr >> 16); + dregs->dma_addr_lo = ((unsigned long)addr & 0xffff); + + dregs->dma_count_hi = 0; + dregs->dma_count_lo = 0; + dregs->fifo_count_hi = 0; + dregs->fifo_count = 0; +#else /* byte count for fifo */ dregs->fifo_count = count; @@ -467,17 +535,12 @@ static unsigned long 
sun3scsi_dma_setup(void *data, unsigned long count, int wri printk("scsi%d: fifo_mismatch %04x not %04x\n", default_instance->host_no, dregs->fifo_count, (unsigned int) count); - NCR5380_print(default_instance); + NCR5380_dprint(NDEBUG_DMA, default_instance); } /* setup udc */ -#ifdef OLDDMA - udc_regs->addr_hi = ((dvma_vtob(dmabuf) & 0xff0000) >> 8); - udc_regs->addr_lo = (dvma_vtob(dmabuf) & 0xffff); -#else udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8); udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff); -#endif udc_regs->count = count/2; /* count in words */ udc_regs->mode_hi = UDC_MODE_HIWORD; if(write_flag) { @@ -501,11 +564,13 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri /* interrupt enable */ sun3_udc_write(UDC_INT_ENABLE, UDC_CSR); +#endif return count; } +#ifndef SUN3_SCSI_VME static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance) { unsigned short resid; @@ -518,6 +583,7 @@ static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance) return (unsigned long) resid; } +#endif static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) { @@ -536,8 +602,23 @@ static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) { +#ifdef SUN3_SCSI_VME + unsigned short csr; + + csr = dregs->csr; + dregs->dma_count_hi = (sun3_dma_orig_count >> 16); + dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff); + + dregs->fifo_count_hi = (sun3_dma_orig_count >> 16); + dregs->fifo_count = (sun3_dma_orig_count & 0xffff); + +/* if(!(csr & CSR_DMA_ENABLE)) + * dregs->csr |= CSR_DMA_ENABLE; + */ +#else sun3_udc_write(UDC_CHN_START, UDC_CSR); +#endif return 0; } @@ -545,12 +626,46 @@ static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) /* clean up after our dma is done */ static int sun3scsi_dma_finish(int write_flag) { - unsigned short count; + unsigned short __maybe_unused count; unsigned short fifo; int ret = 0; sun3_dma_active = 0; -#if 1 + +#ifdef SUN3_SCSI_VME + dregs->csr &= ~CSR_DMA_ENABLE; + + fifo = dregs->fifo_count; + if (write_flag) { + if ((fifo > 0) && (fifo < sun3_dma_orig_count)) + fifo++; + } + + last_residual = fifo; + /* empty bytes from the fifo which didn't make it */ + if ((!write_flag) && (dregs->csr & CSR_LEFT)) { + unsigned char *vaddr; + + vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr); + + vaddr += (sun3_dma_orig_count - fifo); + vaddr--; + + switch (dregs->csr & CSR_LEFT) { + case CSR_LEFT_3: + *vaddr = (dregs->bpack_lo & 0xff00) >> 8; + vaddr--; + + case CSR_LEFT_2: + *vaddr = (dregs->bpack_hi & 0x00ff); + vaddr--; + + case CSR_LEFT_1: + *vaddr = (dregs->bpack_hi & 0xff00) >> 8; + break; + } + } +#else // check to empty the fifo on a read if(!write_flag) { int tmo = 20000; /* .2 sec */ @@ -566,28 +681,8 @@ static int sun3scsi_dma_finish(int write_flag) udelay(10); } } - -#endif count = sun3scsi_dma_count(default_instance); -#ifdef OLDDMA - - /* if we've finished a read, copy out the data we read */ - if(sun3_dma_orig_addr) { - /* check for residual bytes after dma end */ - if(count && (NCR5380_read(BUS_AND_STATUS_REG) & - (BASR_PHASE_MATCH | BASR_ACK))) { - printk("scsi%d: sun3_scsi_finish: read overrun baby... 
", default_instance->host_no); - printk("basr now %02x\n", NCR5380_read(BUS_AND_STATUS_REG)); - ret = count; - } - - /* copy in what we dma'd no matter what */ - memcpy(sun3_dma_orig_addr, dmabuf, sun3_dma_orig_count); - sun3_dma_orig_addr = NULL; - - } -#else fifo = dregs->fifo_count; last_residual = fifo; @@ -605,10 +700,23 @@ static int sun3scsi_dma_finish(int write_flag) vaddr[-2] = (data & 0xff00) >> 8; vaddr[-1] = (data & 0xff); } +#endif dvma_unmap(sun3_dma_orig_addr); sun3_dma_orig_addr = NULL; -#endif + +#ifdef SUN3_SCSI_VME + dregs->dma_addr_hi = 0; + dregs->dma_addr_lo = 0; + dregs->dma_count_hi = 0; + dregs->dma_count_lo = 0; + + dregs->fifo_count = 0; + dregs->fifo_count_hi = 0; + + dregs->csr &= ~CSR_SEND; +/* dregs->csr |= CSR_DMA_ENABLE; */ +#else sun3_udc_write(UDC_RESET, UDC_CSR); dregs->fifo_count = 0; dregs->csr &= ~CSR_SEND; @@ -616,6 +724,7 @@ static int sun3scsi_dma_finish(int write_flag) /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; +#endif sun3_dma_setup_done = NULL; @@ -626,6 +735,7 @@ static int sun3scsi_dma_finish(int write_flag) #include "sun3_NCR5380.c" static struct scsi_host_template driver_template = { + .show_info = sun3scsi_show_info, .name = SUN3_SCSI_NAME, .detect = sun3scsi_detect, .release = sun3scsi_release, diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h index bcefd8458e6..e96a37cf06a 100644 --- a/drivers/scsi/sun3_scsi.h +++ b/drivers/scsi/sun3_scsi.h @@ -29,12 +29,8 @@ * 1+ (800) 334-5454 */ -/* - * $Log: cumana_NCR5380.h,v $ - */ - -#ifndef SUN3_NCR5380_H -#define SUN3_NCR5380_H +#ifndef SUN3_SCSI_H +#define SUN3_SCSI_H #define SUN3SCSI_PUBLIC_RELEASE 1 @@ -82,8 +78,6 @@ static int sun3scsi_release (struct Scsi_Host *); #define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI" #endif -#ifndef HOSTS_C - #define NCR5380_implementation_fields \ int port, ctrl @@ -100,7 +94,7 @@ static int sun3scsi_release (struct Scsi_Host *); #define NCR5380_queue_command sun3scsi_queue_command #define NCR5380_bus_reset sun3scsi_bus_reset #define NCR5380_abort sun3scsi_abort -#define NCR5380_proc_info sun3scsi_proc_info +#define NCR5380_show_info sun3scsi_show_info #define NCR5380_dma_xfer_len(i, cmd, phase) \ sun3scsi_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1) @@ -108,9 +102,6 @@ static int sun3scsi_release (struct Scsi_Host *); #define NCR5380_dma_read_setup(instance, data, count) sun3scsi_dma_setup(data, count, 0) #define NCR5380_dma_residual sun3scsi_dma_residual -#define BOARD_NORMAL 0 -#define BOARD_NCR53C400 1 - /* additional registers - mainly DMA control regs */ /* these start at regbase + 8 -- directly after the NCR regs */ struct sun3_dma_regs { @@ -191,189 +182,5 @@ struct sun3_udc_regs { #define VME_DATA24 0x3d00 -// debugging printk's, taken from atari_scsi.h -/* Debugging printk definitions: - * - * ARB -> arbitration - * ASEN -> auto-sense - * DMA -> DMA - * HSH -> PIO handshake - * INF -> information transfer - * INI -> initialization - * INT -> interrupt - * LNK -> linked commands - * MAIN -> NCR5380_main() control flow - * NDAT -> no data-out phase - * NWR -> no write commands - * PIO -> PIO transfers - * PDMA -> pseudo DMA (unused on Atari) - * QU -> queues - * RSL -> reselections - * SEL -> selections - * USL -> usleep cpde (unused on Atari) - * LBS -> last byte sent (unused on Atari) - * RSS -> restarting of selections - * EXT -> extended messages - * ABRT -> aborting and resetting - * TAG -> queue tag handling - * MER -> merging of consec. 
buffers - * - */ - -#include "NCR5380.h" - -#if NDEBUG & NDEBUG_ARBITRATION -#define ARB_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define ARB_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_AUTOSENSE -#define ASEN_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define ASEN_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_DMA -#define DMA_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define DMA_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_HANDSHAKE -#define HSH_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define HSH_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_INFORMATION -#define INF_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define INF_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_INIT -#define INI_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define INI_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_INTR -#define INT_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define INT_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_LINKED -#define LNK_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define LNK_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_MAIN -#define MAIN_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define MAIN_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_NO_DATAOUT -#define NDAT_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define NDAT_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_NO_WRITE -#define NWR_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define NWR_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_PIO -#define PIO_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define PIO_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_PSEUDO_DMA -#define PDMA_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define PDMA_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_QUEUES -#define QU_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define QU_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_RESELECTION -#define RSL_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define RSL_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_SELECTION -#define SEL_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define SEL_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_USLEEP -#define USL_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define USL_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_LAST_BYTE_SENT -#define LBS_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define LBS_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_RESTART_SELECT -#define RSS_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define RSS_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_EXTENDED -#define EXT_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define EXT_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_ABORT -#define ABRT_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define ABRT_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_TAGS -#define TAG_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define TAG_PRINTK(format, args...) 
-#endif -#if NDEBUG & NDEBUG_MERGING -#define MER_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define MER_PRINTK(format, args...) -#endif - -/* conditional macros for NCR5380_print_{,phase,status} */ - -#define NCR_PRINT(mask) \ - ((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0) - -#define NCR_PRINT_PHASE(mask) \ - ((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0) - -#define NCR_PRINT_STATUS(mask) \ - ((NDEBUG & (mask)) ? NCR5380_print_status(instance) : (void)0) - - - -#endif /* ndef HOSTS_C */ -#endif /* SUN3_NCR5380_H */ +#endif /* SUN3_SCSI_H */ diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c index a3dd55d1d2f..1eeece6e204 100644 --- a/drivers/scsi/sun3_scsi_vme.c +++ b/drivers/scsi/sun3_scsi_vme.c @@ -1,589 +1,3 @@ - /* - * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl) - * - * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net) - * - * VME support added by Sam Creasey - * - * Adapted from sun3_scsi.c -- see there for other headers - * - * TODO: modify this driver to support multiple Sun3 SCSI VME boards - * - */ - -#define AUTOSENSE - -#include <linux/types.h> -#include <linux/stddef.h> -#include <linux/ctype.h> -#include <linux/delay.h> - -#include <linux/module.h> -#include <linux/signal.h> -#include <linux/ioport.h> -#include <linux/init.h> -#include <linux/blkdev.h> - -#include <asm/io.h> - -#include <asm/sun3ints.h> -#include <asm/dvma.h> -#include <asm/idprom.h> -#include <asm/machines.h> - #define SUN3_SCSI_VME -#undef SUN3_SCSI_DEBUG - -/* dma on! */ -#define REAL_DMA - -#define NDEBUG 0 - -#define NDEBUG_ABORT 0x00100000 -#define NDEBUG_TAGS 0x00200000 -#define NDEBUG_MERGING 0x00400000 - -#include "scsi.h" -#include "initio.h" -#include <scsi/scsi_host.h> -#include "sun3_scsi.h" - -extern int sun3_map_test(unsigned long, char *); - -#define USE_WRAPPER -/*#define RESET_BOOT */ -#define DRIVER_SETUP - -/* - * BUG can be used to trigger a strange code-size related hang on 2.1 kernels - */ -#ifdef BUG -#undef RESET_BOOT -#undef DRIVER_SETUP -#endif - -/* #define SUPPORT_TAGS */ - -//#define ENABLE_IRQ() enable_irq( SUN3_VEC_VMESCSI0 ); -#define ENABLE_IRQ() - - -static irqreturn_t scsi_sun3_intr(int irq, void *dummy); -static inline unsigned char sun3scsi_read(int reg); -static inline void sun3scsi_write(int reg, int value); - -static int setup_can_queue = -1; -module_param(setup_can_queue, int, 0); -static int setup_cmd_per_lun = -1; -module_param(setup_cmd_per_lun, int, 0); -static int setup_sg_tablesize = -1; -module_param(setup_sg_tablesize, int, 0); -#ifdef SUPPORT_TAGS -static int setup_use_tagged_queuing = -1; -module_param(setup_use_tagged_queuing, int, 0); -#endif -static int setup_hostid = -1; -module_param(setup_hostid, int, 0); - -static struct scsi_cmnd *sun3_dma_setup_done = NULL; - -#define AFTER_RESET_DELAY (HZ/2) - -/* ms to wait after hitting dma regs */ -#define SUN3_DMA_DELAY 10 - -/* dvma buffer to allocate -- 32k should hopefully be more than sufficient */ -#define SUN3_DVMA_BUFSIZE 0xe000 - -/* minimum number of bytes to do dma on */ -#define SUN3_DMA_MINSIZE 128 - -static volatile unsigned char *sun3_scsi_regp; -static volatile struct sun3_dma_regs *dregs; -#ifdef OLDDMA -static unsigned char *dmabuf = NULL; /* dma memory buffer */ -#endif -static unsigned char *sun3_dma_orig_addr = NULL; -static unsigned long sun3_dma_orig_count = 0; -static int sun3_dma_active = 0; -static unsigned long last_residual = 0; - -/* - * NCR 5380 register access functions - */ - 
-static inline unsigned char sun3scsi_read(int reg) -{ - return( sun3_scsi_regp[reg] ); -} - -static inline void sun3scsi_write(int reg, int value) -{ - sun3_scsi_regp[reg] = value; -} - -/* - * XXX: status debug - */ -static struct Scsi_Host *default_instance; - -/* - * Function : int sun3scsi_detect(struct scsi_host_template * tpnt) - * - * Purpose : initializes mac NCR5380 driver based on the - * command line / compile time port and irq definitions. - * - * Inputs : tpnt - template for this SCSI adapter. - * - * Returns : 1 if a host adapter was found, 0 if not. - * - */ - -static int __init sun3scsi_detect(struct scsi_host_template * tpnt) -{ - unsigned long ioaddr, irq = 0; - static int called = 0; - struct Scsi_Host *instance; - int i; - unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI, - IOBASE_SUN3_VMESCSI + 0x4000, - 0 }; - unsigned long vecs[3] = { SUN3_VEC_VMESCSI0, - SUN3_VEC_VMESCSI1, - 0 }; - /* check that this machine has an onboard 5380 */ - switch(idprom->id_machtype) { - case SM_SUN3|SM_3_160: - case SM_SUN3|SM_3_260: - break; - - default: - return 0; - } - - if(called) - return 0; - - tpnt->proc_name = "Sun3 5380 VME SCSI"; - - /* setup variables */ - tpnt->can_queue = - (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE; - tpnt->cmd_per_lun = - (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN; - tpnt->sg_tablesize = - (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE; - - if (setup_hostid >= 0) - tpnt->this_id = setup_hostid; - else { - /* use 7 as default */ - tpnt->this_id = 7; - } - - ioaddr = 0; - for(i = 0; addrs[i] != 0; i++) { - unsigned char x; - - ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE, - SUN3_PAGE_TYPE_VME16); - irq = vecs[i]; - sun3_scsi_regp = (unsigned char *)ioaddr; - - dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8); - - if(sun3_map_test((unsigned long)dregs, &x)) { - unsigned short oldcsr; - - oldcsr = dregs->csr; - dregs->csr = 0; - udelay(SUN3_DMA_DELAY); - if(dregs->csr == 0x1400) - break; - - dregs->csr = oldcsr; - } - - iounmap((void *)ioaddr); - ioaddr = 0; - } - - if(!ioaddr) - return 0; - -#ifdef SUPPORT_TAGS - if (setup_use_tagged_queuing < 0) - setup_use_tagged_queuing = USE_TAGGED_QUEUING; -#endif - - instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); - if(instance == NULL) - return 0; - - default_instance = instance; - - instance->io_port = (unsigned long) ioaddr; - instance->irq = irq; - - NCR5380_init(instance, 0); - - instance->n_io_port = 32; - - ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; - - if (request_irq(instance->irq, scsi_sun3_intr, - 0, "Sun3SCSI-5380VME", instance)) { -#ifndef REAL_DMA - printk("scsi%d: IRQ%d not free, interrupts disabled\n", - instance->host_no, instance->irq); - instance->irq = SCSI_IRQ_NONE; -#else - printk("scsi%d: IRQ%d not free, bailing out\n", - instance->host_no, instance->irq); - return 0; -#endif - } - - printk("scsi%d: Sun3 5380 VME at port %lX irq", instance->host_no, instance->io_port); - if (instance->irq == SCSI_IRQ_NONE) - printk ("s disabled"); - else - printk (" %d", instance->irq); - printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", - instance->can_queue, instance->cmd_per_lun, - SUN3SCSI_PUBLIC_RELEASE); - printk("\nscsi%d:", instance->host_no); - NCR5380_print_options(instance); - printk("\n"); - - dregs->csr = 0; - udelay(SUN3_DMA_DELAY); - dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; - udelay(SUN3_DMA_DELAY); - dregs->fifo_count = 0; - dregs->fifo_count_hi = 0; - dregs->dma_addr_hi = 0; - 
dregs->dma_addr_lo = 0; - dregs->dma_count_hi = 0; - dregs->dma_count_lo = 0; - - dregs->ivect = VME_DATA24 | (instance->irq & 0xff); - - called = 1; - -#ifdef RESET_BOOT - sun3_scsi_reset_boot(instance); -#endif - - return 1; -} - -int sun3scsi_release (struct Scsi_Host *shpnt) -{ - if (shpnt->irq != SCSI_IRQ_NONE) - free_irq(shpnt->irq, shpnt); - - iounmap((void *)sun3_scsi_regp); - - NCR5380_exit(shpnt); - return 0; -} - -#ifdef RESET_BOOT -/* - * Our 'bus reset on boot' function - */ - -static void sun3_scsi_reset_boot(struct Scsi_Host *instance) -{ - unsigned long end; - - NCR5380_local_declare(); - NCR5380_setup(instance); - - /* - * Do a SCSI reset to clean up the bus during initialization. No - * messing with the queues, interrupts, or locks necessary here. - */ - - printk( "Sun3 SCSI: resetting the SCSI bus..." ); - - /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */ -// sun3_disable_irq( IRQ_SUN3_SCSI ); - - /* get in phase */ - NCR5380_write( TARGET_COMMAND_REG, - PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); - - /* assert RST */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); - - /* The min. reset hold time is 25us, so 40us should be enough */ - udelay( 50 ); - - /* reset RST and interrupt */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); - NCR5380_read( RESET_PARITY_INTERRUPT_REG ); - - for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); ) - barrier(); - - /* switch on SCSI IRQ again */ -// sun3_enable_irq( IRQ_SUN3_SCSI ); - - printk( " done\n" ); -} -#endif - -static const char * sun3scsi_info (struct Scsi_Host *spnt) { - return ""; -} - -// safe bits for the CSR -#define CSR_GOOD 0x060f - -static irqreturn_t scsi_sun3_intr(int irq, void *dummy) -{ - unsigned short csr = dregs->csr; - int handled = 0; - - dregs->csr &= ~CSR_DMA_ENABLE; - - -#ifdef SUN3_SCSI_DEBUG - printk("scsi_intr csr %x\n", csr); -#endif - - if(csr & ~CSR_GOOD) { - if(csr & CSR_DMA_BUSERR) { - printk("scsi%d: bus error in dma\n", default_instance->host_no); -#ifdef SUN3_SCSI_DEBUG - printk("scsi: residual %x count %x addr %p dmaaddr %x\n", - dregs->fifo_count, - dregs->dma_count_lo | (dregs->dma_count_hi << 16), - sun3_dma_orig_addr, - dregs->dma_addr_lo | (dregs->dma_addr_hi << 16)); -#endif - } - - if(csr & CSR_DMA_CONFLICT) { - printk("scsi%d: dma conflict\n", default_instance->host_no); - } - handled = 1; - } - - if(csr & (CSR_SDB_INT | CSR_DMA_INT)) { - NCR5380_intr(irq, dummy); - handled = 1; - } - - return IRQ_RETVAL(handled); -} - -/* - * Debug stuff - to be called on NMI, or sysrq key. 
Use at your own risk; - * reentering NCR5380_print_status seems to have ugly side effects - */ - -/* this doesn't seem to get used at all -- sam */ -#if 0 -void sun3_sun3_debug (void) -{ - unsigned long flags; - NCR5380_local_declare(); - - if (default_instance) { - local_irq_save(flags); - NCR5380_print_status(default_instance); - local_irq_restore(flags); - } -} -#endif - - -/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ -static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag) -{ - void *addr; - - if(sun3_dma_orig_addr != NULL) - dvma_unmap(sun3_dma_orig_addr); - -// addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf); - addr = (void *)dvma_map_vme((unsigned long) data, count); - - sun3_dma_orig_addr = addr; - sun3_dma_orig_count = count; - -#ifdef SUN3_SCSI_DEBUG - printk("scsi: dma_setup addr %p count %x\n", addr, count); -#endif - -// dregs->fifo_count = 0; -#if 0 - /* reset fifo */ - dregs->csr &= ~CSR_FIFO; - dregs->csr |= CSR_FIFO; -#endif - /* set direction */ - if(write_flag) - dregs->csr |= CSR_SEND; - else - dregs->csr &= ~CSR_SEND; - - /* reset fifo */ -// dregs->csr &= ~CSR_FIFO; -// dregs->csr |= CSR_FIFO; - - dregs->csr |= CSR_PACK_ENABLE; - - dregs->dma_addr_hi = ((unsigned long)addr >> 16); - dregs->dma_addr_lo = ((unsigned long)addr & 0xffff); - - dregs->dma_count_hi = 0; - dregs->dma_count_lo = 0; - dregs->fifo_count_hi = 0; - dregs->fifo_count = 0; - -#ifdef SUN3_SCSI_DEBUG - printk("scsi: dma_setup done csr %x\n", dregs->csr); -#endif - return count; - -} - -static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) -{ - return last_residual; -} - -static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, - struct scsi_cmnd *cmd, - int write_flag) -{ - if (cmd->request->cmd_type == REQ_TYPE_FS) - return wanted; - else - return 0; -} - -static int sun3scsi_dma_start(unsigned long count, char *data) -{ - - unsigned short csr; - - csr = dregs->csr; -#ifdef SUN3_SCSI_DEBUG - printk("scsi: dma_start data %p count %x csr %x fifo %x\n", data, count, csr, dregs->fifo_count); -#endif - - dregs->dma_count_hi = (sun3_dma_orig_count >> 16); - dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff); - - dregs->fifo_count_hi = (sun3_dma_orig_count >> 16); - dregs->fifo_count = (sun3_dma_orig_count & 0xffff); - -// if(!(csr & CSR_DMA_ENABLE)) -// dregs->csr |= CSR_DMA_ENABLE; - - return 0; -} - -/* clean up after our dma is done */ -static int sun3scsi_dma_finish(int write_flag) -{ - unsigned short fifo; - int ret = 0; - - sun3_dma_active = 0; - - dregs->csr &= ~CSR_DMA_ENABLE; - - fifo = dregs->fifo_count; - if(write_flag) { - if((fifo > 0) && (fifo < sun3_dma_orig_count)) - fifo++; - } - - last_residual = fifo; -#ifdef SUN3_SCSI_DEBUG - printk("scsi: residual %x total %x\n", fifo, sun3_dma_orig_count); -#endif - /* empty bytes from the fifo which didn't make it */ - if((!write_flag) && (dregs->csr & CSR_LEFT)) { - unsigned char *vaddr; - -#ifdef SUN3_SCSI_DEBUG - printk("scsi: got left over bytes\n"); -#endif - - vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr); - - vaddr += (sun3_dma_orig_count - fifo); - vaddr--; - - switch(dregs->csr & CSR_LEFT) { - case CSR_LEFT_3: - *vaddr = (dregs->bpack_lo & 0xff00) >> 8; - vaddr--; - - case CSR_LEFT_2: - *vaddr = (dregs->bpack_hi & 0x00ff); - vaddr--; - - case CSR_LEFT_1: - *vaddr = (dregs->bpack_hi & 0xff00) >> 8; - break; - } - - - } - - dvma_unmap(sun3_dma_orig_addr); - sun3_dma_orig_addr = NULL; - - dregs->dma_addr_hi = 0; 
- dregs->dma_addr_lo = 0; - dregs->dma_count_hi = 0; - dregs->dma_count_lo = 0; - - dregs->fifo_count = 0; - dregs->fifo_count_hi = 0; - - dregs->csr &= ~CSR_SEND; - -// dregs->csr |= CSR_DMA_ENABLE; - -#if 0 - /* reset fifo */ - dregs->csr &= ~CSR_FIFO; - dregs->csr |= CSR_FIFO; -#endif - sun3_dma_setup_done = NULL; - - return ret; - -} - -#include "sun3_NCR5380.c" - -static struct scsi_host_template driver_template = { - .name = SUN3_SCSI_NAME, - .detect = sun3scsi_detect, - .release = sun3scsi_release, - .info = sun3scsi_info, - .queuecommand = sun3scsi_queue_command, - .eh_abort_handler = sun3scsi_abort, - .eh_bus_reset_handler = sun3scsi_bus_reset, - .can_queue = CAN_QUEUE, - .this_id = 7, - .sg_tablesize = SG_TABLESIZE, - .cmd_per_lun = CMD_PER_LUN, - .use_clustering = DISABLE_CLUSTERING -}; - - -#include "scsi_module.c" - -MODULE_LICENSE("GPL"); +#include "sun3_scsi.c" diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 599568299fb..6d3ee1ab636 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -1171,112 +1171,36 @@ printk("sym_user_command: data=%ld\n", uc->data); #endif /* SYM_LINUX_USER_COMMAND_SUPPORT */ -#ifdef SYM_LINUX_USER_INFO_SUPPORT -/* - * Informations through the proc file system. - */ -struct info_str { - char *buffer; - int length; - int offset; - int pos; -}; - -static void copy_mem_info(struct info_str *info, char *data, int len) -{ - if (info->pos + len > info->length) - len = info->length - info->pos; - - if (info->pos + len < info->offset) { - info->pos += len; - return; - } - if (info->pos < info->offset) { - data += (info->offset - info->pos); - len -= (info->offset - info->pos); - } - - if (len > 0) { - memcpy(info->buffer + info->pos, data, len); - info->pos += len; - } -} - -static int copy_info(struct info_str *info, char *fmt, ...) -{ - va_list args; - char buf[81]; - int len; - - va_start(args, fmt); - len = vsprintf(buf, fmt, args); - va_end(args); - - copy_mem_info(info, buf, len); - return len; -} - /* * Copy formatted information into the input buffer. */ -static int sym_host_info(struct Scsi_Host *shost, char *ptr, off_t offset, int len) +static int sym_show_info(struct seq_file *m, struct Scsi_Host *shost) { +#ifdef SYM_LINUX_USER_INFO_SUPPORT struct sym_data *sym_data = shost_priv(shost); struct pci_dev *pdev = sym_data->pdev; struct sym_hcb *np = sym_data->ncb; - struct info_str info; - - info.buffer = ptr; - info.length = len; - info.offset = offset; - info.pos = 0; - copy_info(&info, "Chip " NAME53C "%s, device id 0x%x, " - "revision id 0x%x\n", np->s.chip_name, - pdev->device, pdev->revision); - copy_info(&info, "At PCI address %s, IRQ %u\n", + seq_printf(m, "Chip " NAME53C "%s, device id 0x%x, " + "revision id 0x%x\n", np->s.chip_name, + pdev->device, pdev->revision); + seq_printf(m, "At PCI address %s, IRQ %u\n", pci_name(pdev), pdev->irq); - copy_info(&info, "Min. period factor %d, %s SCSI BUS%s\n", - (int) (np->minsync_dt ? np->minsync_dt : np->minsync), - np->maxwide ? "Wide" : "Narrow", - np->minsync_dt ? ", DT capable" : ""); + seq_printf(m, "Min. period factor %d, %s SCSI BUS%s\n", + (int) (np->minsync_dt ? np->minsync_dt : np->minsync), + np->maxwide ? "Wide" : "Narrow", + np->minsync_dt ? ", DT capable" : ""); - copy_info(&info, "Max. started commands %d, " - "max. commands per LUN %d\n", - SYM_CONF_MAX_START, SYM_CONF_MAX_TAG); + seq_printf(m, "Max. started commands %d, " + "max. 
commands per LUN %d\n", + SYM_CONF_MAX_START, SYM_CONF_MAX_TAG); - return info.pos > info.offset? info.pos - info.offset : 0; -} -#endif /* SYM_LINUX_USER_INFO_SUPPORT */ - -/* - * Entry point of the scsi proc fs of the driver. - * - func = 0 means read (returns adapter infos) - * - func = 1 means write (not yet merget from sym53c8xx) - */ -static int sym53c8xx_proc_info(struct Scsi_Host *shost, char *buffer, - char **start, off_t offset, int length, int func) -{ - int retv; - - if (func) { -#ifdef SYM_LINUX_USER_COMMAND_SUPPORT - retv = sym_user_command(shost, buffer, length); -#else - retv = -EINVAL; -#endif - } else { - if (start) - *start = buffer; -#ifdef SYM_LINUX_USER_INFO_SUPPORT - retv = sym_host_info(shost, buffer, offset, length); + return 0; #else - retv = -EINVAL; -#endif - } - - return retv; + return -EINVAL; +#endif /* SYM_LINUX_USER_INFO_SUPPORT */ } + #endif /* SYM_LINUX_PROC_INFO_SUPPORT */ /* @@ -1607,7 +1531,7 @@ static int sym_iomap_device(struct sym_device *device) struct pci_bus_region bus_addr; int i = 2; - pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[1]); + pcibios_resource_to_bus(pdev->bus, &bus_addr, &pdev->resource[1]); device->mmio_base = bus_addr.start; if (device->chip.features & FE_RAM) { @@ -1617,7 +1541,8 @@ static int sym_iomap_device(struct sym_device *device) */ if (!pdev->resource[i].flags) i++; - pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[i]); + pcibios_resource_to_bus(pdev->bus, &bus_addr, + &pdev->resource[i]); device->ram_base = bus_addr.start; } @@ -1742,7 +1667,10 @@ static struct scsi_host_template sym2_template = { .use_clustering = ENABLE_CLUSTERING, .max_sectors = 0xFFFF, #ifdef SYM_LINUX_PROC_INFO_SUPPORT - .proc_info = sym53c8xx_proc_info, + .show_info = sym_show_info, +#ifdef SYM_LINUX_USER_COMMAND_SUPPORT + .write_info = sym_user_command, +#endif .proc_name = NAME53C8XX, #endif }; diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h index b80bf709f10..805369521df 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.h +++ b/drivers/scsi/sym53c8xx_2/sym_glue.h @@ -174,7 +174,7 @@ struct sym_slcb { */ struct sym_shcb { /* - * Chip and controller indentification. + * Chip and controller identification. 
*/ int unit; char inst_name[16]; diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index d92fe4037e9..6b349e30186 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -3000,7 +3000,11 @@ sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task if ((target == -1 || cp->target == target) && (lun == -1 || cp->lun == lun) && (task == -1 || cp->tag == task)) { +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING sym_set_cam_status(cp->cmd, DID_SOFT_ERROR); +#else + sym_set_cam_status(cp->cmd, DID_REQUEUE); +#endif sym_remque(&cp->link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); } diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c index d672d97fb84..8cc80931df1 100644 --- a/drivers/scsi/t128.c +++ b/drivers/scsi/t128.c @@ -102,10 +102,6 @@ * 15 9-11 */ -/* - * $Log: t128.c,v $ - */ - #include <linux/signal.h> #include <linux/io.h> #include <linux/blkdev.h> @@ -201,7 +197,8 @@ int __init t128_detect(struct scsi_host_template * tpnt){ int sig, count; tpnt->proc_name = "t128"; - tpnt->proc_info = &t128_proc_info; + tpnt->show_info = t128_show_info; + tpnt->write_info = t128_write_info; for (count = 0; current_override < NO_OVERRIDES; ++current_override) { base = 0; @@ -258,7 +255,7 @@ found: instance->irq = NCR5380_probe_irq(instance, T128_IRQS); if (instance->irq != SCSI_IRQ_NONE) - if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128", + if (request_irq(instance->irq, t128_intr, 0, "t128", instance)) { printk("scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h index ada1115079c..fd68cecc62a 100644 --- a/drivers/scsi/t128.h +++ b/drivers/scsi/t128.h @@ -34,10 +34,6 @@ * 1+ (800) 334-5454 */ -/* - * $Log: t128.h,v $ - */ - #ifndef T128_H #define T128_H @@ -107,8 +103,6 @@ static int t128_bus_reset(struct scsi_cmnd *); #define CAN_QUEUE 32 #endif -#ifndef HOSTS_C - #define NCR5380_implementation_fields \ void __iomem *base @@ -140,13 +134,13 @@ static int t128_bus_reset(struct scsi_cmnd *); #define NCR5380_queue_command t128_queue_command #define NCR5380_abort t128_abort #define NCR5380_bus_reset t128_bus_reset -#define NCR5380_proc_info t128_proc_info +#define NCR5380_show_info t128_show_info +#define NCR5380_write_info t128_write_info /* 15 14 12 10 7 5 3 1101 0100 1010 1000 */ #define T128_IRQS 0xc4a8 -#endif /* else def HOSTS_C */ #endif /* ndef ASM */ #endif /* T128_H */ diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c index 9327f5fcec4..b006cf789ba 100644 --- a/drivers/scsi/tmscsim.c +++ b/drivers/scsi/tmscsim.c @@ -521,7 +521,7 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr pACB->SelConn++; return 1; } - if (time_before (jiffies, pACB->pScsiHost->last_reset)) + if (time_before (jiffies, pACB->last_reset)) { DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n")); return 1; @@ -1863,7 +1863,7 @@ dc390_ScsiRstDetect( struct dc390_acb* pACB ) /* delay half a second */ udelay (1000); DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); - pACB->pScsiHost->last_reset = jiffies + 5*HZ/2 + pACB->last_reset = jiffies + 5*HZ/2 + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; pACB->Connected = 0; @@ -2048,9 +2048,9 @@ static int DC390_bus_reset (struct scsi_cmnd *cmd) dc390_ResetDevParam(pACB); mdelay(1); - pACB->pScsiHost->last_reset = jiffies + 3*HZ/2 + pACB->last_reset = jiffies + 3*HZ/2 + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; 
- + DC390_write8(ScsiCmd, CLEAR_FIFO_CMD); DC390_read8(INT_Status); /* Reset Pending INT */ @@ -2383,7 +2383,7 @@ static void dc390_init_hw(struct dc390_acb *pACB, u8 index) if (pACB->Gmode2 & RST_SCSI_BUS) { dc390_ResetSCSIBus(pACB); udelay(1000); - shost->last_reset = jiffies + HZ/2 + + pACB->last_reset = jiffies + HZ/2 + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; } @@ -2455,8 +2455,8 @@ static int dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) shost->irq = pdev->irq; shost->base = io_port; shost->unique_id = io_port; - shost->last_reset = jiffies; - + + pACB->last_reset = jiffies; pACB->pScsiHost = shost; pACB->IOPortBase = (u16) io_port; pACB->IRQLevel = pdev->irq; @@ -2553,7 +2553,6 @@ static void dc390_remove_one(struct pci_dev *dev) pci_disable_device(dev); scsi_host_put(scsi_host); - pci_set_drvdata(dev, NULL); } static struct pci_device_id tmscsim_pci_tbl[] = { diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h index 77adc54dbd1..3d1bb4ad182 100644 --- a/drivers/scsi/tmscsim.h +++ b/drivers/scsi/tmscsim.h @@ -143,6 +143,7 @@ u8 Ignore_IRQ; /* Not used */ struct pci_dev *pdev; +unsigned long last_reset; unsigned long Cmds; u32 SelLost; u32 SelConn; diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c index 9c216e56356..5a03bb3bcfe 100644 --- a/drivers/scsi/u14-34f.c +++ b/drivers/scsi/u14-34f.c @@ -873,7 +873,7 @@ static int port_detect \ /* Board detected, allocate its IRQ */ if (request_irq(irq, do_interrupt_handler, - IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0), + (subversion == ESA) ? IRQF_SHARED : 0, driver_name, (void *) &sha[j])) { printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq); goto freelock; diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 0371047c592..f07f90179bb 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -34,7 +34,7 @@ config SCSI_UFSHCD tristate "Universal Flash Storage Controller Driver Core" - depends on SCSI + depends on SCSI && SCSI_DMA ---help--- This selects the support for UFS devices in Linux, say Y and make sure that you know the name of your UFS host adapter (the card @@ -57,3 +57,14 @@ config SCSI_UFSHCD_PCI If you have a controller with this interface, say Y or M here. If unsure, say N. + +config SCSI_UFSHCD_PLATFORM + tristate "Platform bus based UFS Controller support" + depends on SCSI_UFSHCD + ---help--- + This selects the UFS host controller support. Select this if + you have a UFS controller on the Platform bus. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. 
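The new SCSI_UFSHCD_PLATFORM option builds the platform-bus glue driver (ufshcd-pltfrm.o, added below), which binds either through its "jedec,ufs-1.1" OF match table or through the platform driver name "ufshcd". As a minimal sketch of how a non-device-tree board file might instantiate such a controller — the register window at 0xd0000000, the IRQ 42, and the board_* names are invented placeholders, not values from this patch:

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

/* Hypothetical board glue; base address and IRQ are placeholders only. */
static struct resource board_ufs_resources[] = {
	{
		.start	= 0xd0000000,		/* UFSHCI register window */
		.end	= 0xd0000000 + 0x1ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 42,			/* controller interrupt line */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_ufs_device = {
	.name		= "ufshcd",	/* matches ufshcd_pltfrm_driver's name */
	.id		= -1,
	.resource	= board_ufs_resources,
	.num_resources	= ARRAY_SIZE(board_ufs_resources),
};

static int __init board_add_ufs(void)
{
	return platform_device_register(&board_ufs_device);
}
device_initcall(board_add_ufs);

On OF-enabled systems the same binding happens automatically from a device node with compatible = "jedec,ufs-1.1" and matching reg/interrupts properties.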
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 9eda0dfbd6d..1e5bd48457d 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -1,3 +1,4 @@ # UFSHCD makefile obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o +obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 139bc0647b4..f42d1cee652 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -36,10 +36,17 @@ #ifndef _UFS_H #define _UFS_H +#include <linux/mutex.h> +#include <linux/types.h> + #define MAX_CDB_SIZE 16 +#define GENERAL_UPIU_REQUEST_SIZE 32 +#define QUERY_DESC_MAX_SIZE 256 +#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \ + (sizeof(struct utp_upiu_header))) #define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\ - ((byte3 << 24) | (byte2 << 16) |\ + cpu_to_be32((byte3 << 24) | (byte2 << 16) |\ (byte1 << 8) | (byte0)) /* @@ -62,7 +69,7 @@ enum { UPIU_TRANSACTION_COMMAND = 0x01, UPIU_TRANSACTION_DATA_OUT = 0x02, UPIU_TRANSACTION_TASK_REQ = 0x04, - UPIU_TRANSACTION_QUERY_REQ = 0x26, + UPIU_TRANSACTION_QUERY_REQ = 0x16, }; /* UTP UPIU Transaction Codes Target to Initiator */ @@ -73,6 +80,7 @@ enum { UPIU_TRANSACTION_TASK_RSP = 0x24, UPIU_TRANSACTION_READY_XFER = 0x31, UPIU_TRANSACTION_QUERY_RSP = 0x36, + UPIU_TRANSACTION_REJECT_UPIU = 0x3F, }; /* UPIU Read/Write flags */ @@ -90,8 +98,41 @@ enum { UPIU_TASK_ATTR_ACA = 0x03, }; -/* UTP QUERY Transaction Specific Fields OpCode */ +/* UPIU Query request function */ +enum { + UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01, + UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81, +}; + +/* Flag idn for Query Requests*/ +enum flag_idn { + QUERY_FLAG_IDN_FDEVICEINIT = 0x01, + QUERY_FLAG_IDN_BKOPS_EN = 0x04, +}; + +/* Attribute idn for Query requests */ +enum attr_idn { + QUERY_ATTR_IDN_BKOPS_STATUS = 0x05, + QUERY_ATTR_IDN_EE_CONTROL = 0x0D, + QUERY_ATTR_IDN_EE_STATUS = 0x0E, +}; + +/* Exception event mask values */ +enum { + MASK_EE_STATUS = 0xFFFF, + MASK_EE_URGENT_BKOPS = (1 << 2), +}; + +/* Background operation status */ enum { + BKOPS_STATUS_NO_OP = 0x0, + BKOPS_STATUS_NON_CRITICAL = 0x1, + BKOPS_STATUS_PERF_IMPACT = 0x2, + BKOPS_STATUS_CRITICAL = 0x3, +}; + +/* UTP QUERY Transaction Specific Fields OpCode */ +enum query_opcode { UPIU_QUERY_OPCODE_NOP = 0x0, UPIU_QUERY_OPCODE_READ_DESC = 0x1, UPIU_QUERY_OPCODE_WRITE_DESC = 0x2, @@ -103,6 +144,21 @@ enum { UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8, }; +/* Query response result code */ +enum { + QUERY_RESULT_SUCCESS = 0x00, + QUERY_RESULT_NOT_READABLE = 0xF6, + QUERY_RESULT_NOT_WRITEABLE = 0xF7, + QUERY_RESULT_ALREADY_WRITTEN = 0xF8, + QUERY_RESULT_INVALID_LENGTH = 0xF9, + QUERY_RESULT_INVALID_VALUE = 0xFA, + QUERY_RESULT_INVALID_SELECTOR = 0xFB, + QUERY_RESULT_INVALID_INDEX = 0xFC, + QUERY_RESULT_INVALID_IDN = 0xFD, + QUERY_RESULT_INVALID_OPCODE = 0xFE, + QUERY_RESULT_GENERAL_FAILURE = 0xFF, +}; + /* UTP Transfer Request Command Type (CT) */ enum { UPIU_COMMAND_SET_TYPE_SCSI = 0x0, @@ -110,10 +166,19 @@ enum { UPIU_COMMAND_SET_TYPE_QUERY = 0x2, }; +/* UTP Transfer Request Command Offset */ +#define UPIU_COMMAND_TYPE_OFFSET 28 + +/* Offset of the response code in the UPIU header */ +#define UPIU_RSP_CODE_OFFSET 8 + enum { - MASK_SCSI_STATUS = 0xFF, - MASK_TASK_RESPONSE = 0xFF00, - MASK_RSP_UPIU_RESULT = 0xFFFF, + MASK_SCSI_STATUS = 0xFF, + MASK_TASK_RESPONSE = 0xFF00, + MASK_RSP_UPIU_RESULT = 0xFFFF, + MASK_QUERY_DATA_SEG_LEN = 0xFFFF, + MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF, + 
MASK_RSP_EXCEPTION_EVENT = 0x10000, }; /* Task management service response */ enum { @@ -131,37 +196,84 @@ * @dword_2: UPIU header DW-2 */ struct utp_upiu_header { - u32 dword_0; - u32 dword_1; - u32 dword_2; + __be32 dword_0; + __be32 dword_1; + __be32 dword_2; }; /** * struct utp_upiu_cmd - Command UPIU structure - * @header: UPIU header structure DW-0 to DW-2 * @data_transfer_len: Data Transfer Length DW-3 * @cdb: Command Descriptor Block CDB DW-4 to DW-7 */ struct utp_upiu_cmd { - struct utp_upiu_header header; - u32 exp_data_transfer_len; + __be32 exp_data_transfer_len; u8 cdb[MAX_CDB_SIZE]; }; /** - * struct utp_upiu_rsp - Response UPIU structure - * @header: UPIU header DW-0 to DW-2 + * struct utp_upiu_query - upiu request buffer structure for + * query request. + * @opcode: command to perform B-0 + * @idn: a value that indicates the particular type of data B-1 + * @index: Index to further identify data B-2 + * @selector: Index to further identify data B-3 + * @reserved_osf: spec reserved field B-4,5 + * @length: number of descriptor bytes to read/write B-6,7 + * @value: Attribute value to be written DW-5 + * @reserved: spec reserved DW-6,7 + */ +struct utp_upiu_query { + u8 opcode; + u8 idn; + u8 index; + u8 selector; + __be16 reserved_osf; + __be16 length; + __be32 value; + __be32 reserved[2]; +}; + +/** + * struct utp_upiu_req - general upiu request structure + * @header: UPIU header structure DW-0 to DW-2 + * @sc: fields structure for scsi command DW-3 to DW-7 + * @qr: fields structure for query request DW-3 to DW-7 + */ +struct utp_upiu_req { + struct utp_upiu_header header; + union { + struct utp_upiu_cmd sc; + struct utp_upiu_query qr; + }; +}; + +/** + * struct utp_cmd_rsp - Response UPIU structure * @residual_transfer_count: Residual transfer count DW-3 * @reserved: Reserved double words DW-4 to DW-7 * @sense_data_len: Sense data length DW-8 U16 * @sense_data: Sense data field DW-8 to DW-12 */ +struct utp_cmd_rsp { + __be32 residual_transfer_count; + __be32 reserved[4]; + __be16 sense_data_len; + u8 sense_data[18]; +}; + +/** + * struct utp_upiu_rsp - general upiu response structure + * @header: UPIU header structure DW-0 to DW-2 + * @sr: fields structure for scsi command DW-3 to DW-12 + * @qr: fields structure for query request DW-3 to DW-7 + */ struct utp_upiu_rsp { struct utp_upiu_header header; - u32 residual_transfer_count; - u32 reserved[4]; - u16 sense_data_len; - u8 sense_data[18]; + union { + struct utp_cmd_rsp sr; + struct utp_upiu_query qr; + }; }; /** @@ -174,10 +286,10 @@ struct utp_upiu_rsp { */ struct utp_upiu_task_req { struct utp_upiu_header header; - u32 input_param1; - u32 input_param2; - u32 input_param3; - u32 reserved[2]; + __be32 input_param1; + __be32 input_param2; + __be32 input_param3; + __be32 reserved[2]; }; /** @@ -189,9 +301,29 @@ struct utp_upiu_task_req { */ struct utp_upiu_task_rsp { struct utp_upiu_header header; - u32 output_param1; - u32 output_param2; - u32 reserved[3]; + __be32 output_param1; + __be32 output_param2; + __be32 reserved[3]; +}; + +/** + * struct ufs_query_req - parameters for building a query request + * @query_func: UPIU header query function + * @upiu_req: the query request data + */ +struct ufs_query_req { + u8 query_func; + struct utp_upiu_query upiu_req; +}; + +/** + * struct ufs_query_res - UPIU query response + * @response: device response code + * @upiu_res: query response data + */ +struct ufs_query_res { + u8 response; + struct utp_upiu_query upiu_res; }; #endif /* End of Header */ diff --git 
a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index 5cb1d75f586..8b9531204c2 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -35,6 +35,7 @@ #include "ufshcd.h" #include <linux/pci.h> +#include <linux/pm_runtime.h> #ifdef CONFIG_PM /** @@ -44,7 +45,7 @@ * * Returns -ENOSYS */ -static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int ufshcd_pci_suspend(struct device *dev) { /* * TODO: @@ -61,7 +62,7 @@ static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state) * * Returns -ENOSYS */ -static int ufshcd_pci_resume(struct pci_dev *pdev) +static int ufshcd_pci_resume(struct device *dev) { /* * TODO: @@ -71,8 +72,45 @@ static int ufshcd_pci_resume(struct pci_dev *pdev) return -ENOSYS; } +#else +#define ufshcd_pci_suspend NULL +#define ufshcd_pci_resume NULL #endif /* CONFIG_PM */ +#ifdef CONFIG_PM_RUNTIME +static int ufshcd_pci_runtime_suspend(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!hba) + return 0; + + return ufshcd_runtime_suspend(hba); +} +static int ufshcd_pci_runtime_resume(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!hba) + return 0; + + return ufshcd_runtime_resume(hba); +} +static int ufshcd_pci_runtime_idle(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!hba) + return 0; + + return ufshcd_runtime_idle(hba); +} +#else /* !CONFIG_PM_RUNTIME */ +#define ufshcd_pci_runtime_suspend NULL +#define ufshcd_pci_runtime_resume NULL +#define ufshcd_pci_runtime_idle NULL +#endif /* CONFIG_PM_RUNTIME */ + /** * ufshcd_pci_shutdown - main function to put the controller in reset state * @pdev: pointer to PCI device handle @@ -91,13 +129,9 @@ static void ufshcd_pci_remove(struct pci_dev *pdev) { struct ufs_hba *hba = pci_get_drvdata(pdev); - disable_irq(pdev->irq); - free_irq(pdev->irq, hba); + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); ufshcd_remove(hba); - pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); - pci_clear_master(pdev); - pci_disable_device(pdev); } /** @@ -134,55 +168,49 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) void __iomem *mmio_base; int err; - err = pci_enable_device(pdev); + err = pcim_enable_device(pdev); if (err) { - dev_err(&pdev->dev, "pci_enable_device failed\n"); - goto out_error; + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + return err; } pci_set_master(pdev); - - err = pci_request_regions(pdev, UFSHCD); + err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD); if (err < 0) { - dev_err(&pdev->dev, "request regions failed\n"); - goto out_disable; + dev_err(&pdev->dev, "request and iomap failed\n"); + return err; } - mmio_base = pci_ioremap_bar(pdev, 0); - if (!mmio_base) { - dev_err(&pdev->dev, "memory map failed\n"); - err = -ENOMEM; - goto out_release_regions; - } + mmio_base = pcim_iomap_table(pdev)[0]; err = ufshcd_set_dma_mask(pdev); if (err) { dev_err(&pdev->dev, "set dma mask failed\n"); - goto out_iounmap; + return err; } err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq); if (err) { dev_err(&pdev->dev, "Initialization failed\n"); - goto out_iounmap; + return err; } pci_set_drvdata(pdev, hba); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_allow(&pdev->dev); return 0; - -out_iounmap: - iounmap(mmio_base); -out_release_regions: - pci_release_regions(pdev); -out_disable: - pci_clear_master(pdev); - pci_disable_device(pdev); -out_error: - return err; } +static const struct dev_pm_ops ufshcd_pci_pm_ops = { + 
.suspend = ufshcd_pci_suspend, + .resume = ufshcd_pci_resume, + .runtime_suspend = ufshcd_pci_runtime_suspend, + .runtime_resume = ufshcd_pci_runtime_resume, + .runtime_idle = ufshcd_pci_runtime_idle, +}; + static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = { { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { } /* terminate list */ @@ -196,10 +224,9 @@ static struct pci_driver ufshcd_pci_driver = { .probe = ufshcd_pci_probe, .remove = ufshcd_pci_remove, .shutdown = ufshcd_pci_shutdown, -#ifdef CONFIG_PM - .suspend = ufshcd_pci_suspend, - .resume = ufshcd_pci_resume, -#endif + .driver = { + .pm = &ufshcd_pci_pm_ops + }, }; module_pci_driver(ufshcd_pci_driver); diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c new file mode 100644 index 00000000000..5e462322542 --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -0,0 +1,218 @@ +/* + * Universal Flash Storage Host controller Platform bus based glue driver + * + * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c + * Copyright (C) 2011-2013 Samsung India Software Operations + * + * Authors: + * Santosh Yaraganavi <santosh.sy@samsung.com> + * Vinayak Holikatti <h.vinayak@samsung.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * See the COPYING file in the top-level directory or visit + * <http://www.gnu.org/licenses/gpl-2.0.html> + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This program is provided "AS IS" and "WITH ALL FAULTS" and + * without warranty of any kind. You are solely responsible for + * determining the appropriateness of using and distributing + * the program and assume all risks associated with your exercise + * of rights with respect to the program, including but not limited + * to infringement of third party rights, the risks and costs of + * program errors, damage to or loss of data, programs or equipment, + * and unavailability or interruption of operations. Under no + * circumstances will the contributor of this Program be liable for + * any damages of any kind arising from your use or distribution of + * this program. + */ + +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#include "ufshcd.h" + +#ifdef CONFIG_PM +/** + * ufshcd_pltfrm_suspend - suspend power management function + * @dev: pointer to device handle + * + * + * Returns 0 + */ +static int ufshcd_pltfrm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ufs_hba *hba = platform_get_drvdata(pdev); + + /* + * TODO: + * 1. Call ufshcd_suspend + * 2. Do bus specific power management + */ + + disable_irq(hba->irq); + + return 0; +} + +/** + * ufshcd_pltfrm_resume - resume power management function + * @dev: pointer to device handle + * + * Returns 0 + */ +static int ufshcd_pltfrm_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ufs_hba *hba = platform_get_drvdata(pdev); + + /* + * TODO: + * 1. Call ufshcd_resume. + * 2. 
Do bus specific wake up + */ + + enable_irq(hba->irq); + + return 0; +} +#else +#define ufshcd_pltfrm_suspend NULL +#define ufshcd_pltfrm_resume NULL +#endif + +#ifdef CONFIG_PM_RUNTIME +static int ufshcd_pltfrm_runtime_suspend(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!hba) + return 0; + + return ufshcd_runtime_suspend(hba); +} +static int ufshcd_pltfrm_runtime_resume(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!hba) + return 0; + + return ufshcd_runtime_resume(hba); +} +static int ufshcd_pltfrm_runtime_idle(struct device *dev) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + if (!hba) + return 0; + + return ufshcd_runtime_idle(hba); +} +#else /* !CONFIG_PM_RUNTIME */ +#define ufshcd_pltfrm_runtime_suspend NULL +#define ufshcd_pltfrm_runtime_resume NULL +#define ufshcd_pltfrm_runtime_idle NULL +#endif /* CONFIG_PM_RUNTIME */ + +/** + * ufshcd_pltfrm_probe - probe routine of the driver + * @pdev: pointer to Platform device handle + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_pltfrm_probe(struct platform_device *pdev) +{ + struct ufs_hba *hba; + void __iomem *mmio_base; + struct resource *mem_res; + int irq, err; + struct device *dev = &pdev->dev; + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mmio_base = devm_ioremap_resource(dev, mem_res); + if (IS_ERR(mmio_base)) { + err = PTR_ERR(mmio_base); + goto out; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "IRQ resource not available\n"); + err = -ENODEV; + goto out; + } + + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + err = ufshcd_init(dev, &hba, mmio_base, irq); + if (err) { + dev_err(dev, "Initialization failed\n"); + goto out_disable_rpm; + } + + platform_set_drvdata(pdev, hba); + + return 0; + +out_disable_rpm: + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); +out: + return err; +} + +/** + * ufshcd_pltfrm_remove - remove platform driver routine + * @pdev: pointer to platform device handle + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_pltfrm_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&(pdev)->dev); + ufshcd_remove(hba); + return 0; +} + +static const struct of_device_id ufs_of_match[] = { + { .compatible = "jedec,ufs-1.1"}, + {}, +}; + +static const struct dev_pm_ops ufshcd_dev_pm_ops = { + .suspend = ufshcd_pltfrm_suspend, + .resume = ufshcd_pltfrm_resume, + .runtime_suspend = ufshcd_pltfrm_runtime_suspend, + .runtime_resume = ufshcd_pltfrm_runtime_resume, + .runtime_idle = ufshcd_pltfrm_runtime_idle, +}; + +static struct platform_driver ufshcd_pltfrm_driver = { + .probe = ufshcd_pltfrm_probe, + .remove = ufshcd_pltfrm_remove, + .driver = { + .name = "ufshcd", + .owner = THIS_MODULE, + .pm = &ufshcd_dev_pm_ops, + .of_match_table = ufs_of_match, + }, +}; + +module_platform_driver(ufshcd_pltfrm_driver); + +MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>"); +MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>"); +MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(UFSHCD_DRIVER_VERSION); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 60fd40c4e4c..0c287725125 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -33,7 +33,36 @@ * this program. 
*/ +#include <linux/async.h> + #include "ufshcd.h" +#include "unipro.h" + +#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ + UTP_TASK_REQ_COMPL |\ + UIC_POWER_MODE |\ + UFSHCD_ERROR_MASK) +/* UIC command timeout, unit: ms */ +#define UIC_CMD_TIMEOUT 500 + +/* NOP OUT retries waiting for NOP IN response */ +#define NOP_OUT_RETRIES 10 +/* Timeout after 30 msecs if NOP OUT hangs without response */ +#define NOP_OUT_TIMEOUT 30 /* msecs */ + +/* Query request retries */ +#define QUERY_REQ_RETRIES 10 +/* Query request timeout */ +#define QUERY_REQ_TIMEOUT 30 /* msec */ + +/* Task management command timeout */ +#define TM_CMD_TIMEOUT 100 /* msecs */ + +/* Expose the flag value from utp_upiu_query.value */ +#define MASK_QUERY_UPIU_FLAG_LOC 0xFF + +/* Interrupt aggregation default timeout, unit: 40us */ +#define INT_AGGR_DEF_TO 0x02 enum { UFSHCD_MAX_CHANNEL = 0, @@ -45,9 +74,22 @@ enum { /* UFSHCD states */ enum { - UFSHCD_STATE_OPERATIONAL, UFSHCD_STATE_RESET, UFSHCD_STATE_ERROR, + UFSHCD_STATE_OPERATIONAL, +}; + +/* UFSHCD error handling flags */ +enum { + UFSHCD_EH_IN_PROGRESS = (1 << 0), +}; + +/* UFSHCD UIC layer error flags */ +enum { + UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */ + UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */ + UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */ + UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */ }; /* Interrupt configuration options */ @@ -57,11 +99,65 @@ enum { UFSHCD_INT_CLEAR, }; -/* Interrupt aggregation options */ -enum { - INT_AGGR_RESET, - INT_AGGR_CONFIG, -}; +#define ufshcd_set_eh_in_progress(h) \ + (h->eh_flags |= UFSHCD_EH_IN_PROGRESS) +#define ufshcd_eh_in_progress(h) \ + (h->eh_flags & UFSHCD_EH_IN_PROGRESS) +#define ufshcd_clear_eh_in_progress(h) \ + (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) + +static void ufshcd_tmc_handler(struct ufs_hba *hba); +static void ufshcd_async_scan(void *data, async_cookie_t cookie); +static int ufshcd_reset_and_restore(struct ufs_hba *hba); +static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); + +/* + * ufshcd_wait_for_register - wait for register value to change + * @hba - per-adapter interface + * @reg - mmio register offset + * @mask - mask to apply to read register value + * @val - wait condition + * @interval_us - polling interval in microsecs + * @timeout_ms - timeout in millisecs + * + * Returns -ETIMEDOUT on error, zero on success + */ +static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, + u32 val, unsigned long interval_us, unsigned long timeout_ms) +{ + int err = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + + /* ignore bits that we don't intend to wait on */ + val = val & mask; + + while ((ufshcd_readl(hba, reg) & mask) != val) { + /* wakeup within 50us of expiry */ + usleep_range(interval_us, interval_us + 50); + + if (time_after(jiffies, timeout)) { + if ((ufshcd_readl(hba, reg) & mask) != val) + err = -ETIMEDOUT; + break; + } + } + + return err; +} + +/** + * ufshcd_get_intr_mask - Get the interrupt bit mask + * @hba - Pointer to adapter instance + * + * Returns interrupt bit mask per version + */ +static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) +{ + if (hba->ufs_version == UFSHCI_VERSION_10) + return INTERRUPT_MASK_ALL_VER_10; + else + return INTERRUPT_MASK_ALL_VER_11; +} /** * ufshcd_get_ufs_version - Get the UFS version supported by the HBA @@ -71,7 +167,7 @@ enum { */ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba) { - return readl(hba->mmio_base + REG_UFS_VERSION); 
+ return ufshcd_readl(hba, REG_UFS_VERSION); } /** @@ -95,7 +191,7 @@ static inline int ufshcd_is_device_present(u32 reg_hcs) */ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) { - return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS; + return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS; } /** @@ -108,19 +204,41 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) static inline int ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp) { - return task_req_descp->header.dword_2 & MASK_OCS; + return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS; } /** * ufshcd_get_tm_free_slot - get a free slot for task management request * @hba: per adapter instance + * @free_slot: pointer to variable with available slot value * - * Returns maximum number of task management request slots in case of - * task management queue full or returns the free slot number + * Get a free tag and lock it until ufshcd_put_tm_slot() is called. + * Returns 0 if free slot is not available, else return 1 with tag value + * in @free_slot. */ -static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba) +static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot) { - return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs); + int tag; + bool ret = false; + + if (!free_slot) + goto out; + + do { + tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs); + if (tag >= hba->nutmrs) + goto out; + } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use)); + + *free_slot = tag; + ret = true; +out: + return ret; +} + +static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot) +{ + clear_bit_unlock(slot, &hba->tm_slots_in_use); } /** @@ -130,8 +248,7 @@ static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba) */ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) { - writel(~(1 << pos), - (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_CLEAR)); + ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); } /** @@ -165,55 +282,30 @@ static inline int ufshcd_get_lists_status(u32 reg) */ static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) { - return readl(hba->mmio_base + REG_UIC_COMMAND_ARG_2) & + return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) & MASK_UIC_COMMAND_RESULT; } /** - * ufshcd_free_hba_memory - Free allocated memory for LRB, request - * and task lists + * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command * @hba: Pointer to adapter instance + * + * This function gets UIC command argument3 + * Returns 0 on success, non zero value on error */ -static inline void ufshcd_free_hba_memory(struct ufs_hba *hba) +static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) { - size_t utmrdl_size, utrdl_size, ucdl_size; - - kfree(hba->lrb); - - if (hba->utmrdl_base_addr) { - utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; - dma_free_coherent(hba->dev, utmrdl_size, - hba->utmrdl_base_addr, hba->utmrdl_dma_addr); - } - - if (hba->utrdl_base_addr) { - utrdl_size = - (sizeof(struct utp_transfer_req_desc) * hba->nutrs); - dma_free_coherent(hba->dev, utrdl_size, - hba->utrdl_base_addr, hba->utrdl_dma_addr); - } - - if (hba->ucdl_base_addr) { - ucdl_size = - (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); - dma_free_coherent(hba->dev, ucdl_size, - hba->ucdl_base_addr, hba->ucdl_dma_addr); - } + return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); } /** - * ufshcd_is_valid_req_rsp - checks if controller TR response is valid + * ufshcd_get_req_rsp - returns the 
TR response transaction type * @ucd_rsp_ptr: pointer to response UPIU - * - * This function checks the response UPIU for valid transaction type in - * response field - * Returns 0 on success, non-zero on failure */ static inline int -ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) +ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr) { - return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) == - UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16; + return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24; } /** @@ -229,34 +321,60 @@ ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; } +/* + * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length + * from response UPIU + * @ucd_rsp_ptr: pointer to response UPIU + * + * Return the data segment length. + */ +static inline unsigned int +ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr) +{ + return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & + MASK_RSP_UPIU_DATA_SEG_LEN; +} + /** - * ufshcd_config_int_aggr - Configure interrupt aggregation values. - * Currently there is no use case where we want to configure - * interrupt aggregation dynamically. So to configure interrupt - * aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and - * INT_AGGR_TIMEOUT_VALUE are used. + * ufshcd_is_exception_event - Check if the device raised an exception event + * @ucd_rsp_ptr: pointer to response UPIU + * + * The function checks if the device raised an exception event indicated in + * the Device Information field of response UPIU. + * + * Returns true if exception is raised, false otherwise. + */ +static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr) +{ + return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & + MASK_RSP_EXCEPTION_EVENT ? true : false; +} + +/** + * ufshcd_reset_intr_aggr - Reset interrupt aggregation values. * @hba: per adapter instance - * @option: Interrupt aggregation option */ static inline void -ufshcd_config_int_aggr(struct ufs_hba *hba, int option) -{ - switch (option) { - case INT_AGGR_RESET: - writel((INT_AGGR_ENABLE | - INT_AGGR_COUNTER_AND_TIMER_RESET), - (hba->mmio_base + - REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL)); - break; - case INT_AGGR_CONFIG: - writel((INT_AGGR_ENABLE | - INT_AGGR_PARAM_WRITE | - INT_AGGR_COUNTER_THRESHOLD_VALUE | - INT_AGGR_TIMEOUT_VALUE), - (hba->mmio_base + - REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL)); - break; - } +ufshcd_reset_intr_aggr(struct ufs_hba *hba) +{ + ufshcd_writel(hba, INT_AGGR_ENABLE | + INT_AGGR_COUNTER_AND_TIMER_RESET, + REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); +} + +/** + * ufshcd_config_intr_aggr - Configure interrupt aggregation values. 
+ * @hba: per adapter instance + * @cnt: Interrupt aggregation counter threshold + * @tmout: Interrupt aggregation timeout value + */ +static inline void +ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) +{ + ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | + INT_AGGR_COUNTER_THLD_VAL(cnt) | + INT_AGGR_TIMEOUT_VAL(tmout), + REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); } /** @@ -267,12 +385,10 @@ ufshcd_config_int_aggr(struct ufs_hba *hba, int option) */ static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) { - writel(UTP_TASK_REQ_LIST_RUN_STOP_BIT, - (hba->mmio_base + - REG_UTP_TASK_REQ_LIST_RUN_STOP)); - writel(UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, - (hba->mmio_base + - REG_UTP_TRANSFER_REQ_LIST_RUN_STOP)); + ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT, + REG_UTP_TASK_REQ_LIST_RUN_STOP); + ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT, + REG_UTP_TRANSFER_REQ_LIST_RUN_STOP); } /** @@ -281,7 +397,7 @@ static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) */ static inline void ufshcd_hba_start(struct ufs_hba *hba) { - writel(CONTROLLER_ENABLE , (hba->mmio_base + REG_CONTROLLER_ENABLE)); + ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE); } /** @@ -292,7 +408,7 @@ static inline void ufshcd_hba_start(struct ufs_hba *hba) */ static inline int ufshcd_is_hba_active(struct ufs_hba *hba) { - return (readl(hba->mmio_base + REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1; + return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1; } /** @@ -304,8 +420,7 @@ static inline void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { __set_bit(task_tag, &hba->outstanding_reqs); - writel((1 << task_tag), - (hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL)); + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); } /** @@ -315,22 +430,55 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) { int len; - if (lrbp->sense_buffer) { - len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len); + if (lrbp->sense_buffer && + ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { + len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); memcpy(lrbp->sense_buffer, - lrbp->ucd_rsp_ptr->sense_data, + lrbp->ucd_rsp_ptr->sr.sense_data, min_t(int, len, SCSI_SENSE_BUFFERSIZE)); } } /** + * ufshcd_copy_query_response() - Copy the Query Response and the data + * descriptor + * @hba: per adapter instance + * @lrb - pointer to local reference block + */ +static +void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufs_query_res *query_res = &hba->dev_cmd.query.response; + + /* Get the UPIU response */ + query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >> + UPIU_RSP_CODE_OFFSET; + + memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); + + + /* Get the descriptor */ + if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) { + u8 *descp = (u8 *)&lrbp->ucd_rsp_ptr + + GENERAL_UPIU_REQUEST_SIZE; + u16 len; + + /* data segment length */ + len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & + MASK_QUERY_DATA_SEG_LEN; + + memcpy(hba->dev_cmd.query.descriptor, descp, + min_t(u16, len, QUERY_DESC_MAX_SIZE)); + } +} + +/** * ufshcd_hba_capabilities - Read controller capabilities * @hba: per adapter instance */ static inline void ufshcd_hba_capabilities(struct ufs_hba *hba) { - hba->capabilities = - readl(hba->mmio_base + REG_CONTROLLER_CAPABILITIES); + hba->capabilities = ufshcd_readl(hba, 
REG_CONTROLLER_CAPABILITIES); /* nutrs and nutmrs are 0 based values */ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; @@ -339,24 +487,131 @@ static inline void ufshcd_hba_capabilities(struct ufs_hba *hba) } /** - * ufshcd_send_uic_command - Send UIC commands to unipro layers + * ufshcd_ready_for_uic_cmd - Check if controller is ready + * to accept UIC commands * @hba: per adapter instance - * @uic_command: UIC command + * Return true on success, else false + */ +static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) +{ + if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) + return true; + else + return false; +} + +/** + * ufshcd_get_upmcrs - Get the power mode change request status + * @hba: Pointer to adapter instance + * + * This function gets the UPMCRS field of HCS register + * Returns value of UPMCRS field + */ +static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) +{ + return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; +} + +/** + * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers + * @hba: per adapter instance + * @uic_cmd: UIC command + * + * Mutex must be held. */ static inline void -ufshcd_send_uic_command(struct ufs_hba *hba, struct uic_command *uic_cmnd) +ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { + WARN_ON(hba->active_uic_cmd); + + hba->active_uic_cmd = uic_cmd; + /* Write Args */ - writel(uic_cmnd->argument1, - (hba->mmio_base + REG_UIC_COMMAND_ARG_1)); - writel(uic_cmnd->argument2, - (hba->mmio_base + REG_UIC_COMMAND_ARG_2)); - writel(uic_cmnd->argument3, - (hba->mmio_base + REG_UIC_COMMAND_ARG_3)); + ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); + ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); + ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); /* Write UIC Cmd */ - writel((uic_cmnd->command & COMMAND_OPCODE_MASK), - (hba->mmio_base + REG_UIC_COMMAND)); + ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, + REG_UIC_COMMAND); +} + +/** + * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command + * @hba: per adapter instance + * @uic_cmd: UIC command + * + * Must be called with mutex held. + * Returns 0 only if success. + */ +static int +ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +{ + int ret; + unsigned long flags; + + if (wait_for_completion_timeout(&uic_cmd->done, + msecs_to_jiffies(UIC_CMD_TIMEOUT))) + ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; + else + ret = -ETIMEDOUT; + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->active_uic_cmd = NULL; + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return ret; +} + +/** + * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result + * @hba: per adapter instance + * @uic_cmd: UIC command + * + * Identical to ufshcd_send_uic_cmd() except that it must be called + * with the mutex already held. + * Returns 0 only if success. 
+ */ +static int +__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +{ + int ret; + unsigned long flags; + + if (!ufshcd_ready_for_uic_cmd(hba)) { + dev_err(hba->dev, + "Controller not ready to accept UIC commands\n"); + return -EIO; + } + + init_completion(&uic_cmd->done); + + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_dispatch_uic_cmd(hba, uic_cmd); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd); + + return ret; +} + +/** + * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result + * @hba: per adapter instance + * @uic_cmd: UIC command + * + * Returns 0 only if success. + */ +static int +ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) +{ + int ret; + + mutex_lock(&hba->uic_cmd_mutex); + ret = __ufshcd_send_uic_cmd(hba, uic_cmd); + mutex_unlock(&hba->uic_cmd_mutex); + + return ret; } /** @@ -400,99 +655,211 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp) } /** - * ufshcd_int_config - enable/disable interrupts + * ufshcd_enable_intr - enable interrupts * @hba: per adapter instance - * @option: interrupt option + * @intrs: interrupt bits */ -static void ufshcd_int_config(struct ufs_hba *hba, u32 option) +static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs) { - switch (option) { - case UFSHCD_INT_ENABLE: - writel(hba->int_enable_mask, - (hba->mmio_base + REG_INTERRUPT_ENABLE)); - break; - case UFSHCD_INT_DISABLE: - if (hba->ufs_version == UFSHCI_VERSION_10) - writel(INTERRUPT_DISABLE_MASK_10, - (hba->mmio_base + REG_INTERRUPT_ENABLE)); - else - writel(INTERRUPT_DISABLE_MASK_11, - (hba->mmio_base + REG_INTERRUPT_ENABLE)); - break; + u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + + if (hba->ufs_version == UFSHCI_VERSION_10) { + u32 rw; + rw = set & INTERRUPT_MASK_RW_VER_10; + set = rw | ((set ^ intrs) & intrs); + } else { + set |= intrs; + } + + ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); +} + +/** + * ufshcd_disable_intr - disable interrupts + * @hba: per adapter instance + * @intrs: interrupt bits + */ +static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) +{ + u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE); + + if (hba->ufs_version == UFSHCI_VERSION_10) { + u32 rw; + rw = (set & INTERRUPT_MASK_RW_VER_10) & + ~(intrs & INTERRUPT_MASK_RW_VER_10); + set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10); + + } else { + set &= ~intrs; + } + + ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE); +} + +/** + * ufshcd_prepare_req_desc_hdr() - Fills the requests header + * descriptor according to request + * @lrbp: pointer to local reference block + * @upiu_flags: flags required in the header + * @cmd_dir: requests data direction + */ +static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, + u32 *upiu_flags, enum dma_data_direction cmd_dir) +{ + struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr; + u32 data_direction; + u32 dword_0; + + if (cmd_dir == DMA_FROM_DEVICE) { + data_direction = UTP_DEVICE_TO_HOST; + *upiu_flags = UPIU_CMD_FLAGS_READ; + } else if (cmd_dir == DMA_TO_DEVICE) { + data_direction = UTP_HOST_TO_DEVICE; + *upiu_flags = UPIU_CMD_FLAGS_WRITE; + } else { + data_direction = UTP_NO_DATA_TRANSFER; + *upiu_flags = UPIU_CMD_FLAGS_NONE; } + + dword_0 = data_direction | (lrbp->command_type + << UPIU_COMMAND_TYPE_OFFSET); + if (lrbp->intr_cmd) + dword_0 |= UTP_REQ_DESC_INT_CMD; + + /* Transfer request descriptor header fields */ + req_desc->header.dword_0 = cpu_to_le32(dword_0); + + /* + * assigning invalid value 
for command status. Controller + * updates OCS on command completion, with the command + * status + */ + req_desc->header.dword_2 = + cpu_to_le32(OCS_INVALID_COMMAND_STATUS); +} + +/** + * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc, + * for scsi commands + * @lrbp - local reference block pointer + * @upiu_flags - flags + */ +static +void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) +{ + struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; + + /* command descriptor fields */ + ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( + UPIU_TRANSACTION_COMMAND, upiu_flags, + lrbp->lun, lrbp->task_tag); + ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( + UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0); + + /* Total EHS length and Data segment length will be zero */ + ucd_req_ptr->header.dword_2 = 0; + + ucd_req_ptr->sc.exp_data_transfer_len = + cpu_to_be32(lrbp->cmd->sdb.length); + + memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, + (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE))); +} + +/** + * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc, + * for query requests + * @hba: UFS hba + * @lrbp: local reference block pointer + * @upiu_flags: flags + */ +static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, u32 upiu_flags) +{ + struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; + struct ufs_query *query = &hba->dev_cmd.query; + u16 len = be16_to_cpu(query->request.upiu_req.length); + u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE; + + /* Query request header */ + ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD( + UPIU_TRANSACTION_QUERY_REQ, upiu_flags, + lrbp->lun, lrbp->task_tag); + ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD( + 0, query->request.query_func, 0, 0); + + /* Data segment length */ + ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD( + 0, 0, len >> 8, (u8)len); + + /* Copy the Query Request buffer as is */ + memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, + QUERY_OSF_SIZE); + + /* Copy the Descriptor */ + if ((len > 0) && (query->request.upiu_req.opcode == + UPIU_QUERY_OPCODE_WRITE_DESC)) { + memcpy(descp, query->descriptor, + min_t(u16, len, QUERY_DESC_MAX_SIZE)); + } +} + +static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) +{ + struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; + + memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req)); + + /* command descriptor fields */ + ucd_req_ptr->header.dword_0 = + UPIU_HEADER_DWORD( + UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag); } /** * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU) + * @hba - per adapter instance * @lrb - pointer to local reference block */ -static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp) +static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { - struct utp_transfer_req_desc *req_desc; - struct utp_upiu_cmd *ucd_cmd_ptr; - u32 data_direction; u32 upiu_flags; - - ucd_cmd_ptr = lrbp->ucd_cmd_ptr; - req_desc = lrbp->utr_descriptor_ptr; + int ret = 0; switch (lrbp->command_type) { case UTP_CMD_TYPE_SCSI: - if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) { - data_direction = UTP_DEVICE_TO_HOST; - upiu_flags = UPIU_CMD_FLAGS_READ; - } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) { - data_direction = UTP_HOST_TO_DEVICE; - upiu_flags = UPIU_CMD_FLAGS_WRITE; + if (likely(lrbp->cmd)) { + ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, + lrbp->cmd->sc_data_direction); + ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, 
+					upiu_flags);
 		} else {
-			data_direction = UTP_NO_DATA_TRANSFER;
-			upiu_flags = UPIU_CMD_FLAGS_NONE;
+			ret = -EINVAL;
 		}
-
-		/* Transfer request descriptor header fields */
-		req_desc->header.dword_0 =
-			cpu_to_le32(data_direction | UTP_SCSI_COMMAND);
-
-		/*
-		 * assigning invalid value for command status. Controller
-		 * updates OCS on command completion, with the command
-		 * status
-		 */
-		req_desc->header.dword_2 =
-			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
-
-		/* command descriptor fields */
-		ucd_cmd_ptr->header.dword_0 =
-			cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
-						      upiu_flags,
-						      lrbp->lun,
-						      lrbp->task_tag));
-		ucd_cmd_ptr->header.dword_1 =
-			cpu_to_be32(
-				UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
-						  0,
-						  0,
-						  0));
-
-		/* Total EHS length and Data segment length will be zero */
-		ucd_cmd_ptr->header.dword_2 = 0;
-
-		ucd_cmd_ptr->exp_data_transfer_len =
-			cpu_to_be32(lrbp->cmd->transfersize);
-
-		memcpy(ucd_cmd_ptr->cdb,
-		       lrbp->cmd->cmnd,
-		       (min_t(unsigned short,
-			      lrbp->cmd->cmd_len,
-			      MAX_CDB_SIZE)));
 		break;
 	case UTP_CMD_TYPE_DEV_MANAGE:
-		/* For query function implementation */
+		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+		if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
+			ufshcd_prepare_utp_query_req_upiu(
+					hba, lrbp, upiu_flags);
+		else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
+			ufshcd_prepare_utp_nop_upiu(lrbp);
+		else
+			ret = -EINVAL;
 		break;
 	case UTP_CMD_TYPE_UFS:
 		/* For UFS native command implementation */
+		ret = -ENOTSUPP;
+		dev_err(hba->dev, "%s: UFS native commands are not supported\n",
+			__func__);
+		break;
+	default:
+		ret = -ENOTSUPP;
+		dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
+				__func__, lrbp->command_type);
 		break;
 	} /* end of switch */
+
+	return ret;
 }
 
 /**
@@ -514,31 +881,395 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
 	tag = cmd->request->tag;
 
-	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	switch (hba->ufshcd_state) {
+	case UFSHCD_STATE_OPERATIONAL:
+		break;
+	case UFSHCD_STATE_RESET:
+		err = SCSI_MLQUEUE_HOST_BUSY;
+		goto out_unlock;
+	case UFSHCD_STATE_ERROR:
+		set_host_byte(cmd, DID_ERROR);
+		cmd->scsi_done(cmd);
+		goto out_unlock;
+	default:
+		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+				__func__, hba->ufshcd_state);
+		set_host_byte(cmd, DID_BAD_TARGET);
+		cmd->scsi_done(cmd);
+		goto out_unlock;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* acquire the tag to make sure device cmds don't use it */
+	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
+		/*
+		 * A device management command is holding this tag; requeue
+		 * the request. Requeuing helps in cases where the request
+		 * *may* find a different free tag instead of waiting for
+		 * the device management command to complete.
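+		 * (The tag space is guarded by the hba->lrb_in_use bitmap:
+		 * test_and_set_bit_lock()/clear_bit_unlock() provide
+		 * acquire/release semantics, so no extra lock is needed
+		 * around tag ownership.)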
+		 */
 		err = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	}
 
 	lrbp = &hba->lrb[tag];
 
+	WARN_ON(lrbp->cmd);
 	lrbp->cmd = cmd;
 	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
 	lrbp->sense_buffer = cmd->sense_buffer;
 	lrbp->task_tag = tag;
 	lrbp->lun = cmd->device->lun;
-
+	lrbp->intr_cmd = false;
 	lrbp->command_type = UTP_CMD_TYPE_SCSI;
 
 	/* form UPIU before issuing the command */
-	ufshcd_compose_upiu(lrbp);
+	ufshcd_compose_upiu(hba, lrbp);
 
 	err = ufshcd_map_sg(lrbp);
-	if (err)
+	if (err) {
+		lrbp->cmd = NULL;
+		clear_bit_unlock(tag, &hba->lrb_in_use);
 		goto out;
+	}
 
 	/* issue command to the controller */
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_send_command(hba, tag);
+out_unlock:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
 	return err;
 }
+
+static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
+		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+{
+	lrbp->cmd = NULL;
+	lrbp->sense_bufflen = 0;
+	lrbp->sense_buffer = NULL;
+	lrbp->task_tag = tag;
+	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
+	lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+	lrbp->intr_cmd = true; /* No interrupt aggregation */
+	hba->dev_cmd.type = cmd_type;
+
+	return ufshcd_compose_upiu(hba, lrbp);
+}
+
+static int
+ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
+{
+	int err = 0;
+	unsigned long flags;
+	u32 mask = 1 << tag;
+
+	/* clear outstanding transaction before retry */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_utrl_clear(hba, tag);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/*
+	 * wait for h/w to clear the corresponding bit in the door-bell.
+	 * max. wait is 1 sec.
+	 */
+	err = ufshcd_wait_for_register(hba,
+			REG_UTP_TRANSFER_REQ_DOOR_BELL,
+			mask, ~mask, 1000, 1000);
+
+	return err;
+}
+
+/**
+ * ufshcd_dev_cmd_completion() - handles device management command responses
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int
+ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+	int resp;
+	int err = 0;
+
+	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
+
+	switch (resp) {
+	case UPIU_TRANSACTION_NOP_IN:
+		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
+			err = -EINVAL;
+			dev_err(hba->dev, "%s: unexpected response %x\n",
+					__func__, resp);
+		}
+		break;
+	case UPIU_TRANSACTION_QUERY_RSP:
+		ufshcd_copy_query_response(hba, lrbp);
+		break;
+	case UPIU_TRANSACTION_REJECT_UPIU:
+		/* TODO: handle Reject UPIU Response */
+		err = -EPERM;
+		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
+				__func__);
+		break;
+	default:
+		err = -EINVAL;
+		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
+				__func__, resp);
+		break;
+	}
+
+	return err;
+}
+
+static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
+		struct ufshcd_lrb *lrbp, int max_timeout)
+{
+	int err = 0;
+	unsigned long time_left;
+	unsigned long flags;
+
+	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+			msecs_to_jiffies(max_timeout));
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->dev_cmd.complete = NULL;
+	if (likely(time_left)) {
+		err = ufshcd_get_tr_ocs(lrbp);
+		if (!err)
+			err = ufshcd_dev_cmd_completion(hba, lrbp);
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (!time_left) {
+		err = -ETIMEDOUT;
+		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
+			/* successfully cleared the command, retry if needed */
+			err = -EAGAIN;
+	}
+
+	return err;
+}
+
+/**
+ * ufshcd_get_dev_cmd_tag - Get device management command tag
+ * @hba: per-adapter instance
+ * @tag_out: pointer to a variable with the available slot value
+ *
+ * Get a free slot and lock it until the device management command
+ * completes.
+ *
+ * Returns false if no free slot is available for locking, else
+ * returns true with the tag value in @tag_out.
+ */
+static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
+{
+	int tag;
+	bool ret = false;
+	unsigned long tmp;
+
+	if (!tag_out)
+		goto out;
+
+	do {
+		tmp = ~hba->lrb_in_use;
+		tag = find_last_bit(&tmp, hba->nutrs);
+		if (tag >= hba->nutrs)
+			goto out;
+	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
+
+	*tag_out = tag;
+	ret = true;
+out:
+	return ret;
+}
+
+static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
+{
+	clear_bit_unlock(tag, &hba->lrb_in_use);
+}
+
+/**
+ * ufshcd_exec_dev_cmd - API for sending device management requests
+ * @hba - UFS hba
+ * @cmd_type - specifies the type (NOP, Query...)
+ * @timeout - timeout in milliseconds
+ *
+ * NOTE: Since there is only one available tag for device management commands,
+ * the caller is expected to hold the hba->dev_cmd.lock mutex.
+ */
+static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
+		enum dev_cmd_type cmd_type, int timeout)
+{
+	struct ufshcd_lrb *lrbp;
+	int err;
+	int tag;
+	struct completion wait;
+	unsigned long flags;
+
+	/*
+	 * Get free slot, sleep if slots are unavailable.
+	 * Even though we use wait_event() which sleeps indefinitely,
+	 * the maximum wait time is bounded by SCSI request timeout.
+	 */
+	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+
+	init_completion(&wait);
+	lrbp = &hba->lrb[tag];
+	WARN_ON(lrbp->cmd);
+	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
+	if (unlikely(err))
+		goto out_put_tag;
+
+	hba->dev_cmd.complete = &wait;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_send_command(hba, tag);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+
+out_put_tag:
+	ufshcd_put_dev_cmd_tag(hba, tag);
+	wake_up(&hba->dev_cmd.tag_wq);
+	return err;
+}
+
+/**
+ * ufshcd_query_flag() - API function for sending flag query requests
+ * @hba: per-adapter instance
+ * @opcode: flag query to perform
+ * @idn: flag idn to access
+ * @flag_res: the flag value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+			enum flag_idn idn, bool *flag_res)
+{
+	struct ufs_query_req *request;
+	struct ufs_query_res *response;
+	int err;
+
+	BUG_ON(!hba);
+
+	mutex_lock(&hba->dev_cmd.lock);
+	request = &hba->dev_cmd.query.request;
+	response = &hba->dev_cmd.query.response;
+	memset(request, 0, sizeof(struct ufs_query_req));
+	memset(response, 0, sizeof(struct ufs_query_res));
+
+	switch (opcode) {
+	case UPIU_QUERY_OPCODE_SET_FLAG:
+	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
+	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
+		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+		break;
+	case UPIU_QUERY_OPCODE_READ_FLAG:
+		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+		if (!flag_res) {
+			/* No dummy reads */
+			dev_err(hba->dev, "%s: Invalid argument for read request\n",
+					__func__);
+			err = -EINVAL;
+			goto out_unlock;
+		}
+		break;
+	default:
+		dev_err(hba->dev,
+			"%s: Expected query flag opcode but got = %d\n",
+			__func__, opcode);
+		err = -EINVAL;
+		goto out_unlock;
+	}
+	request->upiu_req.opcode = opcode;
+	request->upiu_req.idn = idn;
+
+	/* Send query request */
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
+			QUERY_REQ_TIMEOUT);
+
+	if (err) {
+		dev_err(hba->dev,
+			"%s: Sending flag query for idn %d failed, err = %d\n",
+			__func__, idn, err);
+		goto out_unlock;
+	}
+
+	if (flag_res)
+		*flag_res = (be32_to_cpu(response->upiu_res.value) &
+				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
+
+out_unlock:
+	mutex_unlock(&hba->dev_cmd.lock);
+	return err;
+}
+
+/**
+ * ufshcd_query_attr - API function for sending attribute requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
+{
+	struct ufs_query_req *request;
+	struct ufs_query_res *response;
+	int err;
+
+	BUG_ON(!hba);
+
+	if (!attr_val) {
+		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
+				__func__, opcode);
+		err = -EINVAL;
+		goto out;
+	}
+
+	mutex_lock(&hba->dev_cmd.lock);
+	request = &hba->dev_cmd.query.request;
+	response = &hba->dev_cmd.query.response;
+	memset(request, 0, sizeof(struct ufs_query_req));
+	memset(response, 0, sizeof(struct ufs_query_res));
+
+	switch (opcode) {
+	case UPIU_QUERY_OPCODE_WRITE_ATTR:
+		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+		request->upiu_req.value = cpu_to_be32(*attr_val);
+		break;
+	case UPIU_QUERY_OPCODE_READ_ATTR:
+		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+		break;
+	default:
+		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
+				__func__, opcode);
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	request->upiu_req.opcode = opcode;
+	request->upiu_req.idn = idn;
+	request->upiu_req.index = index;
+	request->upiu_req.selector = selector;
+
+	/* Send query request */
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
+			QUERY_REQ_TIMEOUT);
+
+	if (err) {
+		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
+				__func__, opcode, idn, err);
+		goto out_unlock;
+	}
+
+	*attr_val = be32_to_cpu(response->upiu_res.value);
+
+out_unlock:
+	mutex_unlock(&hba->dev_cmd.lock);
 out:
 	return err;
 }
@@ -562,10 +1293,10 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
 
 	/* Allocate memory for UTP command descriptors */
 	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
-	hba->ucdl_base_addr = dma_alloc_coherent(hba->dev,
-						 ucdl_size,
-						 &hba->ucdl_dma_addr,
-						 GFP_KERNEL);
+	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
+						  ucdl_size,
+						  &hba->ucdl_dma_addr,
+						  GFP_KERNEL);
 
 	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
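+	 * Note that the descriptor pools here come from dmam_alloc_coherent(),
+	 * the device-managed variant: they are released automatically on
+	 * driver detach, which is why the old ufshcd_free_hba_memory()
+	 * unwind disappears from the error path below.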
@@ -585,10 +1316,10 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) * UFSHCI requires 1024 byte alignment of UTRD */ utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); - hba->utrdl_base_addr = dma_alloc_coherent(hba->dev, - utrdl_size, - &hba->utrdl_dma_addr, - GFP_KERNEL); + hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, + utrdl_size, + &hba->utrdl_dma_addr, + GFP_KERNEL); if (!hba->utrdl_base_addr || WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) { dev_err(hba->dev, @@ -601,10 +1332,10 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) * UFSHCI requires 1024 byte alignment of UTMRD */ utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; - hba->utmrdl_base_addr = dma_alloc_coherent(hba->dev, - utmrdl_size, - &hba->utmrdl_dma_addr, - GFP_KERNEL); + hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, + utmrdl_size, + &hba->utmrdl_dma_addr, + GFP_KERNEL); if (!hba->utmrdl_base_addr || WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) { dev_err(hba->dev, @@ -613,14 +1344,15 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) } /* Allocate memory for local reference block */ - hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL); + hba->lrb = devm_kzalloc(hba->dev, + hba->nutrs * sizeof(struct ufshcd_lrb), + GFP_KERNEL); if (!hba->lrb) { dev_err(hba->dev, "LRB Memory allocation failed\n"); goto out; } return 0; out: - ufshcd_free_hba_memory(hba); return -ENOMEM; } @@ -674,11 +1406,11 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba) utrdlp[i].prd_table_offset = cpu_to_le16((prdt_offset >> 2)); utrdlp[i].response_upiu_length = - cpu_to_le16(ALIGNED_UPIU_SIZE); + cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); hba->lrb[i].utr_descriptor_ptr = (utrdlp + i); - hba->lrb[i].ucd_cmd_ptr = - (struct utp_upiu_cmd *)(cmd_descp + i); + hba->lrb[i].ucd_req_ptr = + (struct utp_upiu_req *)(cmd_descp + i); hba->lrb[i].ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; hba->lrb[i].ucd_prdt_ptr = @@ -699,35 +1431,263 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba) */ static int ufshcd_dme_link_startup(struct ufs_hba *hba) { - struct uic_command *uic_cmd; + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_err(hba->dev, + "dme-link-startup: error code %d\n", ret); + return ret; +} + +/** + * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET + * @hba: per adapter instance + * @attr_sel: uic command argument1 + * @attr_set: attribute set type as uic command argument2 + * @mib_val: setting value as uic command argument3 + * @peer: indicate whether peer or local + * + * Returns 0 on success, non-zero value on failure + */ +int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, + u8 attr_set, u32 mib_val, u8 peer) +{ + struct uic_command uic_cmd = {0}; + static const char *const action[] = { + "dme-set", + "dme-peer-set" + }; + const char *set = action[!!peer]; + int ret; + + uic_cmd.command = peer ? 
+		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
+	uic_cmd.argument1 = attr_sel;
+	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
+	uic_cmd.argument3 = mib_val;
+
+	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+	if (ret)
+		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+			set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
+
+/**
+ * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
+ * @hba: per adapter instance
+ * @attr_sel: uic command argument1
+ * @mib_val: the value of the attribute as returned by the UIC command
+ * @peer: indicate whether peer or local
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
+			u32 *mib_val, u8 peer)
+{
+	struct uic_command uic_cmd = {0};
+	static const char *const action[] = {
+		"dme-get",
+		"dme-peer-get"
+	};
+	const char *get = action[!!peer];
+	int ret;
+
+	uic_cmd.command = peer ?
+		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+	uic_cmd.argument1 = attr_sel;
+
+	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+	if (ret) {
+		dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
+			get, UIC_GET_ATTR_ID(attr_sel), ret);
+		goto out;
+	}
+
+	if (mib_val)
+		*mib_val = uic_cmd.argument3;
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
+
+/**
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ * using DME_SET primitives.
+ * @hba: per adapter instance
+ * @mode: power mode value
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+{
+	struct uic_command uic_cmd = {0};
+	struct completion pwr_done;
 	unsigned long flags;
+	u8 status;
+	int ret;
+
+	uic_cmd.command = UIC_CMD_DME_SET;
+	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+	uic_cmd.argument3 = mode;
+	init_completion(&pwr_done);
+
+	mutex_lock(&hba->uic_cmd_mutex);
 
-	/* check if controller is ready to accept UIC commands */
-	if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
-	    UIC_COMMAND_READY) == 0x0) {
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->pwr_done = &pwr_done;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
+	if (ret) {
 		dev_err(hba->dev,
-		       "Controller not ready"
-		       " to accept UIC commands\n");
-		return -EIO;
+			"pwr mode change with mode 0x%x uic error %d\n",
+			mode, ret);
+		goto out;
 	}
+	if (!wait_for_completion_timeout(hba->pwr_done,
+					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+		dev_err(hba->dev,
+			"pwr mode change with mode 0x%x completion timeout\n",
+			mode);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	status = ufshcd_get_upmcrs(hba);
+	if (status != PWR_LOCAL) {
+		dev_err(hba->dev,
+			"pwr mode change failed, host upmcrs:0x%x\n",
+			status);
+		ret = (status != PWR_OK) ? status : -1;
+	}
+out:
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->pwr_done = NULL;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	mutex_unlock(&hba->uic_cmd_mutex);
+	return ret;
+}
 
-	/* form UIC command */
-	uic_cmd = &hba->active_uic_cmd;
-	uic_cmd->command = UIC_CMD_DME_LINK_STARTUP;
-	uic_cmd->argument1 = 0;
-	uic_cmd->argument2 = 0;
-	uic_cmd->argument3 = 0;
+/**
+ * ufshcd_config_max_pwr_mode - Set & change the power mode with
+ * maximum capability attribute information.
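+ * The mode byte handed to ufshcd_uic_change_pwr_mode() packs the RX mode
+ * into the upper nibble and the TX mode into the lower nibble; a minimal
+ * sketch using values from this file:
+ *
+ *	u8 mode = FASTAUTO_MODE << 4 | SLOWAUTO_MODE; /* fast RX, slow TX */
+ *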
+ * @hba: per adapter instance
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
+{
+	enum {RX = 0, TX = 1};
+	u32 lanes[] = {1, 1};
+	u32 gear[] = {1, 1};
+	u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
+	int ret;
 
-	/* enable UIC related interrupts */
-	hba->int_enable_mask |= UIC_COMMAND_COMPL;
-	ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
+	/* Get the connected lane count */
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
 
-	/* sending UIC commands to controller */
-	ufshcd_send_uic_command(hba, uic_cmd);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	return 0;
+	/*
+	 * First, get the maximum gears of HS speed.
+	 * If a zero value, it means there is no HSGEAR capability.
+	 * Then, get the maximum gears of PWM speed.
+	 */
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
+	if (!gear[RX]) {
+		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
+		pwr[RX] = SLOWAUTO_MODE;
+	}
+
+	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
+	if (!gear[TX]) {
+		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+				    &gear[TX]);
+		pwr[TX] = SLOWAUTO_MODE;
+	}
+
+	/*
+	 * Configure attributes for power mode change with below.
+	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
+	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
+	 * - PA_HSSERIES
+	 */
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
+	if (pwr[RX] == FASTAUTO_MODE)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
+	if (pwr[TX] == FASTAUTO_MODE)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+
+	if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
+
+	ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
+	if (ret)
+		dev_err(hba->dev,
+			"pwr_mode: power mode change failed %d\n", ret);
+
+	return ret;
+}
+
+/**
+ * ufshcd_complete_dev_init() - checks device readiness
+ * @hba: per-adapter instance
+ *
+ * Set the fDeviceInit flag and poll until the device toggles it.
+ */
+static int ufshcd_complete_dev_init(struct ufs_hba *hba)
+{
+	int i, retries, err = 0;
+	bool flag_res = true;
+
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		/* Set the fDeviceInit flag */
+		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+					QUERY_FLAG_IDN_FDEVICEINIT, NULL);
+		if (!err || err == -ETIMEDOUT)
+			break;
+		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+	}
+	if (err) {
+		dev_err(hba->dev,
+			"%s setting fDeviceInit flag failed with error %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	/* poll for max. 100 iterations for fDeviceInit flag to clear */
+	for (i = 0; i < 100 && !err && flag_res; i++) {
+		for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+			err = ufshcd_query_flag(hba,
+					UPIU_QUERY_OPCODE_READ_FLAG,
+					QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+			if (!err || err == -ETIMEDOUT)
+				break;
+			dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
+					err);
+		}
+	}
+	if (err)
+		dev_err(hba->dev,
+			"%s reading fDeviceInit flag failed with error %d\n",
+			__func__, err);
+	else if (flag_res)
+		dev_err(hba->dev,
+			"%s fDeviceInit was not cleared by the device\n",
+			__func__);
+
+out:
+	return err;
 }
 
 /**
@@ -736,9 +1696,10 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
  *
  * To bring UFS host controller to operational state,
  * 1. Check if device is present
- * 2. Configure run-stop-registers
- * 3. Enable required interrupts
- * 4. Configure interrupt aggregation
+ * 2. Enable required interrupts
+ * 3. Configure interrupt aggregation
+ * 4. Program UTRL and UTMRL base address
+ * 5. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
@@ -748,13 +1709,29 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
 	u32 reg;
 
 	/* check if device present */
-	reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
+	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
 	if (!ufshcd_is_device_present(reg)) {
 		dev_err(hba->dev, "cc: Device not present\n");
 		err = -ENXIO;
 		goto out;
 	}
 
+	/* Enable required interrupts */
+	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
+
+	/* Configure interrupt aggregation */
+	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
+
+	/* Configure UTRL and UTMRL base address registers */
+	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+			REG_UTP_TASK_REQ_LIST_BASE_L);
+	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+			REG_UTP_TASK_REQ_LIST_BASE_H);
+
 	/*
 	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
 	 * DEI, HEI bits must be 0
@@ -768,23 +1745,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
 		goto out;
 	}
 
-	/* Enable required interrupts */
-	hba->int_enable_mask |= (UTP_TRANSFER_REQ_COMPL |
-				 UIC_ERROR |
-				 UTP_TASK_REQ_COMPL |
-				 DEVICE_FATAL_ERROR |
-				 CONTROLLER_FATAL_ERROR |
-				 SYSTEM_BUS_FATAL_ERROR);
-	ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
-
-	/* Configure interrupt aggregation */
-	ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
-
-	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
-		scsi_unblock_requests(hba->host);
-
-	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-	scsi_scan_host(hba->host);
-
 out:
 	return err;
 }
@@ -853,80 +1813,60 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
 }
 
 /**
- * ufshcd_initialize_hba - start the initialization process
+ * ufshcd_link_startup - Initialize unipro link startup
  * @hba: per adapter instance
 *
- * 1. Enable the controller via ufshcd_hba_enable.
- * 2. Program the Transfer Request List Address with the starting address of
- *    UTRDL.
- * 3. Program the Task Management Request List Address with starting address
- *    of UTMRDL.
- *
- * Returns 0 on success, non-zero value on failure.
+ * Returns 0 for success, non-zero in case of failure */ -static int ufshcd_initialize_hba(struct ufs_hba *hba) +static int ufshcd_link_startup(struct ufs_hba *hba) { - if (ufshcd_hba_enable(hba)) - return -EIO; + int ret; - /* Configure UTRL and UTMRL base address registers */ - writel(lower_32_bits(hba->utrdl_dma_addr), - (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L)); - writel(upper_32_bits(hba->utrdl_dma_addr), - (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H)); - writel(lower_32_bits(hba->utmrdl_dma_addr), - (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L)); - writel(upper_32_bits(hba->utmrdl_dma_addr), - (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H)); + /* enable UIC related interrupts */ + ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); + + ret = ufshcd_dme_link_startup(hba); + if (ret) + goto out; - /* Initialize unipro link startup procedure */ - return ufshcd_dme_link_startup(hba); + ret = ufshcd_make_hba_operational(hba); + +out: + if (ret) + dev_err(hba->dev, "link startup failed %d\n", ret); + return ret; } /** - * ufshcd_do_reset - reset the host controller - * @hba: per adapter instance + * ufshcd_verify_dev_init() - Verify device initialization + * @hba: per-adapter instance * - * Returns SUCCESS/FAILED + * Send NOP OUT UPIU and wait for NOP IN response to check whether the + * device Transport Protocol (UTP) layer is ready after a reset. + * If the UTP layer at the device side is not initialized, it may + * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT + * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations. */ -static int ufshcd_do_reset(struct ufs_hba *hba) +static int ufshcd_verify_dev_init(struct ufs_hba *hba) { - struct ufshcd_lrb *lrbp; - unsigned long flags; - int tag; - - /* block commands from midlayer */ - scsi_block_requests(hba->host); + int err = 0; + int retries; - spin_lock_irqsave(hba->host->host_lock, flags); - hba->ufshcd_state = UFSHCD_STATE_RESET; + mutex_lock(&hba->dev_cmd.lock); + for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { + err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, + NOP_OUT_TIMEOUT); - /* send controller to reset state */ - ufshcd_hba_stop(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); + if (!err || err == -ETIMEDOUT) + break; - /* abort outstanding commands */ - for (tag = 0; tag < hba->nutrs; tag++) { - if (test_bit(tag, &hba->outstanding_reqs)) { - lrbp = &hba->lrb[tag]; - scsi_dma_unmap(lrbp->cmd); - lrbp->cmd->result = DID_RESET << 16; - lrbp->cmd->scsi_done(lrbp->cmd); - lrbp->cmd = NULL; - } + dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err); } + mutex_unlock(&hba->dev_cmd.lock); - /* clear outstanding request/task bit maps */ - hba->outstanding_reqs = 0; - hba->outstanding_tasks = 0; - - /* start the initialization process */ - if (ufshcd_initialize_hba(hba)) { - dev_err(hba->dev, - "Reset: Controller initialization failed\n"); - return FAILED; - } - return SUCCESS; + if (err) + dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err); + return err; } /** @@ -946,6 +1886,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) sdev->use_10_for_ms = 1; scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); + /* allow SCSI layer to restart the device in case of errors */ + sdev->allow_restart = 1; + /* * Inform SCSI Midlayer that the LUN queue depth is same as the * controller queue depth. 
If a LUN queue depth is less than the @@ -973,10 +1916,11 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev) * ufshcd_task_req_compl - handle task management request completion * @hba: per adapter instance * @index: index of the completed request + * @resp: task management service response * - * Returns SUCCESS/FAILED + * Returns non-zero value on error, zero on success */ -static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index) +static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp) { struct utp_task_req_desc *task_req_descp; struct utp_upiu_task_rsp *task_rsp_upiup; @@ -997,19 +1941,15 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index) task_req_descp[index].task_rsp_upiu; task_result = be32_to_cpu(task_rsp_upiup->header.dword_1); task_result = ((task_result & MASK_TASK_RESPONSE) >> 8); - - if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL && - task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) - task_result = FAILED; - else - task_result = SUCCESS; + if (resp) + *resp = (u8)task_result; } else { - task_result = FAILED; - dev_err(hba->dev, - "trc: Invalid ocs = %x\n", ocs_value); + dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", + __func__, ocs_value); } spin_unlock_irqrestore(hba->host->host_lock, flags); - return task_result; + + return ocs_value; } /** @@ -1061,32 +2001,24 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) int result = 0; switch (scsi_status) { - case SAM_STAT_GOOD: - result |= DID_OK << 16 | - COMMAND_COMPLETE << 8 | - SAM_STAT_GOOD; - break; case SAM_STAT_CHECK_CONDITION: + ufshcd_copy_sense_data(lrbp); + case SAM_STAT_GOOD: result |= DID_OK << 16 | COMMAND_COMPLETE << 8 | - SAM_STAT_CHECK_CONDITION; - ufshcd_copy_sense_data(lrbp); - break; - case SAM_STAT_BUSY: - result |= SAM_STAT_BUSY; + scsi_status; break; case SAM_STAT_TASK_SET_FULL: - /* * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue * depth needs to be adjusted to the exact number of * outstanding commands the LUN can handle at any given time. 
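+		 * Note that this case then falls through to the BUSY/ABORTED
+		 * handling below, just as CHECK_CONDITION above falls into
+		 * the GOOD case once the sense data has been copied.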
*/ ufshcd_adjust_lun_qdepth(lrbp->cmd); - result |= SAM_STAT_TASK_SET_FULL; - break; + case SAM_STAT_BUSY: case SAM_STAT_TASK_ABORTED: - result |= SAM_STAT_TASK_ABORTED; + ufshcd_copy_sense_data(lrbp); + result |= scsi_status; break; default: result |= DID_ERROR << 16; @@ -1115,31 +2047,46 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) switch (ocs) { case OCS_SUCCESS: + result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); + + switch (result) { + case UPIU_TRANSACTION_RESPONSE: + /* + * get the response UPIU result to extract + * the SCSI command status + */ + result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); + + /* + * get the result based on SCSI status response + * to notify the SCSI midlayer of the command status + */ + scsi_status = result & MASK_SCSI_STATUS; + result = ufshcd_scsi_cmd_status(lrbp, scsi_status); - /* check if the returned transfer response is valid */ - result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr); - if (result) { + if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) + schedule_work(&hba->eeh_work); + break; + case UPIU_TRANSACTION_REJECT_UPIU: + /* TODO: handle Reject UPIU Response */ + result = DID_ERROR << 16; dev_err(hba->dev, - "Invalid response = %x\n", result); + "Reject UPIU not fully implemented\n"); + break; + default: + result = DID_ERROR << 16; + dev_err(hba->dev, + "Unexpected request response code = %x\n", + result); break; } - - /* - * get the response UPIU result to extract - * the SCSI command status - */ - result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); - - /* - * get the result based on SCSI status response - * to notify the SCSI midlayer of the command status - */ - scsi_status = result & MASK_SCSI_STATUS; - result = ufshcd_scsi_cmd_status(lrbp, scsi_status); break; case OCS_ABORTED: result |= DID_ABORT << 16; break; + case OCS_INVALID_COMMAND_STATUS: + result |= DID_REQUEUE << 16; + break; case OCS_INVALID_CMD_TABLE_ATTR: case OCS_INVALID_PRDT_ATTR: case OCS_MISMATCH_DATA_BUF_SIZE: @@ -1157,34 +2104,64 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) } /** + * ufshcd_uic_cmd_compl - handle completion of uic command + * @hba: per adapter instance + * @intr_status: interrupt status generated by the controller + */ +static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) +{ + if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { + hba->active_uic_cmd->argument2 |= + ufshcd_get_uic_cmd_result(hba); + hba->active_uic_cmd->argument3 = + ufshcd_get_dme_attr_val(hba); + complete(&hba->active_uic_cmd->done); + } + + if ((intr_status & UIC_POWER_MODE) && hba->pwr_done) + complete(hba->pwr_done); +} + +/** * ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter instance */ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) { - struct ufshcd_lrb *lrb; + struct ufshcd_lrb *lrbp; + struct scsi_cmnd *cmd; unsigned long completed_reqs; u32 tr_doorbell; int result; int index; + bool int_aggr_reset = false; - lrb = hba->lrb; - tr_doorbell = - readl(hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL); + tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); completed_reqs = tr_doorbell ^ hba->outstanding_reqs; for (index = 0; index < hba->nutrs; index++) { if (test_bit(index, &completed_reqs)) { + lrbp = &hba->lrb[index]; + cmd = lrbp->cmd; + /* + * Don't skip resetting interrupt aggregation counters + * if a regular command is present. 
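+			 * Device management (intr_cmd) requests bypass
+			 * aggregation, so a completion batch consisting only
+			 * of them must leave the aggregation counters alone.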
+ */ + int_aggr_reset |= !lrbp->intr_cmd; - result = ufshcd_transfer_rsp_status(hba, &lrb[index]); - - if (lrb[index].cmd) { - scsi_dma_unmap(lrb[index].cmd); - lrb[index].cmd->result = result; - lrb[index].cmd->scsi_done(lrb[index].cmd); - + if (cmd) { + result = ufshcd_transfer_rsp_status(hba, lrbp); + scsi_dma_unmap(cmd); + cmd->result = result; /* Mark completed command as NULL in LRB */ - lrb[index].cmd = NULL; + lrbp->cmd = NULL; + clear_bit_unlock(index, &hba->lrb_in_use); + /* Do not touch lrbp after scsi done */ + cmd->scsi_done(cmd); + } else if (lrbp->command_type == + UTP_CMD_TYPE_DEV_MANAGE) { + if (hba->dev_cmd.complete) + complete(hba->dev_cmd.complete); } } /* end of if */ } /* end of for */ @@ -1192,68 +2169,380 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) /* clear corresponding bits of completed commands */ hba->outstanding_reqs ^= completed_reqs; + /* we might have free'd some tags above */ + wake_up(&hba->dev_cmd.tag_wq); + /* Reset interrupt aggregation counters */ - ufshcd_config_int_aggr(hba, INT_AGGR_RESET); + if (int_aggr_reset) + ufshcd_reset_intr_aggr(hba); } /** - * ufshcd_uic_cc_handler - handle UIC command completion - * @work: pointer to a work queue structure + * ufshcd_disable_ee - disable exception event + * @hba: per-adapter instance + * @mask: exception event to disable * - * Returns 0 on success, non-zero value on failure + * Disables exception event in the device so that the EVENT_ALERT + * bit is not set. + * + * Returns zero on success, non-zero error value on failure. */ -static void ufshcd_uic_cc_handler (struct work_struct *work) +static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask) { - struct ufs_hba *hba; + int err = 0; + u32 val; - hba = container_of(work, struct ufs_hba, uic_workq); + if (!(hba->ee_ctrl_mask & mask)) + goto out; - if ((hba->active_uic_cmd.command == UIC_CMD_DME_LINK_STARTUP) && - !(ufshcd_get_uic_cmd_result(hba))) { + val = hba->ee_ctrl_mask & ~mask; + val &= 0xFFFF; /* 2 bytes */ + err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); + if (!err) + hba->ee_ctrl_mask &= ~mask; +out: + return err; +} - if (ufshcd_make_hba_operational(hba)) - dev_err(hba->dev, - "cc: hba not operational state\n"); - return; +/** + * ufshcd_enable_ee - enable exception event + * @hba: per-adapter instance + * @mask: exception event to enable + * + * Enable corresponding exception event in the device to allow + * device to alert host in critical scenarios. + * + * Returns zero on success, non-zero error value on failure. + */ +static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask) +{ + int err = 0; + u32 val; + + if (hba->ee_ctrl_mask & mask) + goto out; + + val = hba->ee_ctrl_mask | mask; + val &= 0xFFFF; /* 2 bytes */ + err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, + QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val); + if (!err) + hba->ee_ctrl_mask |= mask; +out: + return err; +} + +/** + * ufshcd_enable_auto_bkops - Allow device managed BKOPS + * @hba: per-adapter instance + * + * Allow device to manage background operations on its own. Enabling + * this might lead to inconsistent latencies during normal data transfers + * as the device is allowed to manage its own way of handling background + * operations. + * + * Returns zero on success, non-zero on failure. 
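+ *
+ * A minimal sketch of the underlying flag write (no response payload is
+ * expected, hence the NULL flag_res):
+ *
+ *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ *				QUERY_FLAG_IDN_BKOPS_EN, NULL);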
+ */
+static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
+{
+	int err = 0;
+
+	if (hba->auto_bkops_enabled)
+		goto out;
+
+	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+			QUERY_FLAG_IDN_BKOPS_EN, NULL);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
+				__func__, err);
+		goto out;
	}
+
+	hba->auto_bkops_enabled = true;
+
+	/* No need of URGENT_BKOPS exception from the device */
+	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+	if (err)
+		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
+				__func__, err);
+out:
+	return err;
+}
+
+/**
+ * ufshcd_disable_auto_bkops - block the device from doing background operations
+ * @hba: per-adapter instance
+ *
+ * Disabling background operations improves command response latency but
+ * has the drawback that the device may move into a critical state where
+ * it is not operable. Make sure to call ufshcd_enable_auto_bkops() whenever
+ * the host is idle so that BKOPS are managed effectively without any
+ * negative impact.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
+{
+	int err = 0;
+
+	if (!hba->auto_bkops_enabled)
+		goto out;
+
+	/*
+	 * If host assisted BKOPs is to be enabled, make sure
+	 * urgent bkops exception is allowed.
+	 */
+	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
+				__func__, err);
+		goto out;
+	}
+
+	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+			QUERY_FLAG_IDN_BKOPS_EN, NULL);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
+				__func__, err);
+		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+		goto out;
+	}
+
+	hba->auto_bkops_enabled = false;
+out:
+	return err;
+}
+
+/**
+ * ufshcd_force_reset_auto_bkops - force enable of auto bkops
+ * @hba: per adapter instance
+ *
+ * After a device reset the device may toggle the BKOPS_EN flag
+ * to its default value. The s/w tracking variables should be updated
+ * as well. Do this by forcing enable of auto bkops.
+ */
+static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
+{
+	hba->auto_bkops_enabled = false;
+	hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
+	ufshcd_enable_auto_bkops(hba);
+}
+
+static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
+{
+	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * @hba: per-adapter instance
+ *
+ * Enable fBackgroundOpsEn flag in the device to permit background
+ * operations.
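+ * The BKOPS status attribute is read first; only a status of
+ * BKOPS_STATUS_PERF_IMPACT or worse actually triggers the enable.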
*/ -static void ufshcd_fatal_err_handler(struct work_struct *work) +static int ufshcd_urgent_bkops(struct ufs_hba *hba) +{ + int err; + u32 status = 0; + + err = ufshcd_get_bkops_status(hba, &status); + if (err) { + dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", + __func__, err); + goto out; + } + + status = status & 0xF; + + /* handle only if status indicates performance impact or critical */ + if (status >= BKOPS_STATUS_PERF_IMPACT) + err = ufshcd_enable_auto_bkops(hba); +out: + return err; +} + +static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) +{ + return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_EE_STATUS, 0, 0, status); +} + +/** + * ufshcd_exception_event_handler - handle exceptions raised by device + * @work: pointer to work data + * + * Read bExceptionEventStatus attribute from the device and handle the + * exception event accordingly. + */ +static void ufshcd_exception_event_handler(struct work_struct *work) { struct ufs_hba *hba; - hba = container_of(work, struct ufs_hba, feh_workq); + int err; + u32 status = 0; + hba = container_of(work, struct ufs_hba, eeh_work); + + pm_runtime_get_sync(hba->dev); + err = ufshcd_get_ee_status(hba, &status); + if (err) { + dev_err(hba->dev, "%s: failed to get exception status %d\n", + __func__, err); + goto out; + } - /* check if reset is already in progress */ - if (hba->ufshcd_state != UFSHCD_STATE_RESET) - ufshcd_do_reset(hba); + status &= hba->ee_ctrl_mask; + if (status & MASK_EE_URGENT_BKOPS) { + err = ufshcd_urgent_bkops(hba); + if (err) + dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", + __func__, err); + } +out: + pm_runtime_put_sync(hba->dev); + return; } /** - * ufshcd_err_handler - Check for fatal errors - * @work: pointer to a work queue structure + * ufshcd_err_handler - handle UFS errors that require s/w attention + * @work: pointer to work structure */ -static void ufshcd_err_handler(struct ufs_hba *hba) +static void ufshcd_err_handler(struct work_struct *work) +{ + struct ufs_hba *hba; + unsigned long flags; + u32 err_xfer = 0; + u32 err_tm = 0; + int err = 0; + int tag; + + hba = container_of(work, struct ufs_hba, eh_work); + + pm_runtime_get_sync(hba->dev); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->ufshcd_state == UFSHCD_STATE_RESET) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + goto out; + } + + hba->ufshcd_state = UFSHCD_STATE_RESET; + ufshcd_set_eh_in_progress(hba); + + /* Complete requests that have door-bell cleared by h/w */ + ufshcd_transfer_req_compl(hba); + ufshcd_tmc_handler(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* Clear pending transfer requests */ + for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) + if (ufshcd_clear_cmd(hba, tag)) + err_xfer |= 1 << tag; + + /* Clear pending task management requests */ + for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) + if (ufshcd_clear_tm_cmd(hba, tag)) + err_tm |= 1 << tag; + + /* Complete the requests that are cleared by s/w */ + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_transfer_req_compl(hba); + ufshcd_tmc_handler(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* Fatal errors need reset */ + if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { + err = ufshcd_reset_and_restore(hba); + if (err) { + dev_err(hba->dev, "%s: reset and restore failed\n", + __func__); + hba->ufshcd_state = 
+				UFSHCD_STATE_ERROR;
+		}
+		/*
+		 * Inform scsi mid-layer that we did reset and allow to handle
+		 * Unit Attention properly.
+		 */
+		scsi_report_bus_reset(hba->host, 0);
+		hba->saved_err = 0;
+		hba->saved_uic_err = 0;
+	}
+	ufshcd_clear_eh_in_progress(hba);
+
+out:
+	scsi_unblock_requests(hba->host);
+	pm_runtime_put_sync(hba->dev);
+}
+
+/**
+ * ufshcd_update_uic_error - check and set fatal UIC error flags.
+ * @hba: per-adapter instance
+ */
+static void ufshcd_update_uic_error(struct ufs_hba *hba)
 {
 	u32 reg;
 
+	/* PA_INIT_ERROR is fatal and needs UIC reset */
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
+	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+
+	/* UIC NL/TL/DME errors need software retry */
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
+	if (reg)
+		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
+	if (reg)
+		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
+	if (reg)
+		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+
+	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
+			__func__, hba->uic_error);
+}
+
+/**
+ * ufshcd_check_errors - Check for errors that need s/w attention
+ * @hba: per-adapter instance
+ */
+static void ufshcd_check_errors(struct ufs_hba *hba)
+{
+	bool queue_eh_work = false;
+
 	if (hba->errors & INT_FATAL_ERRORS)
-		goto fatal_eh;
+		queue_eh_work = true;
 
 	if (hba->errors & UIC_ERROR) {
+		hba->uic_error = 0;
+		ufshcd_update_uic_error(hba);
+		if (hba->uic_error)
+			queue_eh_work = true;
+	}
+
+	if (queue_eh_work) {
+		/* handle fatal errors only when link is functional */
+		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
+			/* block commands from scsi mid-layer */
+			scsi_block_requests(hba->host);
+
+			/* transfer error masks to sticky bits */
+			hba->saved_err |= hba->errors;
+			hba->saved_uic_err |= hba->uic_error;
-
-		reg = readl(hba->mmio_base +
-			    REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
-		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
-			goto fatal_eh;
+
+			hba->ufshcd_state = UFSHCD_STATE_ERROR;
+			schedule_work(&hba->eh_work);
+		}
 	}
-	return;
-fatal_eh:
-	hba->ufshcd_state = UFSHCD_STATE_ERROR;
-	schedule_work(&hba->feh_workq);
+	/*
+	 * if (!queue_eh_work) -
+	 * Other errors are either non-fatal where host recovers
+	 * itself without s/w intervention or errors that will be
+	 * handled by the SCSI core layer.
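+	 * (For example, data link layer errors other than PA_INIT are
+	 * left to the h/w recovery mechanisms and deliberately leave
+	 * queue_eh_work false.)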
+ */ } /** @@ -1264,9 +2553,9 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba) { u32 tm_doorbell; - tm_doorbell = readl(hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL); + tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; - wake_up_interruptible(&hba->ufshcd_tm_wait_queue); + wake_up(&hba->tm_wq); } /** @@ -1278,10 +2567,10 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) { hba->errors = UFSHCD_ERROR_MASK & intr_status; if (hba->errors) - ufshcd_err_handler(hba); + ufshcd_check_errors(hba); - if (intr_status & UIC_COMMAND_COMPL) - schedule_work(&hba->uic_workq); + if (intr_status & UFSHCD_UIC_MASK) + ufshcd_uic_cmd_compl(hba, intr_status); if (intr_status & UTP_TASK_REQ_COMPL) ufshcd_tmc_handler(hba); @@ -1305,53 +2594,69 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) struct ufs_hba *hba = __hba; spin_lock(hba->host->host_lock); - intr_status = readl(hba->mmio_base + REG_INTERRUPT_STATUS); + intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); if (intr_status) { + ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); ufshcd_sl_intr(hba, intr_status); - - /* If UFSHCI 1.0 then clear interrupt status register */ - if (hba->ufs_version == UFSHCI_VERSION_10) - writel(intr_status, - (hba->mmio_base + REG_INTERRUPT_STATUS)); retval = IRQ_HANDLED; } spin_unlock(hba->host->host_lock); return retval; } +static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) +{ + int err = 0; + u32 mask = 1 << tag; + unsigned long flags; + + if (!test_bit(tag, &hba->outstanding_tasks)) + goto out; + + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* poll for max. 1 sec to clear door bell register by h/w */ + err = ufshcd_wait_for_register(hba, + REG_UTP_TASK_REQ_DOOR_BELL, + mask, 0, 1000, 1000); +out: + return err; +} + /** * ufshcd_issue_tm_cmd - issues task management commands to controller * @hba: per adapter instance - * @lrbp: pointer to local reference block + * @lun_id: LUN ID to which TM command is sent + * @task_id: task ID to which the TM command is applicable + * @tm_function: task management function opcode + * @tm_response: task management service response return value * - * Returns SUCCESS/FAILED + * Returns non-zero value on error, zero on success. */ -static int -ufshcd_issue_tm_cmd(struct ufs_hba *hba, - struct ufshcd_lrb *lrbp, - u8 tm_function) +static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, + u8 tm_function, u8 *tm_response) { struct utp_task_req_desc *task_req_descp; struct utp_upiu_task_req *task_req_upiup; struct Scsi_Host *host; unsigned long flags; - int free_slot = 0; + int free_slot; int err; + int task_tag; host = hba->host; - spin_lock_irqsave(host->host_lock, flags); - - /* If task management queue is full */ - free_slot = ufshcd_get_tm_free_slot(hba); - if (free_slot >= hba->nutmrs) { - spin_unlock_irqrestore(host->host_lock, flags); - dev_err(hba->dev, "Task management queue full\n"); - err = FAILED; - goto out; - } + /* + * Get free slot, sleep if slots are unavailable. + * Even though we use wait_event() which sleeps indefinitely, + * the maximum wait time is bounded by %TM_CMD_TIMEOUT. 
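+	 * (TM slots are tagged hba->nutrs + free_slot, keeping task
+	 * management tags disjoint from the transfer request tag space.)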
+	 */
+	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
 
+	spin_lock_irqsave(host->host_lock, flags);
 	task_req_descp = hba->utmrdl_base_addr;
 	task_req_descp += free_slot;
@@ -1363,110 +2668,105 @@ ufshcd_issue_tm_cmd,
 	/* Configure task request UPIU */
 	task_req_upiup =
 		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
+	task_tag = hba->nutrs + free_slot;
 	task_req_upiup->header.dword_0 =
-		cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
-					      lrbp->lun, lrbp->task_tag));
+		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
+				  lun_id, task_tag);
 	task_req_upiup->header.dword_1 =
-		cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));
+		UPIU_HEADER_DWORD(0, tm_function, 0, 0);
 
-	task_req_upiup->input_param1 = lrbp->lun;
-	task_req_upiup->input_param1 =
-		cpu_to_be32(task_req_upiup->input_param1);
-	task_req_upiup->input_param2 = lrbp->task_tag;
-	task_req_upiup->input_param2 =
-		cpu_to_be32(task_req_upiup->input_param2);
+	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
+	task_req_upiup->input_param2 = cpu_to_be32(task_id);
 
 	/* send command to the controller */
 	__set_bit(free_slot, &hba->outstanding_tasks);
-	writel((1 << free_slot),
-	       (hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL));
+	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
 
 	spin_unlock_irqrestore(host->host_lock, flags);
 
 	/* wait until the task management command is completed */
-	err =
-	    wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
-					     (test_bit(free_slot,
-					     &hba->tm_condition) != 0),
-					     60 * HZ);
+	err = wait_event_timeout(hba->tm_wq,
+			test_bit(free_slot, &hba->tm_condition),
+			msecs_to_jiffies(TM_CMD_TIMEOUT));
 	if (!err) {
-		dev_err(hba->dev,
-			"Task management command timed-out\n");
-		err = FAILED;
-		goto out;
+		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
+				__func__, tm_function);
+		if (ufshcd_clear_tm_cmd(hba, free_slot))
+			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
+					__func__, free_slot);
+		err = -ETIMEDOUT;
+	} else {
+		err = ufshcd_task_req_compl(hba, free_slot, tm_response);
 	}
+
 	clear_bit(free_slot, &hba->tm_condition);
-	err = ufshcd_task_req_compl(hba, free_slot);
-out:
+	ufshcd_put_tm_slot(hba, free_slot);
+	wake_up(&hba->tm_tag_wq);
+
 	return err;
 }
 
 /**
- * ufshcd_device_reset - reset device and abort all the pending commands
+ * ufshcd_eh_device_reset_handler - device reset handler registered to
+ *                                  scsi layer.
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
-static int ufshcd_device_reset(struct scsi_cmnd *cmd)
+static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *host;
 	struct ufs_hba *hba;
 	unsigned int tag;
 	u32 pos;
 	int err;
+	u8 resp = 0xF;
+	struct ufshcd_lrb *lrbp;
+	unsigned long flags;
 
 	host = cmd->device->host;
 	hba = shost_priv(host);
 	tag = cmd->request->tag;
 
-	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
-	if (err == FAILED)
+	lrbp = &hba->lrb[tag];
+	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+		if (!err)
+			err = resp;
 		goto out;
+	}
 
-	for (pos = 0; pos < hba->nutrs; pos++) {
-		if (test_bit(pos, &hba->outstanding_reqs) &&
-		    (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
-
-			/* clear the respective UTRLCLR register bit */
-			ufshcd_utrl_clear(hba, pos);
-
-			clear_bit(pos, &hba->outstanding_reqs);
-
-			if (hba->lrb[pos].cmd) {
-				scsi_dma_unmap(hba->lrb[pos].cmd);
-				hba->lrb[pos].cmd->result =
-					DID_ABORT << 16;
-				hba->lrb[pos].cmd->scsi_done(cmd);
-				hba->lrb[pos].cmd = NULL;
-			}
+	/* clear the commands that were pending for corresponding LUN */
+	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
+		if (hba->lrb[pos].lun == lrbp->lun) {
+			err = ufshcd_clear_cmd(hba, pos);
+			if (err)
+				break;
 		}
-	} /* end of for */
+	}
+	spin_lock_irqsave(host->host_lock, flags);
+	ufshcd_transfer_req_compl(hba);
+	spin_unlock_irqrestore(host->host_lock, flags);
 out:
+	if (!err) {
+		err = SUCCESS;
+	} else {
+		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+		err = FAILED;
+	}
 	return err;
 }
 
 /**
- * ufshcd_host_reset - Main reset function registered with scsi layer
- * @cmd: SCSI command pointer
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_host_reset(struct scsi_cmnd *cmd)
-{
-	struct ufs_hba *hba;
-
-	hba = shost_priv(cmd->device->host);
-
-	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
-		return SUCCESS;
-
-	return ufshcd_do_reset(hba);
-}
-
-/**
  * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
+ * Abort the pending command in the device by sending UFS_ABORT_TASK task
+ * management command, and in the host controller by clearing the door-bell
+ * register. There can be a race where the controller is still sending the
+ * command to the device while the abort is issued. To avoid that, first
+ * issue UFS_QUERY_TASK to check if the command is really issued and only
+ * then try to abort it.
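+ * The UFS_QUERY_TASK poll below is bounded: at most 100 iterations, with
+ * a short usleep whenever the doorbell still shows the command in flight.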
+ * * Returns SUCCESS/FAILED */ static int ufshcd_abort(struct scsi_cmnd *cmd) @@ -1475,40 +2775,237 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) struct ufs_hba *hba; unsigned long flags; unsigned int tag; - int err; + int err = 0; + int poll_cnt; + u8 resp = 0xF; + struct ufshcd_lrb *lrbp; host = cmd->device->host; hba = shost_priv(host); tag = cmd->request->tag; - spin_lock_irqsave(host->host_lock, flags); + /* If command is already aborted/completed, return SUCCESS */ + if (!(test_bit(tag, &hba->outstanding_reqs))) + goto out; - /* check if command is still pending */ - if (!(test_bit(tag, &hba->outstanding_reqs))) { - err = FAILED; - spin_unlock_irqrestore(host->host_lock, flags); + lrbp = &hba->lrb[tag]; + for (poll_cnt = 100; poll_cnt; poll_cnt--) { + err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, + UFS_QUERY_TASK, &resp); + if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { + /* cmd pending in the device */ + break; + } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { + u32 reg; + + /* + * cmd not pending in the device, check if it is + * in transition. + */ + reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + if (reg & (1 << tag)) { + /* sleep for max. 200us to stabilize */ + usleep_range(100, 200); + continue; + } + /* command completed already */ + goto out; + } else { + if (!err) + err = resp; /* service response error */ + goto out; + } + } + + if (!poll_cnt) { + err = -EBUSY; goto out; } - spin_unlock_irqrestore(host->host_lock, flags); - err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK); - if (err == FAILED) + err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, + UFS_ABORT_TASK, &resp); + if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { + if (!err) + err = resp; /* service response error */ + goto out; + } + + err = ufshcd_clear_cmd(hba, tag); + if (err) goto out; scsi_dma_unmap(cmd); spin_lock_irqsave(host->host_lock, flags); - - /* clear the respective UTRLCLR register bit */ - ufshcd_utrl_clear(hba, tag); - __clear_bit(tag, &hba->outstanding_reqs); hba->lrb[tag].cmd = NULL; spin_unlock_irqrestore(host->host_lock, flags); + + clear_bit_unlock(tag, &hba->lrb_in_use); + wake_up(&hba->dev_cmd.tag_wq); +out: + if (!err) { + err = SUCCESS; + } else { + dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); + err = FAILED; + } + + return err; +} + +/** + * ufshcd_host_reset_and_restore - reset and restore host controller + * @hba: per-adapter instance + * + * Note that host controller reset may issue DME_RESET to + * local and remote (device) Uni-Pro stack and the attributes + * are reset to default state. 
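+ * After re-enabling the controller, the link is brought up through the
+ * same ufshcd_async_scan() path used at probe time; the async cookie is
+ * synchronized so this function does not return before the scan settles.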
+ * + * Returns zero on success, non-zero on failure + */ +static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) +{ + int err; + async_cookie_t cookie; + unsigned long flags; + + /* Reset the host controller */ + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_hba_stop(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + err = ufshcd_hba_enable(hba); + if (err) + goto out; + + /* Establish the link again and restore the device */ + cookie = async_schedule(ufshcd_async_scan, hba); + /* wait for async scan to be completed */ + async_synchronize_cookie(++cookie); + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) + err = -EIO; out: + if (err) + dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); + + return err; +} + +/** + * ufshcd_reset_and_restore - reset and re-initialize host/device + * @hba: per-adapter instance + * + * Reset and recover device, host and re-establish link. This + * is helpful to recover the communication in fatal error conditions. + * + * Returns zero on success, non-zero on failure + */ +static int ufshcd_reset_and_restore(struct ufs_hba *hba) +{ + int err = 0; + unsigned long flags; + + err = ufshcd_host_reset_and_restore(hba); + + /* + * After reset the door-bell might be cleared, complete + * outstanding requests in s/w here. + */ + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_transfer_req_compl(hba); + ufshcd_tmc_handler(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return err; +} + +/** + * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer + * @cmd - SCSI command pointer + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) +{ + int err; + unsigned long flags; + struct ufs_hba *hba; + + hba = shost_priv(cmd->device->host); + + /* + * Check if there is any race with fatal error handling. + * If so, wait for it to complete. Even though fatal error + * handling does reset and restore in some cases, don't assume + * anything out of it. We are just avoiding race here. 
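The handler described in this comment has a classic shape: re-check the racing condition under the lock, but never block on the worker while holding the lock the worker itself needs. A user-space analogue, with a mutex standing in for host_lock and a sleep for flush_work() (all names illustrative):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool handler_running = true;	/* ~ work_pending() || state == RESET */

static void *error_handler(void *arg)
{
	(void)arg;
	usleep(100 * 1000);		/* pretend to recover the HBA */
	pthread_mutex_lock(&lock);
	handler_running = false;
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t eh;

	pthread_create(&eh, NULL, error_handler, NULL);

	/* Reset path: re-check under the lock each iteration, but drop
	 * the lock before waiting so the handler can make progress. */
	for (;;) {
		pthread_mutex_lock(&lock);
		if (!handler_running)
			break;			/* leave loop with lock held */
		pthread_mutex_unlock(&lock);
		usleep(10 * 1000);		/* ~ flush_work(&hba->eh_work) */
	}
	puts("error handler quiesced; safe to reset");
	pthread_mutex_unlock(&lock);
	pthread_join(eh, NULL);
	return 0;
}
```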
+ */ + do { + spin_lock_irqsave(hba->host->host_lock, flags); + if (!(work_pending(&hba->eh_work) || + hba->ufshcd_state == UFSHCD_STATE_RESET)) + break; + spin_unlock_irqrestore(hba->host->host_lock, flags); + dev_dbg(hba->dev, "%s: reset in progress\n", __func__); + flush_work(&hba->eh_work); + } while (1); + + hba->ufshcd_state = UFSHCD_STATE_RESET; + ufshcd_set_eh_in_progress(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + err = ufshcd_reset_and_restore(hba); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (!err) { + err = SUCCESS; + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + } else { + err = FAILED; + hba->ufshcd_state = UFSHCD_STATE_ERROR; + } + ufshcd_clear_eh_in_progress(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + return err; } +/** + * ufshcd_async_scan - asynchronous execution for link startup + * @data: data pointer to pass to this function + * @cookie: cookie data + */ +static void ufshcd_async_scan(void *data, async_cookie_t cookie) +{ + struct ufs_hba *hba = (struct ufs_hba *)data; + int ret; + + ret = ufshcd_link_startup(hba); + if (ret) + goto out; + + ufshcd_config_max_pwr_mode(hba); + + ret = ufshcd_verify_dev_init(hba); + if (ret) + goto out; + + ret = ufshcd_complete_dev_init(hba); + if (ret) + goto out; + + ufshcd_force_reset_auto_bkops(hba); + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + + /* If we are in error handling context no need to scan the host */ + if (!ufshcd_eh_in_progress(hba)) { + scsi_scan_host(hba->host); + pm_runtime_put_sync(hba->dev); + } +out: + return; +} + static struct scsi_host_template ufshcd_driver_template = { .module = THIS_MODULE, .name = UFSHCD, @@ -1517,8 +3014,8 @@ static struct scsi_host_template ufshcd_driver_template = { .slave_alloc = ufshcd_slave_alloc, .slave_destroy = ufshcd_slave_destroy, .eh_abort_handler = ufshcd_abort, - .eh_device_reset_handler = ufshcd_device_reset, - .eh_host_reset_handler = ufshcd_host_reset, + .eh_device_reset_handler = ufshcd_eh_device_reset_handler, + .eh_host_reset_handler = ufshcd_eh_host_reset_handler, .this_id = -1, .sg_tablesize = SG_ALL, .cmd_per_lun = UFSHCD_CMD_PER_LUN, @@ -1568,16 +3065,33 @@ int ufshcd_resume(struct ufs_hba *hba) } EXPORT_SYMBOL_GPL(ufshcd_resume); -/** - * ufshcd_hba_free - free allocated memory for - * host memory space data structures - * @hba: per adapter instance - */ -static void ufshcd_hba_free(struct ufs_hba *hba) +int ufshcd_runtime_suspend(struct ufs_hba *hba) +{ + if (!hba) + return 0; + + /* + * The device is idle with no requests in the queue, + * allow background operations. 
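In other words, background operations are delegated to the device only while the host is runtime-suspended and are reclaimed the moment I/O resumes. A toy model of that handshake (the toy_* names are illustrative, not the driver's):

```c
#include <stdbool.h>
#include <stdio.h>

struct toy_hba { bool auto_bkops_enabled; };

static int toy_runtime_suspend(struct toy_hba *hba)
{
	hba->auto_bkops_enabled = true;		/* device may housekeep now */
	return 0;
}

static int toy_runtime_resume(struct toy_hba *hba)
{
	hba->auto_bkops_enabled = false;	/* keep I/O latency predictable */
	return 0;
}

int main(void)
{
	struct toy_hba hba = { false };

	toy_runtime_suspend(&hba);
	printf("suspended: bkops=%d\n", hba.auto_bkops_enabled);
	toy_runtime_resume(&hba);
	printf("resumed:   bkops=%d\n", hba.auto_bkops_enabled);
	return 0;
}
```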
+ */ + return ufshcd_enable_auto_bkops(hba); +} +EXPORT_SYMBOL(ufshcd_runtime_suspend); + +int ufshcd_runtime_resume(struct ufs_hba *hba) +{ + if (!hba) + return 0; + + return ufshcd_disable_auto_bkops(hba); +} +EXPORT_SYMBOL(ufshcd_runtime_resume); + +int ufshcd_runtime_idle(struct ufs_hba *hba) { - iounmap(hba->mmio_base); - ufshcd_free_hba_memory(hba); + return 0; } +EXPORT_SYMBOL(ufshcd_runtime_idle); /** * ufshcd_remove - de-allocate SCSI host and host memory space @@ -1586,13 +3100,11 @@ static void ufshcd_hba_free(struct ufs_hba *hba) */ void ufshcd_remove(struct ufs_hba *hba) { + scsi_remove_host(hba->host); /* disable interrupts */ - ufshcd_int_config(hba, UFSHCD_INT_DISABLE); - + ufshcd_disable_intr(hba, hba->intr_mask); ufshcd_hba_stop(hba); - ufshcd_hba_free(hba); - scsi_remove_host(hba->host); scsi_host_put(hba->host); } EXPORT_SYMBOL_GPL(ufshcd_remove); @@ -1645,6 +3157,9 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, /* Get UFS version supported by the controller */ hba->ufs_version = ufshcd_get_ufs_version(hba); + /* Get Interrupt bit mask per version */ + hba->intr_mask = ufshcd_get_intr_mask(hba); + /* Allocate memory for host memory space */ err = ufshcd_memory_alloc(hba); if (err) { @@ -1664,48 +3179,60 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, host->max_cmd_len = MAX_CDB_SIZE; /* Initailize wait queue for task management */ - init_waitqueue_head(&hba->ufshcd_tm_wait_queue); + init_waitqueue_head(&hba->tm_wq); + init_waitqueue_head(&hba->tm_tag_wq); /* Initialize work queues */ - INIT_WORK(&hba->uic_workq, ufshcd_uic_cc_handler); - INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler); + INIT_WORK(&hba->eh_work, ufshcd_err_handler); + INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); + + /* Initialize UIC command mutex */ + mutex_init(&hba->uic_cmd_mutex); + + /* Initialize mutex for device management commands */ + mutex_init(&hba->dev_cmd.lock); + + /* Initialize device management tag acquire wait queue */ + init_waitqueue_head(&hba->dev_cmd.tag_wq); /* IRQ registration */ - err = request_irq(irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); + err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); if (err) { dev_err(hba->dev, "request irq failed\n"); - goto out_lrb_free; + goto out_disable; } /* Enable SCSI tag mapping */ err = scsi_init_shared_tag_map(host, host->can_queue); if (err) { dev_err(hba->dev, "init shared queue failed\n"); - goto out_free_irq; + goto out_disable; } err = scsi_add_host(host, hba->dev); if (err) { dev_err(hba->dev, "scsi_add_host failed\n"); - goto out_free_irq; + goto out_disable; } - /* Initialization routine */ - err = ufshcd_initialize_hba(hba); + /* Host controller enable */ + err = ufshcd_hba_enable(hba); if (err) { - dev_err(hba->dev, "Initialization failed\n"); + dev_err(hba->dev, "Host controller enable failed\n"); goto out_remove_scsi_host; } + *hba_handle = hba; + /* Hold auto suspend until async scan completes */ + pm_runtime_get_sync(dev); + + async_schedule(ufshcd_async_scan, hba); + return 0; out_remove_scsi_host: scsi_remove_host(hba->host); -out_free_irq: - free_irq(irq, hba); -out_lrb_free: - ufshcd_free_hba_memory(hba); out_disable: scsi_host_put(host); out_error: diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 6b99a42f581..acf318e338e 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -51,6 +51,7 @@ #include <linux/bitops.h> #include <linux/pm_runtime.h> #include <linux/clk.h> +#include <linux/completion.h> 
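A side effect of the devm_request_irq() conversion in ufshcd_init() above is that two unwind labels disappear: a managed acquisition needs no matching release in the error path. The generic shape of that simplification, sketched with plain malloc()/free() in place of real resources (all names illustrative):

```c
#include <stdio.h>
#include <stdlib.h>

/* Manual acquisition means one unwind label per resource; converting
 * an acquisition to a managed (devm-style) one deletes its label and
 * the matching release call, as in the ufshcd_init() hunk above. */
static int probe_manual(void)
{
	void *irq = malloc(1), *tags = NULL;
	int err = -1;

	if (!irq)
		goto out;
	tags = malloc(1);
	if (!tags)
		goto out_free_irq;	/* with devm, this label vanishes */
	printf("probe ok\n");
	free(tags);
	free(irq);
	return 0;

out_free_irq:
	free(irq);
out:
	return err;
}

int main(void) { return probe_manual(); }
```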
 #include <asm/irq.h>
 #include <asm/byteorder.h>
@@ -67,6 +68,11 @@
 #define UFSHCD "ufshcd"
 #define UFSHCD_DRIVER_VERSION "0.2"

+enum dev_cmd_type {
+	DEV_CMD_TYPE_NOP	= 0x0,
+	DEV_CMD_TYPE_QUERY	= 0x1,
+};
+
 /**
  * struct uic_command - UIC command structure
  * @command: UIC command
@@ -75,6 +81,7 @@
  * @argument3: UIC command argument 3
  * @cmd_active: Indicate if UIC command is outstanding
  * @result: UIC command result
+ * @done: UIC command completion
  */
 struct uic_command {
 	u32 command;
@@ -83,12 +90,13 @@ struct uic_command {
 	u32 argument3;
 	int cmd_active;
 	int result;
+	struct completion done;
 };

 /**
  * struct ufshcd_lrb - local reference block
  * @utr_descriptor_ptr: UTRD address of the command
- * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_req_ptr: UCD address of the command
  * @ucd_rsp_ptr: Response UPIU address for this command
  * @ucd_prdt_ptr: PRDT address of the command
  * @cmd: pointer to SCSI command
@@ -98,10 +106,11 @@ struct uic_command {
  * @command_type: SCSI, UFS, Query.
  * @task_tag: Task tag of the command
  * @lun: LUN of the command
+ * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
  */
 struct ufshcd_lrb {
 	struct utp_transfer_req_desc *utr_descriptor_ptr;
-	struct utp_upiu_cmd *ucd_cmd_ptr;
+	struct utp_upiu_req *ucd_req_ptr;
 	struct utp_upiu_rsp *ucd_rsp_ptr;
 	struct ufshcd_sg_entry *ucd_prdt_ptr;
@@ -113,8 +122,35 @@ struct ufshcd_lrb {
 	int command_type;
 	int task_tag;
 	unsigned int lun;
+	bool intr_cmd;
+};
+
+/**
+ * struct ufs_query - holds relevant data structures for query request
+ * @request: request upiu and function
+ * @descriptor: buffer for sending/receiving descriptor
+ * @response: response upiu and response
+ */
+struct ufs_query {
+	struct ufs_query_req request;
+	u8 *descriptor;
+	struct ufs_query_res response;
 };

+/**
+ * struct ufs_dev_cmd - fields associated with device management commands
+ * @type: device management command type - Query, NOP OUT
+ * @lock: lock to allow one command at a time
+ * @complete: internal commands completion
+ * @tag_wq: wait queue until free command slot is available
+ */
+struct ufs_dev_cmd {
+	enum dev_cmd_type type;
+	struct mutex lock;
+	struct completion *complete;
+	wait_queue_head_t tag_wq;
+	struct ufs_query query;
+};

 /**
  * struct ufs_hba - per adapter private structure
@@ -128,6 +164,7 @@ struct ufshcd_lrb {
  * @host: Scsi_Host instance of the driver
  * @dev: device handle
  * @lrb: local reference block
+ * @lrb_in_use: lrb in use
  * @outstanding_tasks: Bits representing outstanding task requests
  * @outstanding_reqs: Bits representing outstanding transfer requests
  * @capabilities: UFS Controller Capabilities
@@ -136,13 +173,24 @@ struct ufshcd_lrb {
  * @ufs_version: UFS Version to which controller complies
  * @irq: Irq number of the controller
  * @active_uic_cmd: handle of active UIC command
- * @ufshcd_tm_wait_queue: wait queue for task management
+ * @uic_cmd_mutex: mutex for uic command
+ * @tm_wq: wait queue for task management
+ * @tm_tag_wq: wait queue for free task management slots
+ * @tm_slots_in_use: bit map of task management request slots in use
+ * @pwr_done: completion for power mode change
  * @tm_condition: condition variable for task management
  * @ufshcd_state: UFSHCD states
- * @int_enable_mask: Interrupt Mask Bits
- * @uic_workq: Work queue for UIC completion handling
- * @feh_workq: Work queue for fatal controller error handling
+ * @eh_flags: Error handling flags
+ * @intr_mask: Interrupt Mask Bits
+ * @ee_ctrl_mask: Exception event control mask
+ * @eh_work: Worker to
handle UFS errors that require s/w attention + * @eeh_work: Worker to handle exception events * @errors: HBA errors + * @uic_error: UFS interconnect layer error status + * @saved_err: sticky error mask + * @saved_uic_err: sticky UIC error mask + * @dev_cmd: ufs device management command information + * @auto_bkops_enabled: to track whether bkops is enabled in device */ struct ufs_hba { void __iomem *mmio_base; @@ -161,6 +209,7 @@ struct ufs_hba { struct device *dev; struct ufshcd_lrb *lrb; + unsigned long lrb_in_use; unsigned long outstanding_tasks; unsigned long outstanding_reqs; @@ -171,21 +220,42 @@ struct ufs_hba { u32 ufs_version; unsigned int irq; - struct uic_command active_uic_cmd; - wait_queue_head_t ufshcd_tm_wait_queue; + struct uic_command *active_uic_cmd; + struct mutex uic_cmd_mutex; + + wait_queue_head_t tm_wq; + wait_queue_head_t tm_tag_wq; unsigned long tm_condition; + unsigned long tm_slots_in_use; + + struct completion *pwr_done; u32 ufshcd_state; - u32 int_enable_mask; + u32 eh_flags; + u32 intr_mask; + u16 ee_ctrl_mask; /* Work Queues */ - struct work_struct uic_workq; - struct work_struct feh_workq; + struct work_struct eh_work; + struct work_struct eeh_work; /* HBA Errors */ u32 errors; + u32 uic_error; + u32 saved_err; + u32 saved_uic_err; + + /* Device management request data */ + struct ufs_dev_cmd dev_cmd; + + bool auto_bkops_enabled; }; +#define ufshcd_writel(hba, val, reg) \ + writel((val), (hba)->mmio_base + (reg)) +#define ufshcd_readl(hba, reg) \ + readl((hba)->mmio_base + (reg)) + int ufshcd_init(struct device *, struct ufs_hba ** , void __iomem * , unsigned int); void ufshcd_remove(struct ufs_hba *); @@ -196,7 +266,69 @@ void ufshcd_remove(struct ufs_hba *); */ static inline void ufshcd_hba_stop(struct ufs_hba *hba) { - writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE)); + ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE); +} + +static inline void check_upiu_size(void) +{ + BUILD_BUG_ON(ALIGNED_UPIU_SIZE < + GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE); +} + +extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state); +extern int ufshcd_resume(struct ufs_hba *hba); +extern int ufshcd_runtime_suspend(struct ufs_hba *hba); +extern int ufshcd_runtime_resume(struct ufs_hba *hba); +extern int ufshcd_runtime_idle(struct ufs_hba *hba); +extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, + u8 attr_set, u32 mib_val, u8 peer); +extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, + u32 *mib_val, u8 peer); + +/* UIC command interfaces for DME primitives */ +#define DME_LOCAL 0 +#define DME_PEER 1 +#define ATTR_SET_NOR 0 /* NORMAL */ +#define ATTR_SET_ST 1 /* STATIC */ + +static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel, + u32 mib_val) +{ + return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, + mib_val, DME_LOCAL); +} + +static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel, + u32 mib_val) +{ + return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, + mib_val, DME_LOCAL); +} + +static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel, + u32 mib_val) +{ + return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, + mib_val, DME_PEER); +} + +static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel, + u32 mib_val) +{ + return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, + mib_val, DME_PEER); +} + +static inline int ufshcd_dme_get(struct ufs_hba *hba, + u32 attr_sel, u32 *mib_val) +{ + return ufshcd_dme_get_attr(hba, attr_sel, mib_val, 
DME_LOCAL); +} + +static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, + u32 attr_sel, u32 *mib_val) +{ + return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); } #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 0c164847a3e..9abc7e32b43 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -39,7 +39,7 @@ enum { TASK_REQ_UPIU_SIZE_DWORDS = 8, TASK_RSP_UPIU_SIZE_DWORDS = 8, - ALIGNED_UPIU_SIZE = 128, + ALIGNED_UPIU_SIZE = 512, }; /* UFSHCI Registers */ @@ -124,6 +124,9 @@ enum { #define CONTROLLER_FATAL_ERROR UFS_BIT(16) #define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) +#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL |\ + UIC_POWER_MODE) + #define UFSHCD_ERROR_MASK (UIC_ERROR |\ DEVICE_FATAL_ERROR |\ CONTROLLER_FATAL_ERROR |\ @@ -142,6 +145,15 @@ enum { #define DEVICE_ERROR_INDICATOR UFS_BIT(5) #define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8) +enum { + PWR_OK = 0x0, + PWR_LOCAL = 0x01, + PWR_REMOTE = 0x02, + PWR_BUSY = 0x03, + PWR_ERROR_CAP = 0x04, + PWR_FATAL_ERROR = 0x05, +}; + /* HCE - Host Controller Enable 34h */ #define CONTROLLER_ENABLE UFS_BIT(0) #define CONTROLLER_DISABLE 0x0 @@ -191,6 +203,12 @@ enum { #define CONFIG_RESULT_CODE_MASK 0xFF #define GENERIC_ERROR_CODE_MASK 0xFF +#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\ + ((sel) & 0xFFFF)) +#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0) +#define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16) +#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF) + /* UIC Commands */ enum { UIC_CMD_DME_GET = 0x01, @@ -226,16 +244,17 @@ enum { #define MASK_UIC_COMMAND_RESULT 0xFF -#define INT_AGGR_COUNTER_THRESHOLD_VALUE (0x1F << 8) -#define INT_AGGR_TIMEOUT_VALUE (0x02) +#define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8) +#define INT_AGGR_TIMEOUT_VAL(t) (((t) & 0xFF) << 0) /* Interrupt disable masks */ enum { /* Interrupt disable mask for UFSHCI v1.0 */ - INTERRUPT_DISABLE_MASK_10 = 0xFFFF, + INTERRUPT_MASK_ALL_VER_10 = 0x30FFF, + INTERRUPT_MASK_RW_VER_10 = 0x30000, /* Interrupt disable mask for UFSHCI v1.1 */ - INTERRUPT_DISABLE_MASK_11 = 0x0, + INTERRUPT_MASK_ALL_VER_11 = 0x31FFF, }; /* @@ -285,10 +304,10 @@ enum { * @size: size of physical segment DW-3 */ struct ufshcd_sg_entry { - u32 base_addr; - u32 upper_addr; - u32 reserved; - u32 size; + __le32 base_addr; + __le32 upper_addr; + __le32 reserved; + __le32 size; }; /** @@ -311,10 +330,10 @@ struct utp_transfer_cmd_desc { * @dword3: Descriptor Header DW3 */ struct request_desc_header { - u32 dword_0; - u32 dword_1; - u32 dword_2; - u32 dword_3; + __le32 dword_0; + __le32 dword_1; + __le32 dword_2; + __le32 dword_3; }; /** @@ -333,16 +352,16 @@ struct utp_transfer_req_desc { struct request_desc_header header; /* DW 4-5*/ - u32 command_desc_base_addr_lo; - u32 command_desc_base_addr_hi; + __le32 command_desc_base_addr_lo; + __le32 command_desc_base_addr_hi; /* DW 6 */ - u16 response_upiu_length; - u16 response_upiu_offset; + __le16 response_upiu_length; + __le16 response_upiu_offset; /* DW 7 */ - u16 prd_table_length; - u16 prd_table_offset; + __le16 prd_table_length; + __le16 prd_table_offset; }; /** @@ -357,10 +376,10 @@ struct utp_task_req_desc { struct request_desc_header header; /* DW 4-11 */ - u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS]; + __le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS]; /* DW 12-19 */ - u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS]; + __le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS]; }; #endif /* End of Header */ diff --git a/drivers/scsi/ufs/unipro.h 
b/drivers/scsi/ufs/unipro.h
new file mode 100644
index 00000000000..0bb8041c047
--- /dev/null
+++ b/drivers/scsi/ufs/unipro.h
@@ -0,0 +1,151 @@
+/*
+ * drivers/scsi/ufs/unipro.h
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _UNIPRO_H_
+#define _UNIPRO_H_
+
+/*
+ * PHY Adapter attributes
+ */
+#define PA_ACTIVETXDATALANES	0x1560
+#define PA_ACTIVERXDATALANES	0x1580
+#define PA_TXTRAILINGCLOCKS	0x1564
+#define PA_PHY_TYPE		0x1500
+#define PA_AVAILTXDATALANES	0x1520
+#define PA_AVAILRXDATALANES	0x1540
+#define PA_MINRXTRAILINGCLOCKS	0x1543
+#define PA_TXPWRSTATUS		0x1567
+#define PA_RXPWRSTATUS		0x1582
+#define PA_TXFORCECLOCK		0x1562
+#define PA_TXPWRMODE		0x1563
+#define PA_LEGACYDPHYESCDL	0x1570
+#define PA_MAXTXSPEEDFAST	0x1521
+#define PA_MAXTXSPEEDSLOW	0x1522
+#define PA_MAXRXSPEEDFAST	0x1541
+#define PA_MAXRXSPEEDSLOW	0x1542
+#define PA_TXLINKSTARTUPHS	0x1544
+#define PA_TXSPEEDFAST		0x1565
+#define PA_TXSPEEDSLOW		0x1566
+#define PA_REMOTEVERINFO	0x15A0
+#define PA_TXGEAR		0x1568
+#define PA_TXTERMINATION	0x1569
+#define PA_HSSERIES		0x156A
+#define PA_PWRMODE		0x1571
+#define PA_RXGEAR		0x1583
+#define PA_RXTERMINATION	0x1584
+#define PA_MAXRXPWMGEAR		0x1586
+#define PA_MAXRXHSGEAR		0x1587
+#define PA_RXHSUNTERMCAP	0x15A5
+#define PA_RXLSTERMCAP		0x15A6
+#define PA_PACPREQTIMEOUT	0x1590
+#define PA_PACPREQEOBTIMEOUT	0x1591
+#define PA_HIBERN8TIME		0x15A7
+#define PA_LOCALVERINFO		0x15A9
+#define PA_TACTIVATE		0x15A8
+#define PA_PACPFRAMECOUNT	0x15C0
+#define PA_PACPERRORCOUNT	0x15C1
+#define PA_PHYTESTCONTROL	0x15C2
+#define PA_PWRMODEUSERDATA0	0x15B0
+#define PA_PWRMODEUSERDATA1	0x15B1
+#define PA_PWRMODEUSERDATA2	0x15B2
+#define PA_PWRMODEUSERDATA3	0x15B3
+#define PA_PWRMODEUSERDATA4	0x15B4
+#define PA_PWRMODEUSERDATA5	0x15B5
+#define PA_PWRMODEUSERDATA6	0x15B6
+#define PA_PWRMODEUSERDATA7	0x15B7
+#define PA_PWRMODEUSERDATA8	0x15B8
+#define PA_PWRMODEUSERDATA9	0x15B9
+#define PA_PWRMODEUSERDATA10	0x15BA
+#define PA_PWRMODEUSERDATA11	0x15BB
+#define PA_CONNECTEDTXDATALANES	0x1561
+#define PA_CONNECTEDRXDATALANES	0x1581
+#define PA_LOGICALLANEMAP	0x15A1
+#define PA_SLEEPNOCONFIGTIME	0x15A2
+#define PA_STALLNOCONFIGTIME	0x15A3
+#define PA_SAVECONFIGTIME	0x15A4

+/* PA power modes */
+enum {
+	FAST_MODE	= 1,
+	SLOW_MODE	= 2,
+	FASTAUTO_MODE	= 4,
+	SLOWAUTO_MODE	= 5,
+	UNCHANGED	= 7,
+};
+
+/* PA TX/RX Frequency Series */
+enum {
+	PA_HS_MODE_A	= 1,
+	PA_HS_MODE_B	= 2,
+};
+
+/*
+ * Data Link Layer Attributes
+ */
+#define DL_TC0TXFCTHRESHOLD	0x2040
+#define DL_FC0PROTTIMEOUTVAL	0x2041
+#define DL_TC0REPLAYTIMEOUTVAL	0x2042
+#define DL_AFC0REQTIMEOUTVAL	0x2043
+#define DL_AFC0CREDITTHRESHOLD	0x2044
+#define DL_TC0OUTACKTHRESHOLD	0x2045
+#define DL_TC1TXFCTHRESHOLD	0x2060
+#define DL_FC1PROTTIMEOUTVAL	0x2061
+#define DL_TC1REPLAYTIMEOUTVAL	0x2062
+#define DL_AFC1REQTIMEOUTVAL	0x2063
+#define DL_AFC1CREDITTHRESHOLD	0x2064
+#define DL_TC1OUTACKTHRESHOLD	0x2065
+#define DL_TXPREEMPTIONCAP	0x2000
+#define DL_TC0TXMAXSDUSIZE	0x2001
+#define DL_TC0RXINITCREDITVAL	0x2002
+#define DL_TC0TXBUFFERSIZE	0x2005
+#define DL_PEERTC0PRESENT	0x2046
+#define DL_PEERTC0RXINITCREVAL	0x2047
+#define DL_TC1TXMAXSDUSIZE	0x2003
+#define DL_TC1RXINITCREDITVAL	0x2004
+#define DL_TC1TXBUFFERSIZE	0x2006
+#define DL_PEERTC1PRESENT	0x2066
+#define
DL_PEERTC1RXINITCREVAL 0x2067 + +/* + * Network Layer Attributes + */ +#define N_DEVICEID 0x3000 +#define N_DEVICEID_VALID 0x3001 +#define N_TC0TXMAXSDUSIZE 0x3020 +#define N_TC1TXMAXSDUSIZE 0x3021 + +/* + * Transport Layer Attributes + */ +#define T_NUMCPORTS 0x4000 +#define T_NUMTESTFEATURES 0x4001 +#define T_CONNECTIONSTATE 0x4020 +#define T_PEERDEVICEID 0x4021 +#define T_PEERCPORTID 0x4022 +#define T_TRAFFICCLASS 0x4023 +#define T_PROTOCOLID 0x4024 +#define T_CPORTFLAGS 0x4025 +#define T_TXTOKENVALUE 0x4026 +#define T_RXTOKENVALUE 0x4027 +#define T_LOCALBUFFERSPACE 0x4028 +#define T_PEERBUFFERSPACE 0x4029 +#define T_CREDITSTOSEND 0x402A +#define T_CPORTMODE 0x402B +#define T_TC0TXMAXSDUSIZE 0x4060 +#define T_TC1TXMAXSDUSIZE 0x4061 + +/* Boolean attribute values */ +enum { + FALSE = 0, + TRUE, +}; + +#endif /* _UNIPRO_H_ */ diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 3449a1f8c65..308256b5e4c 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -13,6 +13,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/slab.h> #include <linux/mempool.h> @@ -20,12 +22,15 @@ #include <linux/virtio_ids.h> #include <linux/virtio_config.h> #include <linux/virtio_scsi.h> +#include <linux/cpu.h> +#include <linux/blkdev.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #define VIRTIO_SCSI_MEMPOOL_SZ 64 #define VIRTIO_SCSI_EVENT_LEN 8 +#define VIRTIO_SCSI_VQ_BASE 2 /* Command queue element */ struct virtio_scsi_cmd { @@ -33,6 +38,7 @@ struct virtio_scsi_cmd { struct completion *comp; union { struct virtio_scsi_cmd_req cmd; + struct virtio_scsi_cmd_req_pi cmd_pi; struct virtio_scsi_ctrl_tmf_req tmf; struct virtio_scsi_ctrl_an_req an; } req; @@ -57,27 +63,56 @@ struct virtio_scsi_vq { struct virtqueue *vq; }; -/* Per-target queue state */ +/* + * Per-target queue state. + * + * This struct holds the data needed by the queue steering policy. When a + * target is sent multiple requests, we need to drive them to the same queue so + * that FIFO processing order is kept. However, if a target was idle, we can + * choose a queue arbitrarily. In this case the queue is chosen according to + * the current VCPU, so the driver expects the number of request queues to be + * equal to the number of VCPUs. This makes it easy and fast to select the + * queue, and also lets the driver optimize the IRQ affinity for the virtqueues + * (each virtqueue's affinity is set to the CPU that "owns" the queue). + * + * tgt_lock is held to serialize reading and writing req_vq. Reading req_vq + * could be done locklessly, but we do not do it yet. + * + * Decrements of reqs are never concurrent with writes of req_vq: before the + * decrement reqs will be != 0; after the decrement the virtqueue completion + * routine will not use the req_vq so it can be changed by a new request. + * Thus they can happen outside the tgt_lock, provided of course we make reqs + * an atomic_t. + */ struct virtio_scsi_target_state { - /* Protects sg. Lock hierarchy is tgt_lock -> vq_lock. */ + /* This spinlock never held at the same time as vq_lock. */ spinlock_t tgt_lock; - /* For sglist construction when adding commands to the virtqueue. */ - struct scatterlist sg[]; + /* Count of outstanding requests. */ + atomic_t reqs; + + /* Currently active virtqueue for requests sent to this target. 
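The steering rule this comment describes fits in a few lines once the virtio plumbing is stripped away: a busy target sticks to its current queue so per-target FIFO order survives, while an idle target is re-pinned to the submitting CPU's queue. A user-space sketch (the driver additionally serializes the idle-target re-pin with tgt_lock and open-codes the modulo; the toy_* names are illustrative):

```c
#include <stdatomic.h>
#include <stdio.h>

#define NUM_QUEUES 4

struct toy_tgt {
	atomic_int reqs;	/* outstanding requests on this target */
	int req_vq;		/* queue the target is currently pinned to */
};

static int pick_vq(struct toy_tgt *tgt, int this_cpu)
{
	if (atomic_fetch_add(&tgt->reqs, 1) + 1 > 1)
		return tgt->req_vq;		/* in flight: keep FIFO order */
	tgt->req_vq = this_cpu % NUM_QUEUES;	/* idle: follow the CPU */
	return tgt->req_vq;
}

int main(void)
{
	struct toy_tgt tgt = { 0, 0 };

	printf("idle submit from cpu 2 -> vq %d\n", pick_vq(&tgt, 2)); /* 2 */
	printf("busy submit from cpu 3 -> vq %d\n", pick_vq(&tgt, 3)); /* 2 */
	return 0;
}
```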
*/ + struct virtio_scsi_vq *req_vq; }; /* Driver instance state */ struct virtio_scsi { struct virtio_device *vdev; - struct virtio_scsi_vq ctrl_vq; - struct virtio_scsi_vq event_vq; - struct virtio_scsi_vq req_vq; - /* Get some buffers ready for event vq */ struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN]; - struct virtio_scsi_target_state *tgt[]; + u32 num_queues; + + /* If the affinity hint is set for virtqueues */ + bool affinity_hint_set; + + /* CPU hotplug notifier */ + struct notifier_block nb; + + struct virtio_scsi_vq ctrl_vq; + struct virtio_scsi_vq event_vq; + struct virtio_scsi_vq req_vqs[]; }; static struct kmem_cache *virtscsi_cmd_cache; @@ -107,11 +142,13 @@ static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid) * * Called with vq_lock held. */ -static void virtscsi_complete_cmd(void *buf) +static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_cmd *cmd = buf; struct scsi_cmnd *sc = cmd->sc; struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; + struct virtio_scsi_target_state *tgt = + scsi_target(sc->device)->hostdata; dev_dbg(&sc->device->sdev_gendev, "cmd %p response %u status %#02x sense_len %u\n", @@ -164,54 +201,70 @@ static void virtscsi_complete_cmd(void *buf) set_driver_byte(sc, DRIVER_SENSE); } - mempool_free(cmd, virtscsi_cmd_pool); sc->scsi_done(sc); + + atomic_dec(&tgt->reqs); } -static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf)) +static void virtscsi_vq_done(struct virtio_scsi *vscsi, + struct virtio_scsi_vq *virtscsi_vq, + void (*fn)(struct virtio_scsi *vscsi, void *buf)) { void *buf; unsigned int len; + unsigned long flags; + struct virtqueue *vq = virtscsi_vq->vq; + spin_lock_irqsave(&virtscsi_vq->vq_lock, flags); do { virtqueue_disable_cb(vq); while ((buf = virtqueue_get_buf(vq, &len)) != NULL) - fn(buf); + fn(vscsi, buf); + + if (unlikely(virtqueue_is_broken(vq))) + break; } while (!virtqueue_enable_cb(vq)); + spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags); } static void virtscsi_req_done(struct virtqueue *vq) { struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); struct virtio_scsi *vscsi = shost_priv(sh); - unsigned long flags; + int index = vq->index - VIRTIO_SCSI_VQ_BASE; + struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index]; - spin_lock_irqsave(&vscsi->req_vq.vq_lock, flags); - virtscsi_vq_done(vq, virtscsi_complete_cmd); - spin_unlock_irqrestore(&vscsi->req_vq.vq_lock, flags); + virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); }; -static void virtscsi_complete_free(void *buf) +static void virtscsi_poll_requests(struct virtio_scsi *vscsi) +{ + int i, num_vqs; + + num_vqs = vscsi->num_queues; + for (i = 0; i < num_vqs; i++) + virtscsi_vq_done(vscsi, &vscsi->req_vqs[i], + virtscsi_complete_cmd); +} + +static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_cmd *cmd = buf; if (cmd->comp) complete_all(cmd->comp); - else - mempool_free(cmd, virtscsi_cmd_pool); } static void virtscsi_ctrl_done(struct virtqueue *vq) { struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); struct virtio_scsi *vscsi = shost_priv(sh); - unsigned long flags; - spin_lock_irqsave(&vscsi->ctrl_vq.vq_lock, flags); - virtscsi_vq_done(vq, virtscsi_complete_free); - spin_unlock_irqrestore(&vscsi->ctrl_vq.vq_lock, flags); + virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free); }; +static void virtscsi_handle_event(struct work_struct *work); + static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct virtio_scsi_event_node *event_node) 
{ @@ -219,12 +272,13 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct scatterlist sg; unsigned long flags; + INIT_WORK(&event_node->work, virtscsi_handle_event); sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); - err = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node, - GFP_ATOMIC); + err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node, + GFP_ATOMIC); if (!err) virtqueue_kick(vscsi->event_vq.vq); @@ -254,7 +308,7 @@ static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi) } static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, - struct virtio_scsi_event *event) + struct virtio_scsi_event *event) { struct scsi_device *sdev; struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); @@ -332,11 +386,10 @@ static void virtscsi_handle_event(struct work_struct *work) virtscsi_kick_event(vscsi, event_node); } -static void virtscsi_complete_event(void *buf) +static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_event_node *event_node = buf; - INIT_WORK(&event_node->work, virtscsi_handle_event); schedule_work(&event_node->work); } @@ -344,82 +397,72 @@ static void virtscsi_event_done(struct virtqueue *vq) { struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); struct virtio_scsi *vscsi = shost_priv(sh); - unsigned long flags; - spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); - virtscsi_vq_done(vq, virtscsi_complete_event); - spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags); + virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event); }; -static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx, - struct scsi_data_buffer *sdb) -{ - struct sg_table *table = &sdb->table; - struct scatterlist *sg_elem; - unsigned int idx = *p_idx; - int i; - - for_each_sg(table->sgl, sg_elem, table->nents, i) - sg[idx++] = *sg_elem; - - *p_idx = idx; -} - /** - * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist - * @vscsi : virtio_scsi state + * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue + * @vq : the struct virtqueue we're talking about * @cmd : command structure - * @out_num : number of read-only elements - * @in_num : number of write-only elements * @req_size : size of the request buffer * @resp_size : size of the response buffer - * - * Called with tgt_lock held. */ -static void virtscsi_map_cmd(struct virtio_scsi_target_state *tgt, - struct virtio_scsi_cmd *cmd, - unsigned *out_num, unsigned *in_num, - size_t req_size, size_t resp_size) +static int virtscsi_add_cmd(struct virtqueue *vq, + struct virtio_scsi_cmd *cmd, + size_t req_size, size_t resp_size) { struct scsi_cmnd *sc = cmd->sc; - struct scatterlist *sg = tgt->sg; - unsigned int idx = 0; + struct scatterlist *sgs[6], req, resp; + struct sg_table *out, *in; + unsigned out_num = 0, in_num = 0; + + out = in = NULL; + + if (sc && sc->sc_data_direction != DMA_NONE) { + if (sc->sc_data_direction != DMA_FROM_DEVICE) + out = &scsi_out(sc)->table; + if (sc->sc_data_direction != DMA_TO_DEVICE) + in = &scsi_in(sc)->table; + } /* Request header. */ - sg_set_buf(&sg[idx++], &cmd->req, req_size); + sg_init_one(&req, &cmd->req, req_size); + sgs[out_num++] = &req; /* Data-out buffer. 
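The slot ordering that virtscsi_add_cmd() hands to virtqueue_add_sgs() is the key invariant here: every device-readable buffer precedes every device-writable one, with the T10 PI scatterlists placed immediately before their data payloads. A sketch of the six-slot layout with strings in place of sg tables:

```c
#include <stdio.h>

int main(void)
{
	const char *sgs[6];
	unsigned out = 0, in = 0;

	/* device-readable first ... */
	sgs[out++] = "request header";
	sgs[out++] = "WRITE protection (T10 PI), if any";
	sgs[out++] = "data-out payload";
	/* ... then device-writable */
	sgs[out + in++] = "response header";
	sgs[out + in++] = "READ protection (T10 PI), if any";
	sgs[out + in++] = "data-in payload";

	for (unsigned i = 0; i < out + in; i++)
		printf("%s slot %u: %s\n", i < out ? "out" : "in ", i, sgs[i]);
	return 0;
}
```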
*/ - if (sc && sc->sc_data_direction != DMA_FROM_DEVICE) - virtscsi_map_sgl(sg, &idx, scsi_out(sc)); - - *out_num = idx; + if (out) { + /* Place WRITE protection SGLs before Data OUT payload */ + if (scsi_prot_sg_count(sc)) + sgs[out_num++] = scsi_prot_sglist(sc); + sgs[out_num++] = out->sgl; + } /* Response header. */ - sg_set_buf(&sg[idx++], &cmd->resp, resp_size); + sg_init_one(&resp, &cmd->resp, resp_size); + sgs[out_num + in_num++] = &resp; /* Data-in buffer */ - if (sc && sc->sc_data_direction != DMA_TO_DEVICE) - virtscsi_map_sgl(sg, &idx, scsi_in(sc)); + if (in) { + /* Place READ protection SGLs before Data IN payload */ + if (scsi_prot_sg_count(sc)) + sgs[out_num + in_num++] = scsi_prot_sglist(sc); + sgs[out_num + in_num++] = in->sgl; + } - *in_num = idx - *out_num; + return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC); } -static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt, - struct virtio_scsi_vq *vq, +static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq, struct virtio_scsi_cmd *cmd, - size_t req_size, size_t resp_size, gfp_t gfp) + size_t req_size, size_t resp_size) { - unsigned int out_num, in_num; unsigned long flags; int err; bool needs_kick = false; - spin_lock_irqsave(&tgt->tgt_lock, flags); - virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size); - - spin_lock(&vq->vq_lock); - err = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp); - spin_unlock(&tgt->tgt_lock); + spin_lock_irqsave(&vq->vq_lock, flags); + err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size); if (!err) needs_kick = virtqueue_kick_prepare(vq->vq); @@ -430,14 +473,46 @@ static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt, return err; } -static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) +static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd, + struct scsi_cmnd *sc) { - struct virtio_scsi *vscsi = shost_priv(sh); - struct virtio_scsi_target_state *tgt = vscsi->tgt[sc->device->id]; - struct virtio_scsi_cmd *cmd; - int ret; + cmd->lun[0] = 1; + cmd->lun[1] = sc->device->id; + cmd->lun[2] = (sc->device->lun >> 8) | 0x40; + cmd->lun[3] = sc->device->lun & 0xff; + cmd->tag = (unsigned long)sc; + cmd->task_attr = VIRTIO_SCSI_S_SIMPLE; + cmd->prio = 0; + cmd->crn = 0; +} + +static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi, + struct scsi_cmnd *sc) +{ + struct request *rq = sc->request; + struct blk_integrity *bi; + + virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc); + + if (!rq || !scsi_prot_sg_count(sc)) + return; + + bi = blk_get_integrity(rq->rq_disk); + + if (sc->sc_data_direction == DMA_TO_DEVICE) + cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size; + else if (sc->sc_data_direction == DMA_FROM_DEVICE) + cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size; +} +static int virtscsi_queuecommand(struct virtio_scsi *vscsi, + struct virtio_scsi_vq *req_vq, + struct scsi_cmnd *sc) +{ struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); + int req_size; + BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); /* TODO: check feature bit and fail if unsupported? 
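The 8-byte LUN field filled in by virtio_scsi_init_hdr() above uses the SAM flat-space addressing method in bytes 2-3 (0x40 ORed with the high bits, low byte verbatim). A self-contained encoder for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative re-statement of the lun[] assignments above. */
static void encode_lun(uint8_t out[8], uint8_t target, uint16_t lun)
{
	out[0] = 1;			/* fixed by the virtio-scsi header */
	out[1] = target;
	out[2] = (lun >> 8) | 0x40;	/* SAM flat-space addressing */
	out[3] = lun & 0xff;
	for (int i = 4; i < 8; i++)
		out[i] = 0;
}

int main(void)
{
	uint8_t lun[8];

	encode_lun(lun, 3, 300);
	for (int i = 0; i < 8; i++)
		printf("%02x ", lun[i]);
	putchar('\n');			/* 01 03 41 2c 00 00 00 00 */
	return 0;
}
```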
*/ @@ -446,48 +521,79 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) dev_dbg(&sc->device->sdev_gendev, "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); - ret = SCSI_MLQUEUE_HOST_BUSY; - cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC); - if (!cmd) - goto out; - memset(cmd, 0, sizeof(*cmd)); cmd->sc = sc; - cmd->req.cmd = (struct virtio_scsi_cmd_req){ - .lun[0] = 1, - .lun[1] = sc->device->id, - .lun[2] = (sc->device->lun >> 8) | 0x40, - .lun[3] = sc->device->lun & 0xff, - .tag = (unsigned long)sc, - .task_attr = VIRTIO_SCSI_S_SIMPLE, - .prio = 0, - .crn = 0, - }; BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); - memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); - if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd, - sizeof cmd->req.cmd, sizeof cmd->resp.cmd, - GFP_ATOMIC) == 0) - ret = 0; - else - mempool_free(cmd, virtscsi_cmd_pool); + if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) { + virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc); + memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len); + req_size = sizeof(cmd->req.cmd_pi); + } else { + virtio_scsi_init_hdr(&cmd->req.cmd, sc); + memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); + req_size = sizeof(cmd->req.cmd); + } -out: - return ret; + if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0) + return SCSI_MLQUEUE_HOST_BUSY; + return 0; +} + +static int virtscsi_queuecommand_single(struct Scsi_Host *sh, + struct scsi_cmnd *sc) +{ + struct virtio_scsi *vscsi = shost_priv(sh); + struct virtio_scsi_target_state *tgt = + scsi_target(sc->device)->hostdata; + + atomic_inc(&tgt->reqs); + return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc); +} + +static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, + struct virtio_scsi_target_state *tgt) +{ + struct virtio_scsi_vq *vq; + unsigned long flags; + u32 queue_num; + + spin_lock_irqsave(&tgt->tgt_lock, flags); + + if (atomic_inc_return(&tgt->reqs) > 1) + vq = tgt->req_vq; + else { + queue_num = smp_processor_id(); + while (unlikely(queue_num >= vscsi->num_queues)) + queue_num -= vscsi->num_queues; + + tgt->req_vq = vq = &vscsi->req_vqs[queue_num]; + } + + spin_unlock_irqrestore(&tgt->tgt_lock, flags); + return vq; +} + +static int virtscsi_queuecommand_multi(struct Scsi_Host *sh, + struct scsi_cmnd *sc) +{ + struct virtio_scsi *vscsi = shost_priv(sh); + struct virtio_scsi_target_state *tgt = + scsi_target(sc->device)->hostdata; + struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt); + + return virtscsi_queuecommand(vscsi, req_vq, sc); } static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) { DECLARE_COMPLETION_ONSTACK(comp); - struct virtio_scsi_target_state *tgt = vscsi->tgt[cmd->sc->device->id]; int ret = FAILED; cmd->comp = ∁ - if (virtscsi_kick_cmd(tgt, &vscsi->ctrl_vq, cmd, - sizeof cmd->req.tmf, sizeof cmd->resp.tmf, - GFP_NOIO) < 0) + if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd, + sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0) goto out; wait_for_completion(&comp); @@ -495,6 +601,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) ret = SUCCESS; + /* + * The spec guarantees that all requests related to the TMF have + * been completed, but the callback might not have run yet if + * we're using independent interrupts (e.g. MSI). Poll the + * virtqueues once. 
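Polling the virtqueues once, as described above, is simply a sweep over every request queue to reap completions whose interrupts have not fired yet. Schematically (drain_queue() is an illustrative stand-in for virtscsi_vq_done(), which reaps buffers under the per-vq lock):

```c
#include <stdio.h>

#define NUM_QUEUES 4

static void drain_queue(int i)
{
	printf("reaping completions on request vq %d\n", i);
}

/* After a TMF returns, the device guarantees the affected requests are
 * complete, but their per-queue interrupts (e.g. separate MSI vectors)
 * may not have been serviced yet, so sweep every request queue once. */
static void poll_all_request_queues(void)
{
	for (int i = 0; i < NUM_QUEUES; i++)
		drain_queue(i);
}

int main(void) { poll_all_request_queues(); return 0; }
```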
+	 *
+	 * In the abort case, sc->scsi_done will do nothing, because
+	 * the block layer must have detected a timeout and as a result
+	 * REQ_ATOM_COMPLETE has been set.
+	 */
+	virtscsi_poll_requests(vscsi);
+
 out:
 	mempool_free(cmd, virtscsi_cmd_pool);
 	return ret;
@@ -547,60 +665,141 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
 	return virtscsi_tmf(vscsi, cmd);
 }

-static struct scsi_host_template virtscsi_host_template = {
+static int virtscsi_target_alloc(struct scsi_target *starget)
+{
+	struct virtio_scsi_target_state *tgt =
+				kmalloc(sizeof(*tgt), GFP_KERNEL);
+	if (!tgt)
+		return -ENOMEM;
+
+	spin_lock_init(&tgt->tgt_lock);
+	atomic_set(&tgt->reqs, 0);
+	tgt->req_vq = NULL;
+
+	starget->hostdata = tgt;
+	return 0;
+}
+
+static void virtscsi_target_destroy(struct scsi_target *starget)
+{
+	struct virtio_scsi_target_state *tgt = starget->hostdata;
+	kfree(tgt);
+}
+
+static struct scsi_host_template virtscsi_host_template_single = {
+	.module = THIS_MODULE,
+	.name = "Virtio SCSI HBA",
+	.proc_name = "virtio_scsi",
+	.this_id = -1,
+	.cmd_size = sizeof(struct virtio_scsi_cmd),
+	.queuecommand = virtscsi_queuecommand_single,
+	.eh_abort_handler = virtscsi_abort,
+	.eh_device_reset_handler = virtscsi_device_reset,
+
+	.can_queue = 1024,
+	.dma_boundary = UINT_MAX,
+	.use_clustering = ENABLE_CLUSTERING,
+	.target_alloc = virtscsi_target_alloc,
+	.target_destroy = virtscsi_target_destroy,
+};
+
+static struct scsi_host_template virtscsi_host_template_multi = {
 	.module = THIS_MODULE,
 	.name = "Virtio SCSI HBA",
 	.proc_name = "virtio_scsi",
-	.queuecommand = virtscsi_queuecommand,
 	.this_id = -1,
+	.cmd_size = sizeof(struct virtio_scsi_cmd),
+	.queuecommand = virtscsi_queuecommand_multi,
 	.eh_abort_handler = virtscsi_abort,
 	.eh_device_reset_handler = virtscsi_device_reset,

 	.can_queue = 1024,
 	.dma_boundary = UINT_MAX,
 	.use_clustering = ENABLE_CLUSTERING,
+	.target_alloc = virtscsi_target_alloc,
+	.target_destroy = virtscsi_target_destroy,
 };

 #define virtscsi_config_get(vdev, fld) \
 	({ \
 		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
-		vdev->config->get(vdev, \
-				  offsetof(struct virtio_scsi_config, fld), \
-				  &__val, sizeof(__val)); \
+		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
 		__val; \
 	})

 #define virtscsi_config_set(vdev, fld, val) \
-	(void)({ \
+	do { \
 		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
-		vdev->config->set(vdev, \
-				  offsetof(struct virtio_scsi_config, fld), \
-				  &__val, sizeof(__val)); \
-	})
+		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
+	} while (0)

-static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
-			     struct virtqueue *vq)
+static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
 {
-	spin_lock_init(&virtscsi_vq->vq_lock);
-	virtscsi_vq->vq = vq;
+	int i;
+	int cpu;
+
+	/* In multiqueue mode, when the number of CPUs is equal
+	 * to the number of request queues, we let the queues
+	 * be private to one CPU by setting the affinity hint
+	 * to eliminate the contention.
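Whether the affinity hints get set at all reduces to a single predicate on the topology described above: more than one queue, and exactly one queue per online CPU. For illustration (want_affinity_hint() is not a driver function):

```c
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the condition in __virtscsi_set_affinity(): anything else
 * (single queue, or queues != CPUs) leaves the IRQ affinity alone. */
static bool want_affinity_hint(unsigned num_queues, unsigned online_cpus)
{
	return num_queues > 1 && num_queues == online_cpus;
}

int main(void)
{
	printf("%d\n", want_affinity_hint(1, 4));	/* 0: single queue */
	printf("%d\n", want_affinity_hint(4, 8));	/* 0: queues != CPUs */
	printf("%d\n", want_affinity_hint(8, 8));	/* 1: one queue per CPU */
	return 0;
}
```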
+ */ + if ((vscsi->num_queues == 1 || + vscsi->num_queues != num_online_cpus()) && affinity) { + if (vscsi->affinity_hint_set) + affinity = false; + else + return; + } + + if (affinity) { + i = 0; + for_each_online_cpu(cpu) { + virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu); + i++; + } + + vscsi->affinity_hint_set = true; + } else { + for (i = 0; i < vscsi->num_queues; i++) { + if (!vscsi->req_vqs[i].vq) + continue; + + virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); + } + + vscsi->affinity_hint_set = false; + } } -static struct virtio_scsi_target_state *virtscsi_alloc_tgt( - struct virtio_device *vdev, int sg_elems) +static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) { - struct virtio_scsi_target_state *tgt; - gfp_t gfp_mask = GFP_KERNEL; - - /* We need extra sg elements at head and tail. */ - tgt = kmalloc(sizeof(*tgt) + sizeof(tgt->sg[0]) * (sg_elems + 2), - gfp_mask); + get_online_cpus(); + __virtscsi_set_affinity(vscsi, affinity); + put_online_cpus(); +} - if (!tgt) - return NULL; +static int virtscsi_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb); + switch(action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + case CPU_DEAD: + case CPU_DEAD_FROZEN: + __virtscsi_set_affinity(vscsi, true); + break; + default: + break; + } + return NOTIFY_OK; +} - spin_lock_init(&tgt->tgt_lock); - sg_init_table(tgt->sg, sg_elems + 2); - return tgt; +static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, + struct virtqueue *vq) +{ + spin_lock_init(&virtscsi_vq->vq_lock); + virtscsi_vq->vq = vq; } static void virtscsi_scan(struct virtio_device *vdev) @@ -614,46 +813,56 @@ static void virtscsi_remove_vqs(struct virtio_device *vdev) { struct Scsi_Host *sh = virtio_scsi_host(vdev); struct virtio_scsi *vscsi = shost_priv(sh); - u32 i, num_targets; + + virtscsi_set_affinity(vscsi, false); /* Stop all the virtqueues. */ vdev->config->reset(vdev); - num_targets = sh->max_id; - for (i = 0; i < num_targets; i++) { - kfree(vscsi->tgt[i]); - vscsi->tgt[i] = NULL; - } - vdev->config->del_vqs(vdev); } static int virtscsi_init(struct virtio_device *vdev, - struct virtio_scsi *vscsi, int num_targets) + struct virtio_scsi *vscsi) { int err; - struct virtqueue *vqs[3]; - u32 i, sg_elems; + u32 i; + u32 num_vqs; + vq_callback_t **callbacks; + const char **names; + struct virtqueue **vqs; + + num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE; + vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL); + callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL); + names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL); + + if (!callbacks || !vqs || !names) { + err = -ENOMEM; + goto out; + } - vq_callback_t *callbacks[] = { - virtscsi_ctrl_done, - virtscsi_event_done, - virtscsi_req_done - }; - const char *names[] = { - "control", - "event", - "request" - }; + callbacks[0] = virtscsi_ctrl_done; + callbacks[1] = virtscsi_event_done; + names[0] = "control"; + names[1] = "event"; + for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) { + callbacks[i] = virtscsi_req_done; + names[i] = "request"; + } /* Discover virtqueues and write information to configuration. 
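Discovering the virtqueues now takes VIRTIO_SCSI_VQ_BASE fixed slots (control, event) plus one request slot per queue, so the tables handed to find_vqs() are sized and filled accordingly, as the loop above shows. A minimal sketch of the name table (the callback table is built the same way):

```c
#include <stdio.h>
#include <stdlib.h>

#define VQ_BASE 2	/* slot 0: control, slot 1: event */

int main(void)
{
	unsigned num_queues = 3, num_vqs = VQ_BASE + num_queues;
	const char **names = malloc(num_vqs * sizeof(*names));

	if (!names)
		return 1;
	names[0] = "control";
	names[1] = "event";
	for (unsigned i = VQ_BASE; i < num_vqs; i++)
		names[i] = "request";	/* every request vq shares a handler */

	for (unsigned i = 0; i < num_vqs; i++)
		printf("vq[%u] = %s\n", i, names[i]);
	free(names);
	return 0;
}
```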
*/ - err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names); + err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); if (err) - return err; + goto out; virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]); virtscsi_init_vq(&vscsi->event_vq, vqs[1]); - virtscsi_init_vq(&vscsi->req_vq, vqs[2]); + for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) + virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE], + vqs[i]); + + virtscsi_set_affinity(vscsi, true); virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE); virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE); @@ -661,19 +870,12 @@ static int virtscsi_init(struct virtio_device *vdev, if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) virtscsi_kick_event_all(vscsi); - /* We need to know how many segments before we allocate. */ - sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1; - - for (i = 0; i < num_targets; i++) { - vscsi->tgt[i] = virtscsi_alloc_tgt(vdev, sg_elems); - if (!vscsi->tgt[i]) { - err = -ENOMEM; - goto out; - } - } err = 0; out: + kfree(names); + kfree(callbacks); + kfree(vqs); if (err) virtscsi_remove_vqs(vdev); return err; @@ -683,16 +885,24 @@ static int virtscsi_probe(struct virtio_device *vdev) { struct Scsi_Host *shost; struct virtio_scsi *vscsi; - int err; + int err, host_prot; u32 sg_elems, num_targets; u32 cmd_per_lun; + u32 num_queues; + struct scsi_host_template *hostt; + + /* We need to know how many queues before we allocate. */ + num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; - /* Allocate memory and link the structs together. */ num_targets = virtscsi_config_get(vdev, max_target) + 1; - shost = scsi_host_alloc(&virtscsi_host_template, - sizeof(*vscsi) - + num_targets * sizeof(struct virtio_scsi_target_state)); + if (num_queues == 1) + hostt = &virtscsi_host_template_single; + else + hostt = &virtscsi_host_template_multi; + + shost = scsi_host_alloc(hostt, + sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues); if (!shost) return -ENOMEM; @@ -700,12 +910,20 @@ static int virtscsi_probe(struct virtio_device *vdev) shost->sg_tablesize = sg_elems; vscsi = shost_priv(shost); vscsi->vdev = vdev; + vscsi->num_queues = num_queues; vdev->priv = shost; - err = virtscsi_init(vdev, vscsi, num_targets); + err = virtscsi_init(vdev, vscsi); if (err) goto virtscsi_init_failed; + vscsi->nb.notifier_call = &virtscsi_cpu_callback; + err = register_hotcpu_notifier(&vscsi->nb); + if (err) { + pr_err("registering cpu notifier failed\n"); + goto scsi_add_host_failed; + } + cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; @@ -717,6 +935,16 @@ static int virtscsi_probe(struct virtio_device *vdev) shost->max_id = num_targets; shost->max_channel = 0; shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; + + if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { + host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | + SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | + SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; + + scsi_host_set_prot(shost, host_prot); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + } + err = scsi_add_host(shost, &vdev->dev); if (err) goto scsi_add_host_failed; @@ -743,13 +971,19 @@ static void virtscsi_remove(struct virtio_device *vdev) scsi_remove_host(shost); + unregister_hotcpu_notifier(&vscsi->nb); + virtscsi_remove_vqs(vdev); scsi_host_put(shost); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int 
virtscsi_freeze(struct virtio_device *vdev) { + struct Scsi_Host *sh = virtio_scsi_host(vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + + unregister_hotcpu_notifier(&vscsi->nb); virtscsi_remove_vqs(vdev); return 0; } @@ -758,8 +992,17 @@ static int virtscsi_restore(struct virtio_device *vdev) { struct Scsi_Host *sh = virtio_scsi_host(vdev); struct virtio_scsi *vscsi = shost_priv(sh); + int err; - return virtscsi_init(vdev, vscsi, sh->max_id); + err = virtscsi_init(vdev, vscsi); + if (err) + return err; + + err = register_hotcpu_notifier(&vscsi->nb); + if (err) + vdev->config->del_vqs(vdev); + + return err; } #endif @@ -771,6 +1014,7 @@ static struct virtio_device_id id_table[] = { static unsigned int features[] = { VIRTIO_SCSI_F_HOTPLUG, VIRTIO_SCSI_F_CHANGE, + VIRTIO_SCSI_F_T10_PI, }; static struct virtio_driver virtio_scsi_driver = { @@ -781,7 +1025,7 @@ static struct virtio_driver virtio_scsi_driver = { .id_table = id_table, .probe = virtscsi_probe, .scan = virtscsi_scan, -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP .freeze = virtscsi_freeze, .restore = virtscsi_restore, #endif @@ -794,8 +1038,7 @@ static int __init init(void) virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0); if (!virtscsi_cmd_cache) { - printk(KERN_ERR "kmem_cache_create() for " - "virtscsi_cmd_cache failed\n"); + pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n"); goto error; } @@ -804,8 +1047,7 @@ static int __init init(void) mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ, virtscsi_cmd_cache); if (!virtscsi_cmd_pool) { - printk(KERN_ERR "mempool_create() for" - "virtscsi_cmd_pool failed\n"); + pr_err("mempool_create() for virtscsi_cmd_pool failed\n"); goto error; } ret = register_virtio_driver(&virtio_scsi_driver); diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 3bfaa66fa0d..c88e1468aad 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -1,7 +1,7 @@ /* * Linux driver for VMware's para-virtualized SCSI HBA. * - * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -32,6 +32,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> +#include <scsi/scsi_tcq.h> #include "vmw_pvscsi.h" @@ -44,7 +45,7 @@ MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING); #define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8 #define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1 -#define PVSCSI_DEFAULT_QUEUE_DEPTH 64 +#define PVSCSI_DEFAULT_QUEUE_DEPTH 254 #define SGL_SIZE PAGE_SIZE struct pvscsi_sg_list { @@ -62,6 +63,7 @@ struct pvscsi_ctx { dma_addr_t dataPA; dma_addr_t sensePA; dma_addr_t sglPA; + struct completion *abort_cmp; }; struct pvscsi_adapter { @@ -71,6 +73,7 @@ struct pvscsi_adapter { bool use_msi; bool use_msix; bool use_msg; + bool use_req_threshold; spinlock_t hw_lock; @@ -102,18 +105,22 @@ struct pvscsi_adapter { /* Command line parameters */ -static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING; +static int pvscsi_ring_pages; static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING; static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH; static bool pvscsi_disable_msi; static bool pvscsi_disable_msix; static bool pvscsi_use_msg = true; +static bool pvscsi_use_req_threshold = true; #define PVSCSI_RW (S_IRUSR | S_IWUSR) module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW); MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default=" - __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")"); + __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) + "[up to 16 targets]," + __stringify(PVSCSI_SETUP_RINGS_MAX_NUM_PAGES) + "[for 16+ targets])"); module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW); MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default=" @@ -121,7 +128,7 @@ MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default=" module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW); MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default=" - __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")"); + __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")"); module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW); MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); @@ -132,6 +139,10 @@ MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW); MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)"); +module_param_named(use_req_threshold, pvscsi_use_req_threshold, + bool, PVSCSI_RW); +MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)"); + static const struct pci_device_id pvscsi_pci_tbl[] = { { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) }, { 0 } @@ -177,6 +188,7 @@ static void pvscsi_release_context(struct pvscsi_adapter *adapter, struct pvscsi_ctx *ctx) { ctx->cmd = NULL; + ctx->abort_cmp = NULL; list_add(&ctx->list, &adapter->cmd_pool); } @@ -280,10 +292,15 @@ static int scsi_is_rw(unsigned char op) static void pvscsi_kick_io(const struct pvscsi_adapter *adapter, unsigned char op) { - if (scsi_is_rw(op)) - pvscsi_kick_rw_io(adapter); - else + if (scsi_is_rw(op)) { + struct PVSCSIRingsState *s = adapter->rings_state; + + if (!adapter->use_req_threshold || + s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold) + pvscsi_kick_rw_io(adapter); + } else { pvscsi_process_request_ring(adapter); + } } static void 
ll_adapter_reset(const struct pvscsi_adapter *adapter) @@ -487,6 +504,35 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter) } } +static int pvscsi_change_queue_depth(struct scsi_device *sdev, + int qdepth, + int reason) +{ + int max_depth; + struct Scsi_Host *shost = sdev->host; + + if (reason != SCSI_QDEPTH_DEFAULT) + /* + * We support only changing default. + */ + return -EOPNOTSUPP; + + max_depth = shost->can_queue; + if (!sdev->tagged_supported) + max_depth = 1; + if (qdepth > max_depth) + qdepth = max_depth; + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + + if (sdev->inquiry_len > 7) + sdev_printk(KERN_INFO, sdev, + "qdepth(%d), tagged(%d), simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n", + sdev->queue_depth, sdev->tagged_supported, + sdev->simple_tags, sdev->ordered_tags, + sdev->scsi_level, (sdev->inquiry[7] & 2) >> 1); + return sdev->queue_depth; +} + /* * Pull a completion descriptor off and pass the completion back * to the SCSI mid layer. @@ -496,15 +542,27 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, { struct pvscsi_ctx *ctx; struct scsi_cmnd *cmd; + struct completion *abort_cmp; u32 btstat = e->hostStatus; u32 sdstat = e->scsiStatus; ctx = pvscsi_get_context(adapter, e->context); cmd = ctx->cmd; + abort_cmp = ctx->abort_cmp; pvscsi_unmap_buffers(adapter, ctx); pvscsi_release_context(adapter, ctx); - cmd->result = 0; + if (abort_cmp) { + /* + * The command was requested to be aborted. Just signal that + * the request completed and swallow the actual cmd completion + * here. The abort handler will post a completion for this + * command indicating that it got successfully aborted. + */ + complete(abort_cmp); + return; + } + cmd->result = 0; if (sdstat != SAM_STAT_GOOD && (btstat == BTSTAT_SUCCESS || btstat == BTSTAT_LINKED_COMMAND_COMPLETED || @@ -726,6 +784,8 @@ static int pvscsi_abort(struct scsi_cmnd *cmd) struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); struct pvscsi_ctx *ctx; unsigned long flags; + int result = SUCCESS; + DECLARE_COMPLETION_ONSTACK(abort_cmp); scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n", adapter->host->host_no, cmd); @@ -748,13 +808,40 @@ static int pvscsi_abort(struct scsi_cmnd *cmd) goto out; } + /* + * Mark that the command has been requested to be aborted and issue + * the abort. + */ + ctx->abort_cmp = &abort_cmp; + pvscsi_abort_cmd(adapter, ctx); + spin_unlock_irqrestore(&adapter->hw_lock, flags); + /* Wait for 2 secs for the completion. */ + wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)); + spin_lock_irqsave(&adapter->hw_lock, flags); - pvscsi_process_completion_ring(adapter); + if (!completion_done(&abort_cmp)) { + /* + * Failed to abort the command, unmark the fact that it + * was requested to be aborted. + */ + ctx->abort_cmp = NULL; + result = FAILED; + scmd_printk(KERN_DEBUG, cmd, + "Failed to get completion for aborted cmd %p\n", + cmd); + goto out; + } + + /* + * Successfully aborted the command. 
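The abort handshake implemented above — park a completion pointer in the request context, fire the abort, wait with a timeout, and unmark on failure — maps naturally onto a condition variable. A user-space model (the 2 s timeout mirrors the driver; struct toy_ctx and both paths are illustrative):

```c
#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct toy_ctx {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;			/* ~ struct completion *abort_cmp */
};

/* The completion path signals the abort waiter instead of finishing
 * the command, just like pvscsi_complete_request() above. */
static void *completion_path(void *arg)
{
	struct toy_ctx *ctx = arg;

	pthread_mutex_lock(&ctx->lock);
	ctx->done = 1;			/* ~ complete(abort_cmp) */
	pthread_cond_signal(&ctx->cond);
	pthread_mutex_unlock(&ctx->lock);
	return NULL;
}

int main(void)
{
	struct toy_ctx ctx = { PTHREAD_MUTEX_INITIALIZER,
			       PTHREAD_COND_INITIALIZER, 0 };
	struct timespec to;
	pthread_t t;

	pthread_create(&t, NULL, completion_path, &ctx);

	clock_gettime(CLOCK_REALTIME, &to);
	to.tv_sec += 2;			/* the driver's 2 s timeout */
	pthread_mutex_lock(&ctx.lock);
	while (!ctx.done &&
	       pthread_cond_timedwait(&ctx.cond, &ctx.lock, &to) == 0)
		;
	printf(ctx.done ? "aborted\n" : "abort timed out\n");
	pthread_mutex_unlock(&ctx.lock);
	pthread_join(t, NULL);
	return 0;
}
```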
 
 /*
@@ -911,6 +998,7 @@ static struct scsi_host_template pvscsi_template = {
	.dma_boundary			= UINT_MAX,
	.max_sectors			= 0xffff,
	.use_clustering			= ENABLE_CLUSTERING,
+	.change_queue_depth		= pvscsi_change_queue_depth,
	.eh_abort_handler		= pvscsi_abort,
	.eh_device_reset_handler	= pvscsi_device_reset,
	.eh_bus_reset_handler		= pvscsi_bus_reset,
@@ -1034,6 +1122,34 @@ static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
	return 1;
 }
 
+static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter,
+				       bool enable)
+{
+	u32 val;
+
+	if (!pvscsi_use_req_threshold)
+		return false;
+
+	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
+			 PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
+	val = pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS);
+	if (val == -1) {
+		printk(KERN_INFO "vmw_pvscsi: device does not support req_threshold\n");
+		return false;
+	} else {
+		struct PVSCSICmdDescSetupReqCall cmd_msg = { 0 };
+		cmd_msg.enable = enable;
+		printk(KERN_INFO
+		       "vmw_pvscsi: %sabling reqCallThreshold\n",
+		       enable ? "en" : "dis");
+		pvscsi_write_cmd_desc(adapter,
+				      PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
+				      &cmd_msg, sizeof(cmd_msg));
+		return pvscsi_reg_read(adapter,
+				       PVSCSI_REG_OFFSET_COMMAND_STATUS) != 0;
+	}
+}
+
 static irqreturn_t pvscsi_isr(int irq, void *devp)
 {
	struct pvscsi_adapter *adapter = devp;
@@ -1236,11 +1352,12 @@ exit:
 static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
	struct pvscsi_adapter *adapter;
-	struct Scsi_Host *host;
-	struct device *dev;
+	struct pvscsi_adapter adapter_temp;
+	struct Scsi_Host *host = NULL;
	unsigned int i;
	unsigned long flags = 0;
	int error;
+	u32 max_id;
 
	error = -ENODEV;
 
@@ -1258,34 +1375,19 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		goto out_disable_device;
	}
 
-	pvscsi_template.can_queue =
-		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
-		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
-	pvscsi_template.cmd_per_lun =
-		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
-	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
-	if (!host) {
-		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
-		goto out_disable_device;
-	}
-
-	adapter = shost_priv(host);
+	/*
+	 * Let's use a temp pvscsi_adapter struct until we find the number of
+	 * targets on the adapter, after that we will switch to the real
+	 * allocated struct.
+	 */
+	adapter = &adapter_temp;
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev  = pdev;
-	adapter->host = host;
-
-	spin_lock_init(&adapter->hw_lock);
-
-	host->max_channel = 0;
-	host->max_id      = 16;
-	host->max_lun     = 1;
-	host->max_cmd_len = 16;
-
	adapter->rev = pdev->revision;
 
	if (pci_request_regions(pdev, "vmw_pvscsi")) {
		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
-		goto out_free_host;
+		goto out_disable_device;
	}
 
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1301,7 +1403,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_ERR
		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
-		goto out_release_resources;
+		goto out_release_resources_and_disable;
	}
 
	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
@@ -1310,10 +1412,60 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		printk(KERN_ERR
		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
		       i, PVSCSI_MEM_SPACE_SIZE);
-		goto out_release_resources;
+		goto out_release_resources_and_disable;
	}
 
	pci_set_master(pdev);
+
+	/*
+	 * Ask the device for max number of targets before deciding the
+	 * default pvscsi_ring_pages value.
+	 */
+	max_id = pvscsi_get_max_targets(adapter);
+	printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id);
+
+	if (pvscsi_ring_pages == 0)
+		/*
+		 * Set the right default value. Up to 16 it is 8, above it is
+		 * max.
+		 */
+		pvscsi_ring_pages = (max_id > 16) ?
+			PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
+			PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
+	printk(KERN_INFO
+	       "vmw_pvscsi: setting ring_pages to %d\n",
+	       pvscsi_ring_pages);
+
+	pvscsi_template.can_queue =
+		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
+		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
+	pvscsi_template.cmd_per_lun =
+		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
+	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
+	if (!host) {
+		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
+		goto out_release_resources_and_disable;
+	}
+
+	/*
+	 * Let's use the real pvscsi_adapter struct here onwards.
+	 */
+	adapter = shost_priv(host);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev  = pdev;
+	adapter->host = host;
+	/*
+	 * Copy back what we already have to the allocated adapter struct.
+	 */
+	adapter->rev = adapter_temp.rev;
+	adapter->mmioBase = adapter_temp.mmioBase;
+
+	spin_lock_init(&adapter->hw_lock);
+	host->max_channel = 0;
+	host->max_lun     = 1;
+	host->max_cmd_len = 16;
+	host->max_id      = max_id;
+
	pci_set_drvdata(pdev, host);
 
	ll_adapter_reset(adapter);
@@ -1327,13 +1479,6 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	}
 
	/*
-	 * Ask the device for max number of targets.
-	 */
-	host->max_id = pvscsi_get_max_targets(adapter);
-	dev = pvscsi_dev(adapter);
-	dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);
-
-	/*
	 * From this point on we should reset the adapter if anything goes
	 * wrong.
	 */
@@ -1373,6 +1518,10 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		flags = IRQF_SHARED;
	}
 
+	adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
+	printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
+	       adapter->use_req_threshold ? "en" : "dis");
+
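Probe ordering is the subtle part of this rework: can_queue and the ring sizing must be fixed before scsi_host_alloc(), yet the answer comes from the device itself. The patch solves it by running the early queries through a throwaway adapter struct on the stack and copying the few live fields into the real, host-embedded struct afterwards. In outline (a sketch under assumed names — struct ctx, sketch_template, probe_sketch() and query_max_targets() are all hypothetical stand-ins, the last for pvscsi_get_max_targets()):

#include <linux/pci.h>
#include <scsi/scsi_host.h>

struct ctx {
	void __iomem *mmio;
	u32 rev;
};

static struct scsi_host_template sketch_template;	/* sized before alloc */

static u32 query_max_targets(struct ctx *c);		/* hypothetical */

static int probe_sketch(struct pci_dev *pdev)
{
	struct ctx tmp = { 0 };		/* just enough state to talk to hw */
	struct Scsi_Host *host;
	u32 max_id;

	tmp.mmio = pci_iomap(pdev, 0, 0);	/* map the whole BAR */
	if (!tmp.mmio)
		return -ENODEV;
	tmp.rev = pdev->revision;

	max_id = query_max_targets(&tmp);
	sketch_template.can_queue = max_id > 16 ? 512 : 256;	/* made up */

	host = scsi_host_alloc(&sketch_template, sizeof(struct ctx));
	if (!host) {
		pci_iounmap(pdev, tmp.mmio);
		return -ENOMEM;
	}

	/* switch to the embedded ctx, carrying over what is already set */
	*(struct ctx *)shost_priv(host) = tmp;
	host->max_id = max_id;
	return 0;
}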
"en" : "dis"); + error = request_irq(adapter->irq, pvscsi_isr, flags, "vmw_pvscsi", adapter); if (error) { @@ -1402,13 +1551,15 @@ out_reset_adapter: ll_adapter_reset(adapter); out_release_resources: pvscsi_release_resources(adapter); -out_free_host: scsi_host_put(host); out_disable_device: - pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); return error; + +out_release_resources_and_disable: + pvscsi_release_resources(adapter); + goto out_disable_device; } static void __pvscsi_shutdown(struct pvscsi_adapter *adapter) @@ -1445,7 +1596,6 @@ static void pvscsi_remove(struct pci_dev *pdev) scsi_host_put(host); - pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); } diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index 3546e8662e3..ce458885127 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h @@ -1,7 +1,7 @@ /* * VMware PVSCSI header file * - * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -26,7 +26,7 @@ #include <linux/types.h> -#define PVSCSI_DRIVER_VERSION_STRING "1.0.2.0-k" +#define PVSCSI_DRIVER_VERSION_STRING "1.0.5.0-k" #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128 @@ -117,8 +117,9 @@ enum PVSCSICommands { PVSCSI_CMD_CONFIG = 7, PVSCSI_CMD_SETUP_MSG_RING = 8, PVSCSI_CMD_DEVICE_UNPLUG = 9, + PVSCSI_CMD_SETUP_REQCALLTHRESHOLD = 10, - PVSCSI_CMD_LAST = 10 /* has to be last */ + PVSCSI_CMD_LAST = 11 /* has to be last */ }; /* @@ -141,6 +142,14 @@ struct PVSCSICmdDescConfigCmd { u32 _pad; } __packed; +/* + * Command descriptor for PVSCSI_CMD_SETUP_REQCALLTHRESHOLD -- + */ + +struct PVSCSICmdDescSetupReqCall { + u32 enable; +} __packed; + enum PVSCSIConfigPageType { PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958, PVSCSI_CONFIG_PAGE_PHY = 0x1959, @@ -261,7 +270,9 @@ struct PVSCSIRingsState { u32 cmpConsIdx; u32 cmpNumEntriesLog2; - u8 _pad[104]; + u32 reqCallThreshold; + + u8 _pad[100]; u32 msgProdIdx; u32 msgConsIdx; diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index c0ee4ea28a1..41883a87931 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c @@ -2054,22 +2054,16 @@ wd33c93_init(struct Scsi_Host *instance, const wd33c93_regs regs, printk(" Version %s - %s\n", WD33C93_VERSION, WD33C93_DATE); } -int -wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off, int len, int in) +int wd33c93_write_info(struct Scsi_Host *instance, char *buf, int len) { - #ifdef PROC_INTERFACE - char *bp; - char tbuf[128]; struct WD33C93_hostdata *hd; - struct scsi_cmnd *cmd; int x; - static int stop = 0; hd = (struct WD33C93_hostdata *) instance->hostdata; -/* If 'in' is TRUE we need to _read_ the proc file. 
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index c0ee4ea28a1..41883a87931 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -2054,22 +2054,16 @@ wd33c93_init(struct Scsi_Host *instance, const wd33c93_regs regs,
		printk(" Version %s - %s\n", WD33C93_VERSION, WD33C93_DATE);
 }
 
-int
-wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off, int len, int in)
+int wd33c93_write_info(struct Scsi_Host *instance, char *buf, int len)
 {
-
 #ifdef PROC_INTERFACE
-
	char *bp;
-	char tbuf[128];
	struct WD33C93_hostdata *hd;
-	struct scsi_cmnd *cmd;
	int x;
-	static int stop = 0;
 
	hd = (struct WD33C93_hostdata *) instance->hostdata;
 
-/* If 'in' is TRUE we need to _read_ the proc file.  We accept the following
+/* We accept the following
  * keywords (same format as command-line, but arguments are not optional):
  *    debug
  *    disconnect
  *    period
  *    resync
  *    proc
  *    nodma
  *    level2
  *    burst
  *    fast
  *    nosync
  */
 
-	if (in) {
-		buf[len] = '\0';
-		for (bp = buf; *bp; ) {
-			while (',' == *bp || ' ' == *bp)
-				++bp;
-			if (!strncmp(bp, "debug:", 6)) {
-				hd->args = simple_strtoul(bp+6, &bp, 0) & DB_MASK;
-			} else if (!strncmp(bp, "disconnect:", 11)) {
-				x = simple_strtoul(bp+11, &bp, 0);
-				if (x < DIS_NEVER || x > DIS_ALWAYS)
-					x = DIS_ADAPTIVE;
-				hd->disconnect = x;
-			} else if (!strncmp(bp, "period:", 7)) {
+	buf[len] = '\0';
+	for (bp = buf; *bp; ) {
+		while (',' == *bp || ' ' == *bp)
+			++bp;
+		if (!strncmp(bp, "debug:", 6)) {
+			hd->args = simple_strtoul(bp+6, &bp, 0) & DB_MASK;
+		} else if (!strncmp(bp, "disconnect:", 11)) {
+			x = simple_strtoul(bp+11, &bp, 0);
+			if (x < DIS_NEVER || x > DIS_ALWAYS)
+				x = DIS_ADAPTIVE;
+			hd->disconnect = x;
+		} else if (!strncmp(bp, "period:", 7)) {
+			x = simple_strtoul(bp+7, &bp, 0);
+			hd->default_sx_per =
+			    hd->sx_table[round_period((unsigned int) x,
+						      hd->sx_table)].period_ns;
+		} else if (!strncmp(bp, "resync:", 7)) {
+			set_resync(hd, (int)simple_strtoul(bp+7, &bp, 0));
+		} else if (!strncmp(bp, "proc:", 5)) {
+			hd->proc = simple_strtoul(bp+5, &bp, 0);
+		} else if (!strncmp(bp, "nodma:", 6)) {
+			hd->no_dma = simple_strtoul(bp+6, &bp, 0);
+		} else if (!strncmp(bp, "level2:", 7)) {
+			hd->level2 = simple_strtoul(bp+7, &bp, 0);
+		} else if (!strncmp(bp, "burst:", 6)) {
+			hd->dma_mode =
+			    simple_strtol(bp+6, &bp, 0) ? CTRL_BURST:CTRL_DMA;
+		} else if (!strncmp(bp, "fast:", 5)) {
+			x = !!simple_strtol(bp+5, &bp, 0);
+			if (x != hd->fast)
+				set_resync(hd, 0xff);
+			hd->fast = x;
+		} else if (!strncmp(bp, "nosync:", 7)) {
			x = simple_strtoul(bp+7, &bp, 0);
-				hd->default_sx_per =
-				    hd->sx_table[round_period((unsigned int) x,
-							      hd->sx_table)].period_ns;
-			} else if (!strncmp(bp, "resync:", 7)) {
-				set_resync(hd, (int)simple_strtoul(bp+7, &bp, 0));
-			} else if (!strncmp(bp, "proc:", 5)) {
-				hd->proc = simple_strtoul(bp+5, &bp, 0);
-			} else if (!strncmp(bp, "nodma:", 6)) {
-				hd->no_dma = simple_strtoul(bp+6, &bp, 0);
-			} else if (!strncmp(bp, "level2:", 7)) {
-				hd->level2 = simple_strtoul(bp+7, &bp, 0);
-			} else if (!strncmp(bp, "burst:", 6)) {
-				hd->dma_mode =
-				    simple_strtol(bp+6, &bp, 0) ? CTRL_BURST:CTRL_DMA;
-			} else if (!strncmp(bp, "fast:", 5)) {
-				x = !!simple_strtol(bp+5, &bp, 0);
-				if (x != hd->fast)
-					set_resync(hd, 0xff);
-				hd->fast = x;
-			} else if (!strncmp(bp, "nosync:", 7)) {
-				x = simple_strtoul(bp+7, &bp, 0);
-				set_resync(hd, x ^ hd->no_sync);
-				hd->no_sync = x;
-			} else {
-				break;	/* unknown keyword,syntax-error,... */
-			}
+			set_resync(hd, x ^ hd->no_sync);
+			hd->no_sync = x;
+		} else {
+			break;	/* unknown keyword,syntax-error,... */
		}
-		return len;
	}
+	return len;
+#else
+	return 0;
+#endif
+}
+
+int
+wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
+{
+#ifdef PROC_INTERFACE
+	struct WD33C93_hostdata *hd;
+	struct scsi_cmnd *cmd;
+	int x;
+
+	hd = (struct WD33C93_hostdata *) instance->hostdata;
 
	spin_lock_irq(&hd->lock);
-	bp = buf;
-	*bp = '\0';
-	if (hd->proc & PR_VERSION) {
-		sprintf(tbuf, "\nVersion %s - %s.",
+	if (hd->proc & PR_VERSION)
+		seq_printf(m, "\nVersion %s - %s.",
			WD33C93_VERSION, WD33C93_DATE);
-		strcat(bp, tbuf);
-	}
+
	if (hd->proc & PR_INFO) {
-		sprintf(tbuf, "\nclock_freq=%02x no_sync=%02x no_dma=%d"
+		seq_printf(m, "\nclock_freq=%02x no_sync=%02x no_dma=%d"
			" dma_mode=%02x fast=%d",
			hd->clock_freq, hd->no_sync, hd->no_dma, hd->dma_mode, hd->fast);
-		strcat(bp, tbuf);
-		strcat(bp, "\nsync_xfer[] = ");
-		for (x = 0; x < 7; x++) {
-			sprintf(tbuf, "\t%02x", hd->sync_xfer[x]);
-			strcat(bp, tbuf);
-		}
-		strcat(bp, "\nsync_stat[] = ");
-		for (x = 0; x < 7; x++) {
-			sprintf(tbuf, "\t%02x", hd->sync_stat[x]);
-			strcat(bp, tbuf);
-		}
+		seq_printf(m, "\nsync_xfer[] = ");
+		for (x = 0; x < 7; x++)
+			seq_printf(m, "\t%02x", hd->sync_xfer[x]);
+		seq_printf(m, "\nsync_stat[] = ");
+		for (x = 0; x < 7; x++)
+			seq_printf(m, "\t%02x", hd->sync_stat[x]);
	}
 #ifdef PROC_STATISTICS
	if (hd->proc & PR_STATISTICS) {
-		strcat(bp, "\ncommands issued:    ");
-		for (x = 0; x < 7; x++) {
-			sprintf(tbuf, "\t%ld", hd->cmd_cnt[x]);
-			strcat(bp, tbuf);
-		}
-		strcat(bp, "\ndisconnects allowed:");
-		for (x = 0; x < 7; x++) {
-			sprintf(tbuf, "\t%ld", hd->disc_allowed_cnt[x]);
-			strcat(bp, tbuf);
-		}
-		strcat(bp, "\ndisconnects done:   ");
-		for (x = 0; x < 7; x++) {
-			sprintf(tbuf, "\t%ld", hd->disc_done_cnt[x]);
-			strcat(bp, tbuf);
-		}
-		sprintf(tbuf,
+		seq_printf(m, "\ncommands issued:    ");
+		for (x = 0; x < 7; x++)
+			seq_printf(m, "\t%ld", hd->cmd_cnt[x]);
+		seq_printf(m, "\ndisconnects allowed:");
+		for (x = 0; x < 7; x++)
+			seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]);
+		seq_printf(m, "\ndisconnects done:   ");
+		for (x = 0; x < 7; x++)
+			seq_printf(m, "\t%ld", hd->disc_done_cnt[x]);
+		seq_printf(m,
			"\ninterrupts: %ld, DATA_PHASE ints: %ld DMA, %ld PIO",
			hd->int_cnt, hd->dma_cnt, hd->pio_cnt);
-		strcat(bp, tbuf);
	}
 #endif
	if (hd->proc & PR_CONNECTED) {
-		strcat(bp, "\nconnected:     ");
+		seq_printf(m, "\nconnected:     ");
		if (hd->connected) {
			cmd = (struct scsi_cmnd *) hd->connected;
-			sprintf(tbuf, " %d:%d(%02x)",
+			seq_printf(m, " %d:%d(%02x)",
				cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
-			strcat(bp, tbuf);
		}
	}
	if (hd->proc & PR_INPUTQ) {
-		strcat(bp, "\ninput_Q:       ");
+		seq_printf(m, "\ninput_Q:       ");
		cmd = (struct scsi_cmnd *) hd->input_Q;
		while (cmd) {
-			sprintf(tbuf, " %d:%d(%02x)",
+			seq_printf(m, " %d:%d(%02x)",
				cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
-			strcat(bp, tbuf);
			cmd = (struct scsi_cmnd *) cmd->host_scribble;
		}
	}
	if (hd->proc & PR_DISCQ) {
-		strcat(bp, "\ndisconnected_Q:");
+		seq_printf(m, "\ndisconnected_Q:");
		cmd = (struct scsi_cmnd *) hd->disconnected_Q;
		while (cmd) {
-			sprintf(tbuf, " %d:%d(%02x)",
+			seq_printf(m, " %d:%d(%02x)",
				cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
-			strcat(bp, tbuf);
			cmd = (struct scsi_cmnd *) cmd->host_scribble;
		}
	}
-	strcat(bp, "\n");
+	seq_printf(m, "\n");
	spin_unlock_irq(&hd->lock);
-	*start = buf;
-	if (stop) {
-		stop = 0;
-		return 0;
-	}
-	if (off > 0x40000)	/* ALWAYS stop after 256k bytes have been read */
-		stop = 1;
-	if (hd->proc & PR_STOP)	/* stop every other time */
-		stop = 1;
-	return strlen(bp);
-
-#else			/* PROC_INTERFACE */
-
-	return 0;
-
 #endif			/* PROC_INTERFACE */
-
+	return 0;
 }
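Both wd33c93 hooks now follow the split proc interface that replaced the old single proc_info() callback: reads go through ->show_info(), where the seq_file core owns the buffer and all of the offset/stop bookkeeping the old code did by hand, and writes arrive whole through ->write_info(). The minimal shape of the pair, wired into a template (a sketch with made-up "demo" names, not taken from the patch):

#include <linux/seq_file.h>
#include <scsi/scsi_host.h>

static int demo_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	/* seq_printf() appends; no tbuf/strcat staging, no *start logic */
	seq_printf(m, "host %u up\n", host->host_no);
	return 0;
}

static int demo_write_info(struct Scsi_Host *host, char *buf, int len)
{
	/* parse buf[0..len) keywords here, as wd33c93_write_info() does */
	return len;			/* report everything consumed */
}

static struct scsi_host_template demo_template = {
	.proc_name	= "demo",
	.show_info	= demo_show_info,
	.write_info	= demo_write_info,
};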
 
 EXPORT_SYMBOL(wd33c93_host_reset);
@@ -2229,4 +2202,5 @@ EXPORT_SYMBOL(wd33c93_init);
 EXPORT_SYMBOL(wd33c93_abort);
 EXPORT_SYMBOL(wd33c93_queuecommand);
 EXPORT_SYMBOL(wd33c93_intr);
-EXPORT_SYMBOL(wd33c93_proc_info);
+EXPORT_SYMBOL(wd33c93_show_info);
+EXPORT_SYMBOL(wd33c93_write_info);
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
index 3b463d7304d..08abe508e9a 100644
--- a/drivers/scsi/wd33c93.h
+++ b/drivers/scsi/wd33c93.h
@@ -345,7 +345,8 @@ void wd33c93_init (struct Scsi_Host *instance, const wd33c93_regs regs,
 int wd33c93_abort (struct scsi_cmnd *cmd);
 int wd33c93_queuecommand (struct Scsi_Host *h, struct scsi_cmnd *cmd);
 void wd33c93_intr (struct Scsi_Host *instance);
-int wd33c93_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
+int wd33c93_show_info(struct seq_file *, struct Scsi_Host *);
+int wd33c93_write_info(struct Scsi_Host *, char *, int);
 int wd33c93_host_reset (struct scsi_cmnd *);
 
 #endif /* WD33C93_H */
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index d89a5dfd3ad..32674236fec 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1252,7 +1252,7 @@ static int wd7000_init(Adapter * host)
			return 0;
 
-	if (request_irq(host->irq, wd7000_intr, IRQF_DISABLED, "wd7000", host)) {
+	if (request_irq(host->irq, wd7000_intr, 0, "wd7000", host)) {
		printk("wd7000_init: can't get IRQ %d.\n", host->irq);
		return (0);
	}
@@ -1296,9 +1296,9 @@ static void wd7000_revision(Adapter * host)
 
 #undef SPRINTF
-#define SPRINTF(args...) { if (pos < (buffer + length)) pos += sprintf (pos, ## args); }
+#define SPRINTF(args...) { seq_printf(m, ## args); }
 
-static int wd7000_set_info(char *buffer, int length, struct Scsi_Host *host)
+static int wd7000_set_info(struct Scsi_Host *host, char *buffer, int length)
 {
	dprintk("Buffer = <%.*s>, length = %d\n", length, buffer, length);
@@ -1310,22 +1310,15 @@
 }
 
 
-static int wd7000_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout)
+static int wd7000_show_info(struct seq_file *m, struct Scsi_Host *host)
 {
	Adapter *adapter = (Adapter *)host->hostdata;
	unsigned long flags;
-	char *pos = buffer;
 #ifdef WD7000_DEBUG
	Mailbox *ogmbs, *icmbs;
	short count;
 #endif
 
-	/*
-	 * Has data been written to the file ?
-	 */
-	if (inout)
-		return (wd7000_set_info(buffer, length, host));
-
	spin_lock_irqsave(host->host_lock, flags);
	SPRINTF("Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2);
	SPRINTF("  IO base:      0x%x\n", adapter->iobase);
@@ -1368,17 +1361,7 @@
 
	spin_unlock_irqrestore(host->host_lock, flags);
 
-	/*
-	 * Calculate start of next buffer, and return value.
-	 */
-	*start = buffer + offset;
-
-	if ((pos - buffer) < offset)
-		return (0);
-	else if ((pos - buffer - offset) < length)
-		return (pos - buffer - offset);
-	else
-		return (length);
+	return 0;
 }
@@ -1413,7 +1396,8 @@ static __init int wd7000_detect(struct scsi_host_template *tpnt)
	for (i = 0; i < NUM_CONFIGS; biosptr[i++] = -1);
 
	tpnt->proc_name = "wd7000";
-	tpnt->proc_info = &wd7000_proc_info;
+	tpnt->show_info = &wd7000_show_info;
+	tpnt->write_info = wd7000_set_info;
 
	/*
	 * Set up SCB free list, which is shared by all adapters
@@ -1658,7 +1642,8 @@ MODULE_LICENSE("GPL");
 
 static struct scsi_host_template driver_template = {
	.proc_name		= "wd7000",
-	.proc_info		= wd7000_proc_info,
+	.show_info		= wd7000_show_info,
+	.write_info		= wd7000_set_info,
	.name			= "Western Digital WD-7000",
	.detect			= wd7000_detect,
	.release		= wd7000_release,
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index cbf3476c68c..aff31991aea 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -104,7 +104,7 @@ static int zorro7xx_init_one(struct zorro_dev *z,
	if (ioaddr > 0x01000000)
		hostdata->base = ioremap(ioaddr, zorro_resource_len(z));
	else
-		hostdata->base = (void __iomem *)ZTWO_VADDR(ioaddr);
+		hostdata->base = ZTWO_VADDR(ioaddr);
 
	hostdata->clock = 50;
	hostdata->chip710 = 1;
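The zorro7xx hunk is the caller-side fallout of an m68k/Amiga cleanup: ZTWO_VADDR() itself was retyped to return void __iomem *, so the local cast becomes redundant and sparse can type-check the address space at every call site. The macro presumably ends up along these lines (assumed shape, abbreviated from arch/m68k/include/asm/amigahw.h — zTwoBase is the Zorro II base offset defined there):

/* Assumed definition: produce the __iomem-qualified pointer directly,
 * instead of leaving the cast to each caller. */
#define ZTWO_VADDR(x) ((void __iomem *)(((unsigned long)(x)) + zTwoBase))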
