author | Nick Cheng <nick.cheng@areca.com.tw> | 2010-06-18 15:39:12 +0800 |
---|---|---|
committer | James Bottomley <James.Bottomley@suse.de> | 2010-07-27 12:01:53 -0500 |
commit | ae52e7f09ff509df11cd408eabe90132b6be1231 (patch) | |
tree | 601dd812c670fe1c586a514a6785324855fdc098 /drivers/scsi/arcmsr/arcmsr_hba.c | |
parent | f034260db330bb3ffc815fcb682b1c84aca09591 (diff) | |
[SCSI] arcmsr: Support 1024 scatter-gather list entries, improve host-driver behavior while the firmware is trapped, and improve the error handlers
1. Reorganize struct ARCMSR_CDB and struct CommandControlBlock to support
4 MB transfers via 1024 scatter-gather list entries (a sizing sketch
follows the diffstat below)
2. Restructure arcmsr_probe()
3. Add a driver mode for type B cards to help work around a firmware issue
4. Improve the driver's behavior while the firmware resets
5. Unify the naming of struct MessageUnit_B members across all OS drivers
6. Improve the error handlers arcmsr_bus_reset() and arcmsr_abort()
7. Fix arcmsr_queue_command() during the bus reset stage: pass commands
down to the firmware instead of blocking them
Signed-off-by: Nick Cheng <nick.cheng@areca.com.tw>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/arcmsr/arcmsr_hba.c')
-rw-r--r-- | drivers/scsi/arcmsr/arcmsr_hba.c | 1225 |
1 file changed, 620 insertions(+), 605 deletions(-)
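Before the diff, here is a standalone sketch of the CCB-pool sizing arithmetic that item 1 of the commit message introduces (it mirrors the new arcmsr_alloc_ccb_pool() in the diff below). The macro values and the 512-byte fixed CCB header are illustrative assumptions, not the real arcmsr.h definitions:

/*
 * Sketch of the per-CCB sizing logic added in arcmsr_alloc_ccb_pool().
 * Constant values and the fixed header size are assumptions.
 */
#include <stdio.h>
#include <stdint.h>

#define ARCMSR_DEFAULT_SG_ENTRIES	38		/* assumed default */
#define ARCMSR_MAX_XFER_LEN		(38 * 4096)	/* assumed default */
#define ARCMSR_CDB_SG_PAGE_LENGTH	256		/* assumed base unit, in KB */
#define ARCMSR_MAX_FREECCB_NUM		320		/* assumed pool depth */

/* same effect as the kernel's roundup() for these arguments */
#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

struct SG64ENTRY { uint32_t length, addr_lo, addr_hi; };	/* 12 bytes */
struct CommandControlBlock { char fixed_part[512]; };		/* assumed header */

int main(void)
{
	uint32_t firm_cfg_version = 0x0403;	/* example firmware-reported value */
	unsigned long max_xfer_len = ARCMSR_MAX_XFER_LEN;
	unsigned long max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;

	/* config versions >= 3 report how far the SG page unit scales up */
	if ((firm_cfg_version & 0xFF) >= 3) {
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH <<
				((firm_cfg_version >> 8) & 0xFF)) * 1024;
		max_sg_entrys = max_xfer_len / 4096;	/* one entry per 4 KB page */
	}

	/* pad each CCB to a 32-byte boundary so consecutive CCB DMA addresses
	 * stay aligned (their address >> 5 is what gets posted), then size the
	 * whole uncached pool */
	unsigned long ccbsize = roundup(sizeof(struct CommandControlBlock) +
			max_sg_entrys * sizeof(struct SG64ENTRY), 32);
	unsigned long uncache_size = ccbsize * ARCMSR_MAX_FREECCB_NUM;

	printf("max transfer %lu bytes, %lu SG entries\n", max_xfer_len, max_sg_entrys);
	printf("per-CCB size %lu bytes, pool size %lu bytes\n", ccbsize, uncache_size);
	return 0;
}

With a firmware-reported config version of 0x0403 the SG page unit scales by 1 << 4, which lands exactly on the 4 MB / 1024-entry case named in the subject line.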
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index ffa54792bb3..ba33473b27a 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -58,7 +58,6 @@
 #include <linux/timer.h>
 #include <linux/pci.h>
 #include <linux/aer.h>
-#include <linux/slab.h>
 #include <asm/dma.h>
 #include <asm/io.h>
 #include <asm/system.h>
@@ -71,20 +70,13 @@
 #include <scsi/scsi_transport.h>
 #include <scsi/scsicam.h>
 #include "arcmsr.h"
-
-#ifdef CONFIG_SCSI_ARCMSR_RESET
-	static int sleeptime = 20;
-	static int retrycount = 12;
-	module_param(sleeptime, int, S_IRUGO|S_IWUSR);
-	MODULE_PARM_DESC(sleeptime, "The waiting period for FW ready while bus reset");
-	module_param(retrycount, int, S_IRUGO|S_IWUSR);
-	MODULE_PARM_DESC(retrycount, "The retry count for FW ready while bus reset");
-#endif
-MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
-MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Bus Adapter");
+MODULE_AUTHOR("Nick Cheng <support@areca.com.tw>");
+MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx) SATA/SAS RAID Host Bus Adapter");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
-
+static int sleeptime = 20;
+static int retrycount = 12;
+wait_queue_head_t wait_q;
 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
					struct scsi_cmnd *cmd);
 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
@@ -108,7 +100,7 @@ static void arcmsr_request_device_map(unsigned long pacb);
 static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
 static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
 static void arcmsr_message_isr_bh_fn(struct work_struct *work);
-static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode);
+static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
 static const char *arcmsr_info(struct Scsi_Host *);
@@ -135,10 +127,10 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
 	.eh_bus_reset_handler	= arcmsr_bus_reset,
 	.bios_param		= arcmsr_bios_param,
 	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
-	.can_queue		= ARCMSR_MAX_OUTSTANDING_CMD,
+	.can_queue		= ARCMSR_MAX_FREECCB_NUM,
 	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
-	.sg_tablesize		= ARCMSR_MAX_SG_ENTRIES,
-	.max_sectors		= ARCMSR_MAX_XFER_SECTORS,
+	.sg_tablesize		= ARCMSR_DEFAULT_SG_ENTRIES,
+	.max_sectors		= ARCMSR_MAX_XFER_SECTORS_C,
 	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= arcmsr_host_attrs,
@@ -162,6 +154,7 @@ static struct pci_device_id arcmsr_device_id_table[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880)},
 	{0, 0},	/* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
@@ -173,15 +166,72 @@ static struct pci_driver arcmsr_pci_driver = {
 	.shutdown		= arcmsr_shutdown,
 };
 
+static void arcmsr_free_mu(struct AdapterControlBlock *acb)
+{
+	switch (acb->adapter_type) {
+	case ACB_ADAPTER_TYPE_A:
+		break;
+	case ACB_ADAPTER_TYPE_B:{
+		struct MessageUnit_B *reg = acb->pmuB;
+		dma_free_coherent(&acb->pdev->dev,
+			sizeof(struct MessageUnit_B),
+			reg, acb->dma_coherent_handle_hbb_mu);
+	}
+	}
+}
+
+static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
+{
+	struct pci_dev *pdev = acb->pdev;
+
+	switch (acb->adapter_type) {
+	case ACB_ADAPTER_TYPE_A:{
+		acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+		if (!acb->pmuA) {
+			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
+			return false;
+		}
+		break;
+	}
+	case ACB_ADAPTER_TYPE_B:{
+		void __iomem *mem_base0, *mem_base1;
+		mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+		if (!mem_base0) {
+			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
+			return false;
+		}
+		mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
+		if (!mem_base1) {
+			iounmap(mem_base0);
+			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
+			return false;
+		}
+		acb->mem_base0 = mem_base0;
+		acb->mem_base1 = mem_base1;
+	}
+	}
+	return true;
+}
+
+static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
+{
+	switch (acb->adapter_type) {
+	case ACB_ADAPTER_TYPE_A:{
+		iounmap(acb->pmuA);
+	}
+	case ACB_ADAPTER_TYPE_B:{
+		iounmap(acb->mem_base0);
+		iounmap(acb->mem_base1);
+	}
+	}
+}
+
 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
 {
 	irqreturn_t handle_state;
 	struct AdapterControlBlock *acb = dev_id;
 
-	spin_lock(acb->host->host_lock);
 	handle_state = arcmsr_interrupt(acb);
-	spin_unlock(acb->host->host_lock);
-
 	return handle_state;
 }
@@ -218,6 +268,7 @@ static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
 	struct pci_dev *pdev = acb->pdev;
 	u16 dev_id;
 	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
+	acb->dev_id = dev_id;
 	switch (dev_id) {
 	case 0x1201 : {
 		acb->adapter_type = ACB_ADAPTER_TYPE_B;
@@ -228,141 +279,210 @@ static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
 	}
 }
 
-static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
+{
+	struct MessageUnit_A __iomem *reg = acb->pmuA;
+	uint32_t Index;
+	uint8_t Retries = 0x00;
+
+	do {
+		for (Index = 0; Index < 100; Index++) {
+			if (readl(&reg->outbound_intstatus) &
+				ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
+				writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
+					&reg->outbound_intstatus);
+				return 0x00;
+			}
+			msleep(10);
+		} /*max 1 seconds*/
+
+	} while (Retries++ < 20);/*max 20 sec*/
+	return 0xff;
+}
+
+static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
 {
+	struct MessageUnit_B *reg = acb->pmuB;
+	uint32_t Index;
+	uint8_t Retries = 0x00;
+	do {
+		for (Index = 0; Index < 100; Index++) {
+			if (readl(reg->iop2drv_doorbell)
+				& ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
+				writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
+					, reg->iop2drv_doorbell);
+				writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
+				return 0x00;
+			}
+			msleep(10);
+		} /*max 1 seconds*/
+
+	} while (Retries++ < 20);/*max 20 sec*/
+	return 0xff;
+}
+
+static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
+{
+	struct MessageUnit_A __iomem *reg = acb->pmuA;
+	int retry_count = 30;
+
+	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
+	do {
+		if (!arcmsr_hba_wait_msgint_ready(acb))
+			break;
+		else {
+			retry_count--;
+			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
+			timeout, retry count down = %d \n", acb->host->host_no, retry_count);
+		}
+	} while (retry_count != 0);
+}
+
+static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
+{
+	struct MessageUnit_B *reg = acb->pmuB;
+	int retry_count = 30;
+
+	writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
+	do {
+		if (!arcmsr_hbb_wait_msgint_ready(acb))
+			break;
+		else {
+			retry_count--;
+			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
+			timeout,retry count down = %d \n", acb->host->host_no, retry_count);
+		}
+	} while (retry_count != 0);
+}
+
+static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
+{
 	switch (acb->adapter_type) {
 
 	case ACB_ADAPTER_TYPE_A: {
-		struct pci_dev *pdev = acb->pdev;
-		void *dma_coherent;
-		dma_addr_t dma_coherent_handle, dma_addr;
-		struct CommandControlBlock *ccb_tmp;
-		int i, j;
+		arcmsr_flush_hba_cache(acb);
+	}
+	break;
 
-		acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
-		if (!acb->pmuA) {
-			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
-							acb->host->host_no);
-			return -ENOMEM;
+	case ACB_ADAPTER_TYPE_B: {
+		arcmsr_flush_hbb_cache(acb);
 		}
+	}
+}
 
-		dma_coherent = dma_alloc_coherent(&pdev->dev,
-			ARCMSR_MAX_FREECCB_NUM *
-			sizeof (struct CommandControlBlock) + 0x20,
-			&dma_coherent_handle, GFP_KERNEL);
+static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
+{
+	struct pci_dev *pdev = acb->pdev;
+	switch (acb->adapter_type) {
+
+	case ACB_ADAPTER_TYPE_A: {
+		void *dma_coherent;
+		dma_addr_t dma_coherent_handle;
+		struct CommandControlBlock *ccb_tmp;
+		int i = 0, j = 0;
+		dma_addr_t cdb_phyaddr;
+		unsigned long roundup_ccbsize = 0;
+		unsigned long max_xfer_len;
+		unsigned long max_sg_entrys;
+		uint32_t firm_config_version;
+
+		for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
+			for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
+				acb->devstate[i][j] = ARECA_RAID_GONE;
+
+		max_xfer_len = ARCMSR_MAX_XFER_LEN;
+		max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
+		firm_config_version = acb->firm_cfg_version;
+		if ((firm_config_version & 0xFF) >= 3) {
+			max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 16M byte */
+			max_sg_entrys = (max_xfer_len/4096);
+		}
+		acb->host->max_sectors = max_xfer_len/512;
+		acb->host->sg_tablesize = max_sg_entrys;
+		roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + max_sg_entrys * sizeof(struct SG64ENTRY), 32);
+		acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
+		dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
 		if (!dma_coherent) {
-			iounmap(acb->pmuA);
+			printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error \n", acb->host->host_no);
 			return -ENOMEM;
 		}
-
+		memset(dma_coherent, 0, acb->uncache_size);
 		acb->dma_coherent = dma_coherent;
 		acb->dma_coherent_handle = dma_coherent_handle;
-
-		if (((unsigned long)dma_coherent & 0x1F)) {
-			dma_coherent = dma_coherent +
-				(0x20 - ((unsigned long)dma_coherent & 0x1F));
-			dma_coherent_handle = dma_coherent_handle +
-				(0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
-		}
-
-		dma_addr = dma_coherent_handle;
 		ccb_tmp = (struct CommandControlBlock *)dma_coherent;
+		acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
-			ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
-			ccb_tmp->acb = acb;
+			cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
+			ccb_tmp->shifted_cdb_phyaddr = cdb_phyaddr >> 5;
 			acb->pccb_pool[i] = ccb_tmp;
+			ccb_tmp->acb = acb;
+			INIT_LIST_HEAD(&ccb_tmp->list);
 			list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
-			dma_addr = dma_addr + sizeof(struct CommandControlBlock);
-			ccb_tmp++;
-		}
-
-		acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
-		for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
-			for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
-				acb->devstate[i][j] = ARECA_RAID_GONE;
+			ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
+			dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
 		}
 		break;
-
+	}
 	case ACB_ADAPTER_TYPE_B: {
-		struct pci_dev *pdev = acb->pdev;
-		struct MessageUnit_B *reg;
-		void __iomem *mem_base0, *mem_base1;
 		void *dma_coherent;
-		dma_addr_t dma_coherent_handle, dma_addr;
+		dma_addr_t dma_coherent_handle;
 		struct CommandControlBlock *ccb_tmp;
-		int i, j;
-
-		dma_coherent = dma_alloc_coherent(&pdev->dev,
-			((ARCMSR_MAX_FREECCB_NUM *
-			sizeof(struct CommandControlBlock) + 0x20) +
-			sizeof(struct MessageUnit_B)),
+		uint32_t cdb_phyaddr;
+		unsigned int roundup_ccbsize = 0;
+		unsigned long max_xfer_len;
+		unsigned long max_sg_entrys;
+		unsigned long firm_config_version;
+		unsigned long max_freeccb_num = 0;
+		int i = 0, j = 0;
+
+		max_freeccb_num = ARCMSR_MAX_FREECCB_NUM;
+		max_xfer_len = ARCMSR_MAX_XFER_LEN;
+		max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
+		firm_config_version = acb->firm_cfg_version;
+		if ((firm_config_version & 0xFF) >= 3) {
+			max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH <<
+				((firm_config_version >> 8) & 0xFF)) * 1024;/* max 16M byte */
+			max_sg_entrys = (max_xfer_len/4096);/* max 4097 sg entry*/
+		}
+		acb->host->max_sectors = max_xfer_len / 512;
+		acb->host->sg_tablesize = max_sg_entrys;
+		roundup_ccbsize = roundup(sizeof(struct CommandControlBlock)+
+			(max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
+		acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
+		dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size,
 			&dma_coherent_handle, GFP_KERNEL);
-		if (!dma_coherent)
-			return -ENOMEM;
+		if (!dma_coherent) {
+			printk(KERN_NOTICE "DMA allocation failed...........................\n");
+			return -ENOMEM;
+		}
+		memset(dma_coherent, 0, acb->uncache_size);
 		acb->dma_coherent = dma_coherent;
 		acb->dma_coherent_handle = dma_coherent_handle;
-
-		if (((unsigned long)dma_coherent & 0x1F)) {
-			dma_coherent = dma_coherent +
-				(0x20 - ((unsigned long)dma_coherent & 0x1F));
-			dma_coherent_handle = dma_coherent_handle +
-				(0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
-		}
-
-		dma_addr = dma_coherent_handle;
 		ccb_tmp = (struct CommandControlBlock *)dma_coherent;
+		acb->vir2phy_offset = (unsigned long)dma_coherent -
+			(unsigned long)dma_coherent_handle;
 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
-			ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
-			ccb_tmp->acb = acb;
+			cdb_phyaddr = dma_coherent_handle +
+				offsetof(struct CommandControlBlock, arcmsr_cdb);
+			ccb_tmp->shifted_cdb_phyaddr = cdb_phyaddr >> 5;
 			acb->pccb_pool[i] = ccb_tmp;
+			ccb_tmp->acb = acb;
+			INIT_LIST_HEAD(&ccb_tmp->list);
 			list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
-			dma_addr = dma_addr + sizeof(struct CommandControlBlock);
-			ccb_tmp++;
-		}
-
-		reg = (struct MessageUnit_B *)(dma_coherent +
-			ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
-		acb->pmuB = reg;
-		mem_base0 = ioremap(pci_resource_start(pdev, 0),
-					pci_resource_len(pdev, 0));
-		if (!mem_base0)
-			goto out;
-
-		mem_base1 = ioremap(pci_resource_start(pdev, 2),
-					pci_resource_len(pdev, 2));
-		if (!mem_base1) {
-			iounmap(mem_base0);
-			goto out;
+			ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp +
+				roundup_ccbsize);
+			dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
 		}
-
-		reg->drv2iop_doorbell_reg = mem_base0 + ARCMSR_DRV2IOP_DOORBELL;
-		reg->drv2iop_doorbell_mask_reg = mem_base0 +
-						ARCMSR_DRV2IOP_DOORBELL_MASK;
-		reg->iop2drv_doorbell_reg = mem_base0 + ARCMSR_IOP2DRV_DOORBELL;
-		reg->iop2drv_doorbell_mask_reg = mem_base0 +
-						ARCMSR_IOP2DRV_DOORBELL_MASK;
-		reg->ioctl_wbuffer_reg = mem_base1 + ARCMSR_IOCTL_WBUFFER;
-		reg->ioctl_rbuffer_reg = mem_base1 + ARCMSR_IOCTL_RBUFFER;
-		reg->msgcode_rwbuffer_reg = mem_base1 + ARCMSR_MSGCODE_RWBUFFER;
-
-		acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
 		for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
 			for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
-				acb->devstate[i][j] = ARECA_RAID_GOOD;
+				acb->devstate[i][j] = ARECA_RAID_GONE;
 		}
 		break;
 	}
 	return 0;
-
-out:
-	dma_free_coherent(&acb->pdev->dev,
-		(ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
-		sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
-	return -ENOMEM;
 }
 
 static void arcmsr_message_isr_bh_fn(struct work_struct *work)
 {
@@ -411,8 +531,8 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
 	case ACB_ADAPTER_TYPE_B: {
 		struct MessageUnit_B *reg = acb->pmuB;
 		char *acb_dev_map = (char *)acb->device_map;
-		uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer_reg[0]);
-		char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer_reg[21]);
+		uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
+		char __iomem *devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
 		int target, lun;
 		struct scsi_device *psdev;
 		char diff;
@@ -447,8 +567,7 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
 	}
 }
 
-static int arcmsr_probe(struct pci_dev *pdev,
-	const struct pci_device_id *id)
+static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct Scsi_Host *host;
 	struct AdapterControlBlock *acb;
@@ -456,19 +575,13 @@ static int arcmsr_probe(struct pci_dev *pdev,
 	int error;
 
 	error = pci_enable_device(pdev);
-	if (error)
-		goto out;
-	pci_set_master(pdev);
-
-	host = scsi_host_alloc(&arcmsr_scsi_host_template,
-			sizeof(struct AdapterControlBlock));
+	if (error) {
+		return -ENODEV;
+	}
+	host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
 	if (!host) {
-		error = -ENOMEM;
-		goto out_disable_device;
+		goto pci_disable_dev;
 	}
-	acb = (struct AdapterControlBlock *)host->hostdata;
-	memset(acb, 0, sizeof (struct AdapterControlBlock));
-
 	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (error) {
 		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -476,126 +589,90 @@
 			printk(KERN_WARNING
 			       "scsi%d: No suitable DMA mask available\n",
 			       host->host_no);
-			goto out_host_put;
+			goto scsi_host_release;
 		}
 	}
+	init_waitqueue_head(&wait_q);
 	bus = pdev->bus->number;
 	dev_fun = pdev->devfn;
-	acb->host = host;
+	acb = (struct AdapterControlBlock *) host->hostdata;
+	memset(acb, 0, sizeof(struct AdapterControlBlock));
 	acb->pdev = pdev;
-	host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
+	acb->host = host;
 	host->max_lun = ARCMSR_MAX_TARGETLUN;
 	host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
 	host->max_cmd_len = 16;	/*this is issue of 64bit LBA, over 2T byte*/
-	host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
 	host->can_queue = ARCMSR_MAX_FREECCB_NUM;	/* max simultaneous cmds */
 	host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
 	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
 	host->unique_id = (bus << 8) | dev_fun;
-	host->irq = pdev->irq;
+	pci_set_drvdata(pdev, host);
+	pci_set_master(pdev);
 	error = pci_request_regions(pdev, "arcmsr");
 	if (error) {
-		goto out_host_put;
+		goto scsi_host_release;
 	}
-	arcmsr_define_adapter_type(acb);
-
+	spin_lock_init(&acb->eh_lock);
+	spin_lock_init(&acb->ccblist_lock);
 	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
 			ACB_F_MESSAGE_RQBUFFER_CLEARED |
 			ACB_F_MESSAGE_WQBUFFER_READED);
 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
 	INIT_LIST_HEAD(&acb->ccb_free_list);
-	INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
+	arcmsr_define_adapter_type(acb);
+	error = arcmsr_remap_pciregion(acb);
+	if (!error) {
+		goto pci_release_regs;
+	}
+	error = arcmsr_get_firmware_spec(acb);
+	if (!error) {
+		goto unmap_pci_region;
+	}
 	error = arcmsr_alloc_ccb_pool(acb);
-	if (error)
-		goto out_release_regions;
-
+	if (error) {
+		goto free_hbb_mu;
+	}
 	arcmsr_iop_init(acb);
-	error = request_irq(pdev->irq, arcmsr_do_interrupt,
-			IRQF_SHARED, "arcmsr", acb);
-	if (error)
-		goto out_free_ccb_pool;
-
-	pci_set_drvdata(pdev, host);
-	if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
-		host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
-
 	error = scsi_add_host(host, &pdev->dev);
-	if (error)
-		goto out_free_irq;
-
-	error = arcmsr_alloc_sysfs_attr(acb);
-	if (error)
-		goto out_free_sysfs;
-
+	if (error) {
+		goto RAID_controller_stop;
+	}
+	error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED, "arcmsr", acb);
+	if (error) {
+		goto scsi_host_remove;
+	}
+	host->irq = pdev->irq;
 	scsi_scan_host(host);
-	#ifdef CONFIG_SCSI_ARCMSR_AER
-	pci_enable_pcie_error_reporting(pdev);
-	#endif
+	INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
 	atomic_set(&acb->rq_map_token, 16);
-	acb->fw_state = true;
+	atomic_set(&acb->ante_token_value, 16);
+	acb->fw_flag = FW_NORMAL;
 	init_timer(&acb->eternal_timer);
-	acb->eternal_timer.expires = jiffies + msecs_to_jiffies(10*HZ);
+	acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
 	acb->eternal_timer.data = (unsigned long) acb;
 	acb->eternal_timer.function = &arcmsr_request_device_map;
 	add_timer(&acb->eternal_timer);
-
+	if (arcmsr_alloc_sysfs_attr(acb))
+		goto out_free_sysfs;
 	return 0;
 out_free_sysfs:
- out_free_irq:
-	free_irq(pdev->irq, acb);
- out_free_ccb_pool:
+scsi_host_remove:
+	scsi_remove_host(host);
+RAID_controller_stop:
+	arcmsr_stop_adapter_bgrb(acb);
+	arcmsr_flush_adapter_cache(acb);
 	arcmsr_free_ccb_pool(acb);
- out_release_regions:
+free_hbb_mu:
+	arcmsr_free_mu(acb);
+unmap_pci_region:
+	arcmsr_unmap_pciregion(acb);
+pci_release_regs:
 	pci_release_regions(pdev);
- out_host_put:
+scsi_host_release:
 	scsi_host_put(host);
- out_disable_device:
+pci_disable_dev:
 	pci_disable_device(pdev);
- out:
-	return error;
-}
-
-static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
-{
-	struct MessageUnit_A __iomem *reg = acb->pmuA;
-	uint32_t Index;
-	uint8_t Retries = 0x00;
-
-	do {
-		for (Index = 0; Index < 100; Index++) {
-			if (readl(&reg->outbound_intstatus) &
-					ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
-				writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
-					&reg->outbound_intstatus);
-				return 0x00;
-			}
-			msleep(10);
-		}/*max 1 seconds*/
-
-	} while (Retries++ < 20);/*max 20 sec*/
-	return 0xff;
-}
-
-static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
-{
-	struct MessageUnit_B *reg = acb->pmuB;
-	uint32_t Index;
-	uint8_t Retries = 0x00;
-
-	do {
-		for (Index = 0; Index < 100; Index++) {
-			if (readl(reg->iop2drv_doorbell_reg)
-				& ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
-				writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
-					, reg->iop2drv_doorbell_reg);
-				writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
-				return 0x00;
-			}
-			msleep(10);
-		}/*max 1 seconds*/
-
-	} while (Retries++ < 20);/*max 20 sec*/
-	return 0xff;
+	return -ENODEV;
 }
 
 static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
@@ -616,7 +693,7 @@ static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 
-	writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
+	writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
 	if (arcmsr_hbb_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE
 			"arcmsr%d: wait 'abort all outstanding command' timeout \n"
@@ -642,76 +719,41 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
 	return rtnval;
 }
 
+static bool arcmsr_hbb_enable_driver_mode(struct AdapterControlBlock *pacb)
+{
+	struct MessageUnit_B *reg = pacb->pmuB;
+
+	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
+	if (arcmsr_hbb_wait_msgint_ready(pacb)) {
+		printk(KERN_ERR "arcmsr%d: can't set driver mode. \n", pacb->host->host_no);
+		return false;
+}
+	return true;
+}
+
 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
 {
 	struct scsi_cmnd *pcmd = ccb->pcmd;
 
 	scsi_dma_unmap(pcmd);
-}
+	}
 
-static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
+static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
 {
 	struct AdapterControlBlock *acb = ccb->acb;
 	struct scsi_cmnd *pcmd = ccb->pcmd;
+	unsigned long flags;
 
+	atomic_dec(&acb->ccboutstandingcount);
 	arcmsr_pci_unmap_dma(ccb);
-	if (stand_flag == 1)
-		atomic_dec(&acb->ccboutstandingcount);
 	ccb->startdone = ARCMSR_CCB_DONE;
 	ccb->ccb_flags = 0;
+	spin_lock_irqsave(&acb->ccblist_lock, flags);
 	list_add_tail(&ccb->list, &acb->ccb_free_list);
+	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
 	pcmd->scsi_done(pcmd);
 }
 
-static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
-{
-	struct MessageUnit_A __iomem *reg = acb->pmuA;
-	int retry_count = 30;
-
-	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
-	do {
-		if (!arcmsr_hba_wait_msgint_ready(acb))
-			break;
-		else {
-			retry_count--;
-			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
-			timeout, retry count down = %d \n", acb->host->host_no, retry_count);
-		}
-	} while (retry_count != 0);
-}
-
-static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
-{
-	struct MessageUnit_B *reg = acb->pmuB;
-	int retry_count = 30;
-
-	writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg);
-	do {
-		if (!arcmsr_hbb_wait_msgint_ready(acb))
-			break;
-		else {
-			retry_count--;
-			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
-			timeout,retry count down = %d \n", acb->host->host_no, retry_count);
-		}
-	} while (retry_count != 0);
-}
-
-static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
-{
-	switch (acb->adapter_type) {
-
-	case ACB_ADAPTER_TYPE_A: {
-		arcmsr_flush_hba_cache(acb);
-	}
-	break;
-
-	case ACB_ADAPTER_TYPE_B: {
-		arcmsr_flush_hbb_cache(acb);
-	}
-	}
-}
-
 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
 {
@@ -745,15 +787,15 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
 	case ACB_ADAPTER_TYPE_B : {
 		struct MessageUnit_B *reg = acb->pmuB;
-		orig_mask = readl(reg->iop2drv_doorbell_mask_reg);
-		writel(0, reg->iop2drv_doorbell_mask_reg);
+		orig_mask = readl(reg->iop2drv_doorbell_mask);
+		writel(0, reg->iop2drv_doorbell_mask);
 		}
 		break;
 	}
 	return orig_mask;
 }
 
-static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \
+static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
 		struct CommandControlBlock *ccb, uint32_t flag_ccb)
 {
@@ -764,13 +806,13 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \
 	if (acb->devstate[id][lun] == ARECA_RAID_GONE)
 		acb->devstate[id][lun] = ARECA_RAID_GOOD;
 	ccb->pcmd->result = DID_OK << 16;
-	arcmsr_ccb_complete(ccb, 1);
+	arcmsr_ccb_complete(ccb);
 	} else {
 		switch (ccb->arcmsr_cdb.DeviceStatus) {
 		case ARCMSR_DEV_SELECT_TIMEOUT: {
 			acb->devstate[id][lun] = ARECA_RAID_GONE;
 			ccb->pcmd->result = DID_NO_CONNECT << 16;
-			arcmsr_ccb_complete(ccb, 1);
+			arcmsr_ccb_complete(ccb);
 		}
 		break;
@@ -779,14 +821,14 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \
 		case ARCMSR_DEV_INIT_FAIL: {
 			acb->devstate[id][lun] = ARECA_RAID_GONE;
 			ccb->pcmd->result = DID_BAD_TARGET << 16;
-			arcmsr_ccb_complete(ccb, 1);
+			arcmsr_ccb_complete(ccb);
 		}
 		break;
 
 		case ARCMSR_DEV_CHECK_CONDITION: {
 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
 			arcmsr_report_sense_info(ccb);
-			arcmsr_ccb_complete(ccb, 1);
+			arcmsr_ccb_complete(ccb);
 		}
 		break;
@@ -801,7 +843,7 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \
 				, ccb->arcmsr_cdb.DeviceStatus);
 				acb->devstate[id][lun] = ARECA_RAID_GONE;
 				ccb->pcmd->result = DID_NO_CONNECT << 16;
-				arcmsr_ccb_complete(ccb, 1);
+				arcmsr_ccb_complete(ccb);
 			break;
 		}
 	}
@@ -811,14 +853,19 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t fla
 {
 	struct CommandControlBlock *ccb;
+	struct ARCMSR_CDB *arcmsr_cdb;
+	int id, lun;
 
-	ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
+	arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
+	ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
 	if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
 		if (ccb->startdone == ARCMSR_CCB_ABORTED) {
 			struct scsi_cmnd *abortcmd = ccb->pcmd;
 			if (abortcmd) {
+				id = abortcmd->device->id;
+				lun = abortcmd->device->lun;
 				abortcmd->result |= DID_ABORT << 16;
-				arcmsr_ccb_complete(ccb, 1);
+				arcmsr_ccb_complete(ccb);
 				printk(KERN_NOTICE "arcmsr%d: ccb ='0x%p' \
 				isr got aborted command \n", acb->host->host_no, ccb);
 			}
@@ -883,6 +930,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
 	int poll_count = 0;
 	arcmsr_free_sysfs_attr(acb);
 	scsi_remove_host(host);
+	scsi_host_put(host);
 	flush_scheduled_work();
 	del_timer_sync(&acb->eternal_timer);
 	arcmsr_disable_outbound_ints(acb);
@@ -908,17 +956,14 @@ static void arcmsr_remove(struct pci_dev *pdev)
 			if (ccb->startdone == ARCMSR_CCB_START) {
 				ccb->startdone = ARCMSR_CCB_ABORTED;
 				ccb->pcmd->result = DID_ABORT << 16;
-				arcmsr_ccb_complete(ccb, 1);
+				arcmsr_ccb_complete(ccb);
 			}
 		}
 	}
-	free_irq(pdev->irq, acb);
 	arcmsr_free_ccb_pool(acb);
+	arcmsr_free_mu(acb);
 	pci_release_regions(pdev);
-
-	scsi_host_put(host);
-
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 }
@@ -973,7 +1018,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
 			ARCMSR_IOP2DRV_DATA_READ_OK |
 			ARCMSR_IOP2DRV_CDB_DONE |
 			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
-		writel(mask, reg->iop2drv_doorbell_mask_reg);
+		writel(mask, reg->iop2drv_doorbell_mask);
 		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
 	}
 }
@@ -986,6 +1031,9 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
 	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
 	__le32 address_lo, address_hi;
 	int arccdbsize = 0x30;
+	__le32 length = 0;
+	int i, cdb_sgcount = 0;
+	struct scatterlist *sg;
 	int nseg;
 
 	ccb->pcmd = pcmd;
@@ -995,19 +1043,12 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
 	arcmsr_cdb->LUN = pcmd->device->lun;
 	arcmsr_cdb->Function = 1;
 	arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
-	arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
+	arcmsr_cdb->Context = 0;
 	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
 	nseg = scsi_dma_map(pcmd);
-	if (nseg > ARCMSR_MAX_SG_ENTRIES)
+	if (nseg > acb->host->sg_tablesize || nseg < 0)
 		return FAILED;
-	BUG_ON(nseg < 0);
-
-	if (nseg) {
-		__le32 length;
-		int i, cdb_sgcount = 0;
-		struct scatterlist *sg;
-
 		/* map stor port SG list to our iop SG List. */
 		scsi_for_each_sg(pcmd, sg, nseg, i) {
 			/* Get the physical address of the current data pointer */
@@ -1034,10 +1075,10 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
 		}
 		arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
 		arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
+		arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
 		if ( arccdbsize > 256)
 			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
-	}
-	if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
+	if (pcmd->cmnd[0]|WRITE_6 || pcmd->cmnd[0] | WRITE_10 || pcmd->cmnd[0]|WRITE_12) {
 		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
 		ccb->ccb_flags |= CCB_FLAG_WRITE;
 	}
@@ -1046,7 +1087,7 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
 
 static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
 {
-	uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
+	uint32_t shifted_cdb_phyaddr = ccb->shifted_cdb_phyaddr;
 	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
 	atomic_inc(&acb->ccboutstandingcount);
 	ccb->startdone = ARCMSR_CCB_START;
@@ -1056,10 +1097,10 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
 		struct MessageUnit_A __iomem *reg = acb->pmuA;
 
 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
-			writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
+			writel(shifted_cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
 			&reg->inbound_queueport);
 		else {
-			writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
+			writel(shifted_cdb_phyaddr, &reg->inbound_queueport);
 		}
 	}
 	break;
@@ -1071,16 +1112,16 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
 		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
 		writel(0, &reg->post_qbuffer[ending_index]);
 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
-			writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
+			writel(shifted_cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
 			&reg->post_qbuffer[index]);
 		} else {
-			writel(cdb_shifted_phyaddr, &reg->post_qbuffer[index]);
+			writel(shifted_cdb_phyaddr, &reg->post_qbuffer[index]);
 		}
 		index++;
 		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
 		reg->postq_index = index;
-		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell_reg);
+		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
 	}
 	break;
 	}
@@ -1103,7 +1144,7 @@ static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
-	writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg);
+	writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
 
 	if (arcmsr_hbb_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE
@@ -1131,23 +1172,14 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
+		dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
 		iounmap(acb->pmuA);
-		dma_free_coherent(&acb->pdev->dev,
-		ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
-		acb->dma_coherent,
-		acb->dma_coherent_handle);
-		break;
 	}
+	break;
 	case ACB_ADAPTER_TYPE_B: {
-		struct MessageUnit_B *reg = acb->pmuB;
-		iounmap((u8 *)reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
-		iounmap((u8 *)reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
-		dma_free_coherent(&acb->pdev->dev,
-		(ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
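A note on the flag_ccb mechanics visible in the diff: arcmsr_alloc_ccb_pool() stores each CDB's 32-byte-aligned DMA address shifted right by 5, and arcmsr_drain_donequeue() reverses that with vir2phy_offset plus container_of(). The sketch below demonstrates the round trip; the struct layout and the addresses are made up for illustration, and only the shift-and-recover scheme follows the driver:

/*
 * Sketch of the CCB address round trip: the posting side stores the
 * 32-byte-aligned DMA address of the embedded CDB shifted right by 5,
 * and the completion side rebuilds the CCB from the returned flag_ccb.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct ARCMSR_CDB { char bytes[0x30]; };	/* stand-in for the real CDB */
struct CommandControlBlock {
	char driver_private[32];	/* stand-in; keeps arcmsr_cdb 32-byte aligned */
	struct ARCMSR_CDB arcmsr_cdb;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct CommandControlBlock ccb;
	uintptr_t dma_handle = 0x10000000;	/* pretend DMA address of this CCB */

	/* posting side: the low 5 bits of the shifted address stay free for
	 * flags such as ARCMSR_CCBPOST_FLAG_SGL_BSIZE */
	uint32_t flag_ccb = (uint32_t)((dma_handle +
			offsetof(struct CommandControlBlock, arcmsr_cdb)) >> 5);

	/* completion side: vir2phy_offset converts the physical CDB address
	 * back to a virtual one, and container_of() recovers the CCB */
	uintptr_t vir2phy_offset = (uintptr_t)&ccb - dma_handle;
	struct ARCMSR_CDB *cdb = (struct ARCMSR_CDB *)
			(vir2phy_offset + ((uintptr_t)flag_ccb << 5));
	struct CommandControlBlock *found =
			container_of(cdb, struct CommandControlBlock, arcmsr_cdb);

	printf("round trip recovered the CCB: %s\n", found == &ccb ? "yes" : "no");
	return 0;
}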