Diffstat (limited to 'drivers/scsi/bfa/bfad.c')
-rw-r--r--	drivers/scsi/bfa/bfad.c	752
1 file changed, 491 insertions, 261 deletions
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 1f938974b84..7593b7c1d33 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -32,7 +32,6 @@
 #include "bfad_drv.h"
 #include "bfad_im.h"
 #include "bfa_fcs.h"
-#include "bfa_os_inc.h"
 #include "bfa_defs.h"
 #include "bfa.h"
 
@@ -50,30 +49,42 @@ int reqq_size, rspq_size, num_sgpgs;
 int rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
 int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
 int bfa_io_max_sge = BFAD_IO_MAX_SGE;
-int log_level = 3; /* WARNING log level */
+int bfa_log_level = 3; /* WARNING log level */
 int ioc_auto_recover = BFA_TRUE;
 int bfa_linkup_delay = -1;
 int fdmi_enable = BFA_TRUE;
 int pcie_max_read_reqsz;
 int bfa_debugfs_enable = 1;
 int msix_disable_cb = 0, msix_disable_ct = 0;
+int max_xfer_size = BFAD_MAX_SECTORS >> 1;
+int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
 
-u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
-u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
+/* Firmware releated */
+u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
+u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
 
-const char *msix_name_ct[] = {
+#define BFAD_FW_FILE_CB     "cbfw-3.2.3.0.bin"
+#define BFAD_FW_FILE_CT     "ctfw-3.2.3.0.bin"
+#define BFAD_FW_FILE_CT2    "ct2fw-3.2.3.0.bin"
+
+static u32 *bfad_load_fwimg(struct pci_dev *pdev);
+static void bfad_free_fwimg(void);
+static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+        u32 *bfi_image_size, char *fw_name);
+
+static const char *msix_name_ct[] = {
+    "ctrl",
     "cpe0", "cpe1", "cpe2", "cpe3",
-    "rme0", "rme1", "rme2", "rme3",
-    "ctrl" };
+    "rme0", "rme1", "rme2", "rme3" };
 
-const char *msix_name_cb[] = {
+static const char *msix_name_cb[] = {
     "cpe0", "cpe1", "cpe2", "cpe3",
     "rme0", "rme1", "rme2", "rme3",
     "eemc", "elpu0", "elpu1", "epss", "mlpu" };
 
-MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
-MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
-MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);
+MODULE_FIRMWARE(BFAD_FW_FILE_CB);
+MODULE_FIRMWARE(BFAD_FW_FILE_CT);
+MODULE_FIRMWARE(BFAD_FW_FILE_CT2);
 
 module_param(os_name, charp, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
@@ -108,8 +119,8 @@ module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
 module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
-module_param(log_level, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(log_level, "Driver log level, default=3, "
+module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
         "Range[Critical:1|Error:2|Warning:3|Info:4]");
 module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
@@ -135,6 +146,11 @@ MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
 module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
         " Range[false:0|true:1]");
+module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_xfer_size, "default=32MB,"
+        " Range[64k|128k|256k|512k|1024k|2048k]");
+module_param(max_rport_logins, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_rport_logins, "Max number of logins to initiator and target rports on a port (physical/logical), default=1024");
 
 static void
 bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
@@ -188,6 +204,7 @@ static void
 bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
 {
     unsigned long flags;
+    bfa_status_t ret;
 
     bfa_trc(bfad, event);
 
@@ -201,12 +218,12 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
         if (bfad_setup_intr(bfad)) {
             printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
                     bfad->inst_no);
-            bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
+            bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
             break;
         }
 
         spin_lock_irqsave(&bfad->bfad_lock, flags);
-        bfa_init(&bfad->bfa);
+        bfa_iocfc_init(&bfad->bfa);
         spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
         /* Set up interrupt handler for each vectors */
@@ -223,8 +240,29 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
         if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
             bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
         } else {
+            printk(KERN_WARNING
+                "bfa %s: bfa init failed\n",
+                bfad->pci_name);
+            spin_lock_irqsave(&bfad->bfad_lock, flags);
+            bfa_fcs_init(&bfad->bfa_fcs);
+            spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+            ret = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
+            if (ret != BFA_STATUS_OK) {
+                init_completion(&bfad->comp);
+
+                spin_lock_irqsave(&bfad->bfad_lock, flags);
+                bfad->pport.flags |= BFAD_PORT_DELETE;
+                bfa_fcs_exit(&bfad->bfa_fcs);
+                spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+                wait_for_completion(&bfad->comp);
+
+                bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
+                break;
+            }
             bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
-            bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
+            bfa_sm_send_event(bfad, BFAD_E_HAL_INIT_FAILED);
         }
 
         break;
@@ -254,12 +292,14 @@ bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
         spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
         retval = bfad_start_ops(bfad);
-        if (retval != BFA_STATUS_OK)
+        if (retval != BFA_STATUS_OK) {
+            bfa_sm_set_state(bfad, bfad_sm_failed);
             break;
+        }
         bfa_sm_set_state(bfad, bfad_sm_operational);
         break;
 
-    case BFAD_E_INTR_INIT_FAILED:
+    case BFAD_E_INIT_FAILED:
         bfa_sm_set_state(bfad, bfad_sm_uninit);
         kthread_stop(bfad->bfad_tsk);
         spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -267,7 +307,7 @@ bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
         spin_unlock_irqrestore(&bfad->bfad_lock, flags);
         break;
 
-    case BFAD_E_INIT_FAILED:
+    case BFAD_E_HAL_INIT_FAILED:
         bfa_sm_set_state(bfad, bfad_sm_failed);
         break;
     default:
@@ -291,13 +331,8 @@ bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
         break;
 
     case BFAD_E_STOP:
-        if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
-            bfad_uncfg_pport(bfad);
-        if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
-            bfad_im_probe_undo(bfad);
-            bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
-        }
-        bfad_stop(bfad);
+        bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
+        bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
         break;
 
     case BFAD_E_EXIT_COMP:
@@ -440,23 +475,6 @@ bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
     return port_drv;
 }
 
-void
-bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
-            struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
-{
-    struct bfad_port_s *port_drv;
-
-    /* this will be only called from rmmod context */
-    if (vp_drv && !vp_drv->comp_del) {
-        port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
-                ((vf_drv) ? (&(vf_drv)->base_port) :
-                (&(bfad)->pport));
-        bfa_trc(bfad, roles);
-        if (roles & BFA_LPORT_ROLE_FCP_IM)
-            bfad_im_port_delete(bfad, port_drv);
-    }
-}
-
 /*
  * FCS RPORT alloc callback, after successful PLOGI by FCS
  */
@@ -489,7 +507,7 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
     struct bfad_vport_s *vport;
     int rc;
 
-    vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
+    vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC);
     if (!vport) {
         bfa_trc(bfad, 0);
         return;
@@ -515,28 +533,26 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
 void
 bfad_hal_mem_release(struct bfad_s *bfad)
 {
-    int i;
     struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
-    struct bfa_mem_elem_s *meminfo_elem;
-
-    for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
-        meminfo_elem = &hal_meminfo->meminfo[i];
-        if (meminfo_elem->kva != NULL) {
-            switch (meminfo_elem->mem_type) {
-            case BFA_MEM_TYPE_KVA:
-                vfree(meminfo_elem->kva);
-                break;
-            case BFA_MEM_TYPE_DMA:
-                dma_free_coherent(&bfad->pcidev->dev,
-                    meminfo_elem->mem_len,
-                    meminfo_elem->kva,
-                    (dma_addr_t) meminfo_elem->dma);
-                break;
-            default:
-                bfa_assert(0);
-                break;
-            }
-        }
+    struct bfa_mem_dma_s *dma_info, *dma_elem;
+    struct bfa_mem_kva_s *kva_info, *kva_elem;
+    struct list_head *dm_qe, *km_qe;
+
+    dma_info = &hal_meminfo->dma_info;
+    kva_info = &hal_meminfo->kva_info;
+
+    /* Iterate through the KVA meminfo queue */
+    list_for_each(km_qe, &kva_info->qe) {
+        kva_elem = (struct bfa_mem_kva_s *) km_qe;
+        vfree(kva_elem->kva);
+    }
+
+    /* Iterate through the DMA meminfo queue */
+    list_for_each(dm_qe, &dma_info->qe) {
+        dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+        dma_free_coherent(&bfad->pcidev->dev,
+                dma_elem->mem_len, dma_elem->kva,
+                (dma_addr_t) dma_elem->dma);
     }
 
     memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
@@ -551,15 +567,15 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
         bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
     if (num_tms > 0)
         bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
-    if (num_fcxps > 0)
+    if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX)
         bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
-    if (num_ufbufs > 0)
+    if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX)
         bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
     if (reqq_size > 0)
         bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
     if (rspq_size > 0)
         bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
-    if (num_sgpgs > 0)
+    if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX)
         bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;
 
     /*
@@ -579,85 +595,46 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
 bfa_status_t
 bfad_hal_mem_alloc(struct bfad_s *bfad)
 {
-    int i;
     struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
-    struct bfa_mem_elem_s *meminfo_elem;
-    dma_addr_t phys_addr;
-    void *kva;
+    struct bfa_mem_dma_s *dma_info, *dma_elem;
+    struct bfa_mem_kva_s *kva_info, *kva_elem;
+    struct list_head *dm_qe, *km_qe;
     bfa_status_t rc = BFA_STATUS_OK;
-    int retry_count = 0;
-    int reset_value = 1;
-    int min_num_sgpgs = 512;
+    dma_addr_t phys_addr;
 
     bfa_cfg_get_default(&bfad->ioc_cfg);
-
-retry:
     bfad_update_hal_cfg(&bfad->ioc_cfg);
     bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
-    bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);
-
-    for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
-        meminfo_elem = &hal_meminfo->meminfo[i];
-        switch (meminfo_elem->mem_type) {
-        case BFA_MEM_TYPE_KVA:
-            kva = vmalloc(meminfo_elem->mem_len);
-            if (kva == NULL) {
-                bfad_hal_mem_release(bfad);
-                rc = BFA_STATUS_ENOMEM;
-                goto ext;
-            }
-            memset(kva, 0, meminfo_elem->mem_len);
-            meminfo_elem->kva = kva;
-            break;
-        case BFA_MEM_TYPE_DMA:
-            kva = dma_alloc_coherent(&bfad->pcidev->dev,
-                meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
-            if (kva == NULL) {
-                bfad_hal_mem_release(bfad);
-                /*
-                 * If we cannot allocate with default
-                 * num_sgpages try with half the value.
-                 */
-                if (num_sgpgs > min_num_sgpgs) {
-                    printk(KERN_INFO
-                    "bfad[%d]: memory allocation failed"
-                    " with num_sgpgs: %d\n",
-                        bfad->inst_no, num_sgpgs);
-                    nextLowerInt(&num_sgpgs);
-                    printk(KERN_INFO
-                    "bfad[%d]: trying to allocate memory"
-                    " with num_sgpgs: %d\n",
-                        bfad->inst_no, num_sgpgs);
-                    retry_count++;
-                    goto retry;
-                } else {
-                    if (num_sgpgs_parm > 0)
-                        num_sgpgs = num_sgpgs_parm;
-                    else {
-                        reset_value =
-                            (1 << retry_count);
-                        num_sgpgs *= reset_value;
-                    }
-                    rc = BFA_STATUS_ENOMEM;
-                    goto ext;
-                }
-            }
-
-            if (num_sgpgs_parm > 0)
-                num_sgpgs = num_sgpgs_parm;
-            else {
-                reset_value = (1 << retry_count);
-                num_sgpgs *= reset_value;
-            }
-
-            memset(kva, 0, meminfo_elem->mem_len);
-            meminfo_elem->kva = kva;
-            meminfo_elem->dma = phys_addr;
-            break;
-        default:
-            break;
+    bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa);
+
+    dma_info = &hal_meminfo->dma_info;
+    kva_info = &hal_meminfo->kva_info;
+
+    /* Iterate through the KVA meminfo queue */
+    list_for_each(km_qe, &kva_info->qe) {
+        kva_elem = (struct bfa_mem_kva_s *) km_qe;
+        kva_elem->kva = vmalloc(kva_elem->mem_len);
+        if (kva_elem->kva == NULL) {
+            bfad_hal_mem_release(bfad);
+            rc = BFA_STATUS_ENOMEM;
+            goto ext;
+        }
+        memset(kva_elem->kva, 0, kva_elem->mem_len);
+    }
+
+    /* Iterate through the DMA meminfo queue */
+    list_for_each(dm_qe, &dma_info->qe) {
+        dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+        dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev,
+                        dma_elem->mem_len,
                        &phys_addr, GFP_KERNEL);
+        if (dma_elem->kva == NULL) {
+            bfad_hal_mem_release(bfad);
+            rc = BFA_STATUS_ENOMEM;
+            goto ext;
         }
+        dma_elem->dma = phys_addr;
+        memset(dma_elem->kva, 0, dma_elem->mem_len);
     }
 ext:
     return rc;
@@ -699,6 +676,7 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
 
     spin_lock_irqsave(&bfad->bfad_lock, flags);
     bfa_fcs_vport_start(&vport->fcs_vport);
+    list_add_tail(&vport->list_entry, &bfad->vport_list);
     spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
     return BFA_STATUS_OK;
@@ -725,7 +703,7 @@ bfad_bfa_tmo(unsigned long data)
 
     spin_lock_irqsave(&bfad->bfad_lock, flags);
 
-    bfa_timer_tick(&bfad->bfa);
+    bfa_timer_beat(&bfad->bfa.timer_mod);
 
     bfa_comp_deq(&bfad->bfa, &doneq);
     spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -768,13 +746,20 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
 
     pci_set_master(pdev);
 
-    if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
-        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+    if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
+        (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
+        if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+            (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
             printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
             goto out_release_region;
         }
+    }
+
+    /* Enable PCIE Advanced Error Recovery (AER) if kernel supports */
+    pci_enable_pcie_error_reporting(pdev);
 
     bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+    bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
 
     if (bfad->pci_bar0_kva == NULL) {
         printk(KERN_ERR "Fail to map bar0\n");
@@ -785,6 +770,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
     bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
     bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
     bfad->hal_pcidev.device_id = pdev->device;
+    bfad->hal_pcidev.ssid = pdev->subsystem_device;
     bfad->pci_name = pci_name(pdev);
 
     bfad->pci_attr.vendor_id = pdev->vendor;
@@ -796,52 +782,25 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
     bfad->pcidev = pdev;
 
     /* Adjust PCIe Maximum Read Request Size */
-    if (pcie_max_read_reqsz > 0) {
-        int pcie_cap_reg;
-        u16 pcie_dev_ctl;
-        u16 mask = 0xffff;
-
-        switch (pcie_max_read_reqsz) {
-        case 128:
-            mask = 0x0;
-            break;
-        case 256:
-            mask = 0x1000;
-            break;
-        case 512:
-            mask = 0x2000;
-            break;
-        case 1024:
-            mask = 0x3000;
-            break;
-        case 2048:
-            mask = 0x4000;
-            break;
-        case 4096:
-            mask = 0x5000;
-            break;
-        default:
-            break;
-        }
-
-        pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-        if (mask != 0xffff && pcie_cap_reg) {
-            pcie_cap_reg += 0x08;
-            pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
-            if ((pcie_dev_ctl & 0x7000) != mask) {
-                printk(KERN_WARNING "BFA[%s]: "
+    if (pci_is_pcie(pdev) && pcie_max_read_reqsz) {
+        if (pcie_max_read_reqsz >= 128 &&
+            pcie_max_read_reqsz <= 4096 &&
+            is_power_of_2(pcie_max_read_reqsz)) {
+            int max_rq = pcie_get_readrq(pdev);
+            printk(KERN_WARNING "BFA[%s]: "
                 "pcie_max_read_request_size is %d, "
-                "reset to %d\n", bfad->pci_name,
-                (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
+                "reset to %d\n", bfad->pci_name, max_rq,
                 pcie_max_read_reqsz);
-
-                pcie_dev_ctl &= ~0x7000;
-                pci_write_config_word(pdev, pcie_cap_reg,
-                        pcie_dev_ctl | mask);
-            }
+            pcie_set_readrq(pdev, pcie_max_read_reqsz);
+        } else {
+            printk(KERN_WARNING "BFA[%s]: invalid "
+                   "pcie_max_read_request_size %d ignored\n",
+                   bfad->pci_name, pcie_max_read_reqsz);
         }
     }
 
+    pci_save_state(pdev);
+
     return 0;
 
 out_release_region:
@@ -856,9 +815,11 @@ void
 bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
 {
     pci_iounmap(pdev, bfad->pci_bar0_kva);
+    pci_iounmap(pdev, bfad->pci_bar2_kva);
     pci_release_regions(pdev);
+    /* Disable PCIE Advanced Error Recovery (AER) */
+    pci_disable_pcie_error_reporting(pdev);
     pci_disable_device(pdev);
-    pci_set_drvdata(pdev, NULL);
 }
 
 bfa_status_t
@@ -879,11 +840,11 @@ bfad_drv_init(struct bfad_s *bfad)
         printk(KERN_WARNING
             "Not enough memory to attach all Brocade HBA ports, %s",
             "System may need more memory.\n");
-        goto out_hal_mem_alloc_failure;
+        return BFA_STATUS_FAILED;
     }
 
-    bfa_init_trc(&bfad->bfa, bfad->trcmod);
-    bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
+    bfad->bfa.trcmod = bfad->trcmod;
+    bfad->bfa.plog = &bfad->plog_buf;
     bfa_plog_init(&bfad->plog_buf);
     bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
              0, "Driver Attach");
@@ -893,17 +854,14 @@ bfad_drv_init(struct bfad_s *bfad)
 
     /* FCS INIT */
     spin_lock_irqsave(&bfad->bfad_lock, flags);
-    bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
+    bfad->bfa_fcs.trcmod = bfad->trcmod;
     bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
-    bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);
+    bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
     spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
     bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
 
     return BFA_STATUS_OK;
-
-out_hal_mem_alloc_failure:
-    return BFA_STATUS_FAILED;
 }
 
 void
@@ -913,7 +871,7 @@ bfad_drv_uninit(struct bfad_s *bfad)
 
     spin_lock_irqsave(&bfad->bfad_lock, flags);
     init_completion(&bfad->comp);
-    bfa_stop(&bfad->bfa);
+    bfa_iocfc_stop(&bfad->bfa);
     spin_unlock_irqrestore(&bfad->bfad_lock, flags);
     wait_for_completion(&bfad->comp);
 
@@ -932,8 +890,9 @@ bfad_drv_start(struct bfad_s *bfad)
     unsigned long flags;
 
     spin_lock_irqsave(&bfad->bfad_lock, flags);
-    bfa_start(&bfad->bfa);
-    bfa_fcs_start(&bfad->bfa_fcs);
+    bfa_iocfc_start(&bfad->bfa);
+    bfa_fcs_pbc_vport_init(&bfad->bfa_fcs);
+    bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
     bfad->bfad_flags |= BFAD_HAL_START_DONE;
     spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
@@ -963,7 +922,7 @@ bfad_stop(struct bfad_s *bfad)
 
     spin_lock_irqsave(&bfad->bfad_lock, flags);
     init_completion(&bfad->comp);
-    bfa_stop(&bfad->bfa);
+    bfa_iocfc_stop(&bfad->bfa);
     bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
     spin_unlock_irqrestore(&bfad->bfad_lock, flags);
     wait_for_completion(&bfad->comp);
@@ -992,10 +951,6 @@ bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
         bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
     }
 
-    /* Setup the debugfs node for this scsi_host */
-    if (bfa_debugfs_enable)
-        bfad_debugfs_init(&bfad->pport);
-
     bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
 
 out:
@@ -1005,10 +960,6 @@ out:
 void
 bfad_uncfg_pport(struct bfad_s *bfad)
 {
-    /* Remove the debugfs node for this scsi_host */
-    kfree(bfad->regdata);
-    bfad_debugfs_exit(&bfad->pport);
-
     if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
         (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
         bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
@@ -1028,6 +979,12 @@ bfad_start_ops(struct bfad_s *bfad) {
     struct bfad_vport_s *vport, *vport_new;
     struct bfa_fcs_driver_info_s driver_info;
 
+    /* Limit min/max. xfer size to [64k-32MB] */
+    if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
+        max_xfer_size = BFAD_MIN_SECTORS >> 1;
+    if (max_xfer_size > BFAD_MAX_SECTORS >> 1)
+        max_xfer_size = BFAD_MAX_SECTORS >> 1;
+
     /* Fill the driver_info info to fcs*/
     memset(&driver_info, 0, sizeof(driver_info));
     strncpy(driver_info.version, BFAD_DRIVER_VERSION,
@@ -1043,32 +1000,34 @@ bfad_start_ops(struct bfad_s *bfad) {
         sizeof(driver_info.host_os_patch) - 1);
 
     strncpy(driver_info.os_device_name, bfad->pci_name,
-        sizeof(driver_info.os_device_name - 1));
+        sizeof(driver_info.os_device_name) - 1);
 
-    /* FCS INIT */
+    /* FCS driver info init */
     spin_lock_irqsave(&bfad->bfad_lock, flags);
     bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
-    bfa_fcs_init(&bfad->bfa_fcs);
+
+    if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
+        bfa_fcs_update_cfg(&bfad->bfa_fcs);
+    else
+        bfa_fcs_init(&bfad->bfa_fcs);
+
     spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
-    retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
-    if (retval != BFA_STATUS_OK) {
-        if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
-            bfa_sm_set_state(bfad, bfad_sm_failed);
-        bfad_stop(bfad);
-        return BFA_STATUS_FAILED;
+    if (!(bfad->bfad_flags & BFAD_CFG_PPORT_DONE)) {
+        retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
+        if (retval != BFA_STATUS_OK)
+            return BFA_STATUS_FAILED;
     }
 
+    /* Setup fc host fixed attribute if the lk supports */
+    bfad_fc_host_init(bfad->pport.im_port);
+
     /* BFAD level FC4 IM specific resource allocation */
     retval = bfad_im_probe(bfad);
     if (retval != BFA_STATUS_OK) {
         printk(KERN_WARNING "bfad_im_probe failed\n");
         if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
             bfa_sm_set_state(bfad, bfad_sm_failed);
-        bfad_im_probe_undo(bfad);
-        bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
-        bfad_uncfg_pport(bfad);
-        bfad_stop(bfad);
         return BFA_STATUS_FAILED;
     } else
         bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
@@ -1102,17 +1061,17 @@ bfad_start_ops(struct bfad_s *bfad) {
 
     /*
      * If bfa_linkup_delay is set to -1 default; try to retrive the
-     * value using the bfad_os_get_linkup_delay(); else use the
+     * value using the bfad_get_linkup_delay(); else use the
     * passed in module param value as the bfa_linkup_delay.
     */
     if (bfa_linkup_delay < 0) {
-        bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
-        bfad_os_rport_online_wait(bfad);
+        bfa_linkup_delay = bfad_get_linkup_delay(bfad);
+        bfad_rport_online_wait(bfad);
         bfa_linkup_delay = -1;
     } else
-        bfad_os_rport_online_wait(bfad);
+        bfad_rport_online_wait(bfad);
 
-    BFA_LOG(KERN_INFO, bfad, log_level, "bfa device claimed\n");
+    BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
 
     return BFA_STATUS_OK;
 }
@@ -1167,7 +1126,6 @@ bfad_intx(int irq, void *dev_id)
         spin_lock_irqsave(&bfad->bfad_lock, flags);
         bfa_comp_free(&bfad->bfa, &doneq);
         spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-        bfa_trc_fp(bfad, irq);
     }
 
     return IRQ_HANDLED;
@@ -1230,8 +1188,8 @@ bfad_install_msix_handler(struct bfad_s *bfad)
     for (i = 0; i < bfad->nvec; i++) {
         sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
                 bfad->pci_name,
-                ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ?
-                msix_name_ct[i] : msix_name_cb[i]));
+                ((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
+                msix_name_cb[i] : msix_name_ct[i]));
 
         error = request_irq(bfad->msix_tab[i].msix.vector,
                     (irq_handler_t) bfad_msix, 0,
@@ -1245,6 +1203,9 @@ bfad_install_msix_handler(struct bfad_s *bfad)
                 free_irq(bfad->msix_tab[j].msix.vector,
                         &bfad->msix_tab[j]);
 
+            bfad->bfad_flags &= ~BFAD_MSIX_ON;
+            pci_disable_msix(bfad->pcidev);
+
             return 1;
         }
     }
@@ -1262,6 +1223,7 @@ bfad_setup_intr(struct bfad_s *bfad)
     u32 mask = 0, i, num_bit = 0, max_bit = 0;
     struct msix_entry msix_entries[MAX_MSIX_ENTRY];
     struct pci_dev *pdev = bfad->pcidev;
+    u16 reg;
 
     /* Call BFA to get the msix map for this PCI function.  */
     bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
@@ -1269,27 +1231,45 @@ bfad_setup_intr(struct bfad_s *bfad)
     /* Set up the msix entry table */
     bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
 
-    if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
-        (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {
+    if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
+        (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {
 
         error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
         if (error) {
+            /* In CT1 & CT2, try to allocate just one vector */
+            if (bfa_asic_id_ctc(pdev->device)) {
+                printk(KERN_WARNING "bfa %s: trying one msix "
+                       "vector failed to allocate %d[%d]\n",
+                       bfad->pci_name, bfad->nvec, error);
+                bfad->nvec = 1;
+                error = pci_enable_msix(bfad->pcidev,
                        msix_entries, bfad->nvec);
+            }
+
             /*
              * Only error number of vector is available.
             * We don't have a mechanism to map multiple
             * interrupts into one vector, so even if we
             * can try to request less vectors, we don't
             * know how to associate interrupt events to
-             * vectors. Linux doesn't dupicate vectors
+             * vectors. Linux doesn't duplicate vectors
             * in the MSIX table for this case.
             */
+            if (error) {
+                printk(KERN_WARNING "bfad%d: "
+                       "pci_enable_msix failed (%d), "
+                       "use line based.\n",
+                       bfad->inst_no, error);
+                goto line_based;
+            }
+        }
 
-            printk(KERN_WARNING "bfad%d: "
-                "pci_enable_msix failed (%d),"
-                " use line based.\n", bfad->inst_no, error);
+        /* Disable INTX in MSI-X mode */
+        pci_read_config_word(pdev, PCI_COMMAND, &reg);
 
-            goto line_based;
-        }
+        if (!(reg & PCI_COMMAND_INTX_DISABLE))
+            pci_write_config_word(pdev, PCI_COMMAND,
+                reg | PCI_COMMAND_INTX_DISABLE);
 
         /* Save the vectors */
         for (i = 0; i < bfad->nvec; i++) {
@@ -1312,6 +1292,7 @@ line_based:
         /* Enable interrupt handler failed */
         return 1;
     }
+    bfad->bfad_flags |= BFAD_INTX_ON;
 
     return error;
 }
@@ -1328,7 +1309,7 @@ bfad_remove_intr(struct bfad_s *bfad)
 
         pci_disable_msix(bfad->pcidev);
         bfad->bfad_flags &= ~BFAD_MSIX_ON;
-    } else {
+    } else if (bfad->bfad_flags & BFAD_INTX_ON) {
         free_irq(bfad->pcidev->irq, bfad);
     }
 }
@@ -1340,7 +1321,7 @@ int
 bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
     struct bfad_s *bfad;
-    int error = -ENODEV, retval;
+    int error = -ENODEV, retval, i;
 
     /* For single port cards - only claim function 0 */
     if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
@@ -1364,6 +1345,12 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
     bfa_trc_init(bfad->trcmod);
     bfa_trc(bfad, bfad_inst);
 
+    /* AEN INIT */
+    INIT_LIST_HEAD(&bfad->free_aen_q);
+    INIT_LIST_HEAD(&bfad->active_aen_q);
+    for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
+        list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);
+
     if (!(bfad_load_fwimg(pdev))) {
         kfree(bfad->trcmod);
         goto out_alloc_trace_failure;
@@ -1385,11 +1372,18 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
     bfa_sm_set_state(bfad, bfad_sm_uninit);
 
     spin_lock_init(&bfad->bfad_lock);
+    spin_lock_init(&bfad->bfad_aen_spinlock);
+
     pci_set_drvdata(pdev, bfad);
 
     bfad->ref_count = 0;
     bfad->pport.bfad = bfad;
     INIT_LIST_HEAD(&bfad->pbc_vport_list);
+    INIT_LIST_HEAD(&bfad->vport_list);
+
+    /* Setup the debugfs node for this bfad */
+    if (bfa_debugfs_enable)
+        bfad_debugfs_init(&bfad->pport);
 
     retval = bfad_drv_init(bfad);
     if (retval != BFA_STATUS_OK)
@@ -1403,9 +1397,11 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
     return 0;
 
 out_bfad_sm_failure:
-    bfa_detach(&bfad->bfa);
     bfad_hal_mem_release(bfad);
 out_drv_init_failure:
+    /* Remove the debugfs node for this bfad */
+    kfree(bfad->regdata);
+    bfad_debugfs_exit(&bfad->pport);
     mutex_lock(&bfad_mutex);
     bfad_inst--;
     list_del(&bfad->list_entry);
@@ -1447,6 +1443,10 @@ bfad_pci_remove(struct pci_dev *pdev)
     spin_unlock_irqrestore(&bfad->bfad_lock, flags);
     bfad_hal_mem_release(bfad);
 
+    /* Remove the debugfs node for this bfad */
+    kfree(bfad->regdata);
+    bfad_debugfs_exit(&bfad->pport);
+
     /* Cleaning the BFAD instance */
     mutex_lock(&bfad_mutex);
     bfad_inst--;
@@ -1458,6 +1458,197 @@ bfad_pci_remove(struct pci_dev *pdev)
     kfree(bfad);
 }
 
+/*
+ * PCI Error Recovery entry, error detected.
+ */
+static pci_ers_result_t
+bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+    struct bfad_s *bfad = pci_get_drvdata(pdev);
+    unsigned long flags;
+    pci_ers_result_t ret = PCI_ERS_RESULT_NONE;
+
+    dev_printk(KERN_ERR, &pdev->dev,
+           "error detected state: %d - flags: 0x%x\n",
+           state, bfad->bfad_flags);
+
+    switch (state) {
+    case pci_channel_io_normal: /* non-fatal error */
+        spin_lock_irqsave(&bfad->bfad_lock, flags);
+        bfad->bfad_flags &= ~BFAD_EEH_BUSY;
+        /* Suspend/fail all bfa operations */
+        bfa_ioc_suspend(&bfad->bfa.ioc);
+        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+        del_timer_sync(&bfad->hal_tmo);
+        ret = PCI_ERS_RESULT_CAN_RECOVER;
+        break;
+    case pci_channel_io_frozen: /* fatal error */
+        init_completion(&bfad->comp);
+        spin_lock_irqsave(&bfad->bfad_lock, flags);
+        bfad->bfad_flags |= BFAD_EEH_BUSY;
+        /* Suspend/fail all bfa operations */
+        bfa_ioc_suspend(&bfad->bfa.ioc);
+        bfa_fcs_stop(&bfad->bfa_fcs);
+        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+        wait_for_completion(&bfad->comp);
+
+        bfad_remove_intr(bfad);
+        del_timer_sync(&bfad->hal_tmo);
+        pci_disable_device(pdev);
+        ret = PCI_ERS_RESULT_NEED_RESET;
+        break;
+    case pci_channel_io_perm_failure: /* PCI Card is DEAD */
+        spin_lock_irqsave(&bfad->bfad_lock, flags);
+        bfad->bfad_flags |= BFAD_EEH_BUSY |
+                    BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE;
+        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+        /* If the error_detected handler is called with the reason
+         * pci_channel_io_perm_failure - it will subsequently call
+         * pci_remove() entry point to remove the pci device from the
+         * system - So defer the cleanup to pci_remove(); cleaning up
+         * here causes inconsistent state during pci_remove().
+         */
+        ret = PCI_ERS_RESULT_DISCONNECT;
+        break;
+    default:
+        WARN_ON(1);
+    }
+
+    return ret;
+}
+
+int
+restart_bfa(struct bfad_s *bfad)
+{
+    unsigned long flags;
+    struct pci_dev *pdev = bfad->pcidev;
+
+    bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg,
+           &bfad->meminfo, &bfad->hal_pcidev);
+
+    /* Enable Interrupt and wait bfa_init completion */
+    if (bfad_setup_intr(bfad)) {
+        dev_printk(KERN_WARNING, &pdev->dev,
+               "%s: bfad_setup_intr failed\n", bfad->pci_name);
+        bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
+        return -1;
+    }
+
+    init_completion(&bfad->comp);
+    spin_lock_irqsave(&bfad->bfad_lock, flags);
+    bfa_iocfc_init(&bfad->bfa);
+    spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+    /* Set up interrupt handler for each vectors */
+    if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
+        bfad_install_msix_handler(bfad))
+        dev_printk(KERN_WARNING, &pdev->dev,
+               "%s: install_msix failed.\n", bfad->pci_name);
+
+    bfad_init_timer(bfad);
+    wait_for_completion(&bfad->comp);
+    bfad_drv_start(bfad);
+
+    return 0;
+}
+
+/*
+ * PCI Error Recovery entry, re-initialize the chip.
+ */
+static pci_ers_result_t
+bfad_pci_slot_reset(struct pci_dev *pdev)
+{
+    struct bfad_s *bfad = pci_get_drvdata(pdev);
+    u8 byte;
+
+    dev_printk(KERN_ERR, &pdev->dev,
+           "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
+
+    if (pci_enable_device(pdev)) {
+        dev_printk(KERN_ERR, &pdev->dev, "Cannot re-enable "
+               "PCI device after reset.\n");
+        return PCI_ERS_RESULT_DISCONNECT;
+    }
+
+    pci_restore_state(pdev);
+
+    /*
+     * Read some byte (e.g. DMA max. payload size which can't
+     * be 0xff any time) to make sure - we did not hit another PCI error
+     * in the middle of recovery. If we did, then declare permanent failure.
+     */
+    pci_read_config_byte(pdev, 0x68, &byte);
+    if (byte == 0xff) {
+        dev_printk(KERN_ERR, &pdev->dev,
+               "slot_reset failed ... got another PCI error !\n");
+        goto out_disable_device;
+    }
+
+    pci_save_state(pdev);
+    pci_set_master(pdev);
+
+    if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(64)) != 0)
+        if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
+            goto out_disable_device;
+
+    pci_cleanup_aer_uncorrect_error_status(pdev);
+
+    if (restart_bfa(bfad) == -1)
+        goto out_disable_device;
+
+    pci_enable_pcie_error_reporting(pdev);
+    dev_printk(KERN_WARNING, &pdev->dev,
+           "slot_reset completed flags: 0x%x!\n", bfad->bfad_flags);
+
+    return PCI_ERS_RESULT_RECOVERED;
+
+out_disable_device:
+    pci_disable_device(pdev);
+    return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static pci_ers_result_t
+bfad_pci_mmio_enabled(struct pci_dev *pdev)
+{
+    unsigned long flags;
+    struct bfad_s *bfad = pci_get_drvdata(pdev);
+
+    dev_printk(KERN_INFO, &pdev->dev, "mmio_enabled\n");
+
+    /* Fetch FW diagnostic information */
+    bfa_ioc_debug_save_ftrc(&bfad->bfa.ioc);
+
+    /* Cancel all pending IOs */
+    spin_lock_irqsave(&bfad->bfad_lock, flags);
+    init_completion(&bfad->comp);
+    bfa_fcs_stop(&bfad->bfa_fcs);
+    spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+    wait_for_completion(&bfad->comp);
+
+    bfad_remove_intr(bfad);
+    del_timer_sync(&bfad->hal_tmo);
+    pci_disable_device(pdev);
+
+    return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static void
+bfad_pci_resume(struct pci_dev *pdev)
+{
+    unsigned long flags;
+    struct bfad_s *bfad = pci_get_drvdata(pdev);
+
+    dev_printk(KERN_WARNING, &pdev->dev, "resume\n");
+
+    /* wait until the link is online */
+    bfad_rport_online_wait(bfad);
+
+    spin_lock_irqsave(&bfad->bfad_lock, flags);
+    bfad->bfad_flags &= ~BFAD_EEH_BUSY;
+    spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+}
+
 struct pci_device_id bfad_id_table[] = {
     {
         .vendor = BFA_PCI_VENDOR_ID_BROCADE,
@@ -1487,17 +1678,44 @@ struct pci_device_id bfad_id_table[] = {
         .class = (PCI_CLASS_SERIAL_FIBER << 8),
         .class_mask = ~0,
     },
+    {
+        .vendor = BFA_PCI_VENDOR_ID_BROCADE,
+        .device = BFA_PCI_DEVICE_ID_CT2,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_SERIAL_FIBER << 8),
+        .class_mask = ~0,
+    },
+    {
+        .vendor = BFA_PCI_VENDOR_ID_BROCADE,
+        .device = BFA_PCI_DEVICE_ID_CT2_QUAD,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_SERIAL_FIBER << 8),
+        .class_mask = ~0,
+    },
 
     {0, 0},
 };
 
 MODULE_DEVICE_TABLE(pci, bfad_id_table);
 
+/*
+ * PCI error recovery handlers.
+ */
+static struct pci_error_handlers bfad_err_handler = {
+    .error_detected = bfad_pci_error_detected,
+    .slot_reset = bfad_pci_slot_reset,
+    .mmio_enabled = bfad_pci_mmio_enabled,
+    .resume = bfad_pci_resume,
+};
+
 static struct pci_driver bfad_pci_driver = {
     .name = BFAD_DRIVER_NAME,
     .id_table = bfad_id_table,
     .probe = bfad_pci_probe,
-    .remove = __devexit_p(bfad_pci_remove),
+    .remove = bfad_pci_remove,
+    .err_handler = &bfad_err_handler,
 };
 
 /*
@@ -1524,8 +1742,9 @@ bfad_init(void)
     if (strcmp(FCPI_NAME, " fcpim") == 0)
         supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
 
-    bfa_ioc_auto_recover(ioc_auto_recover);
+    bfa_auto_recover = ioc_auto_recover;
     bfa_fcs_rport_set_del_timeout(rport_del_timeout);
+    bfa_fcs_rport_set_max_logins(max_rport_logins);
 
     error = pci_register_driver(&bfad_pci_driver);
     if (error) {
@@ -1552,7 +1771,7 @@ bfad_exit(void)
 }
 
 /* Firmware handling */
-u32 *
+static void
 bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
         u32 *bfi_image_size, char *fw_name)
 {
@@ -1560,44 +1779,55 @@ bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
 
     if (request_firmware(&fw, fw_name, &pdev->dev)) {
         printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
-        goto error;
+        *bfi_image = NULL;
+        goto out;
     }
 
     *bfi_image = vmalloc(fw->size);
     if (NULL == *bfi_image) {
         printk(KERN_ALERT "Fail to allocate buffer for fw image "
               "size=%x!\n", (u32) fw->size);
-        goto error;
+        goto out;
     }
 
     memcpy(*bfi_image, fw->data, fw->size);
     *bfi_image_size = fw->size/sizeof(u32);
+out:
+    release_firmware(fw);
+}
 
-    return *bfi_image;
+static u32 *
+bfad_load_fwimg(struct pci_dev *pdev)
+{
+    if (bfa_asic_id_ct2(pdev->device)) {
+        if (bfi_image_ct2_size == 0)
+            bfad_read_firmware(pdev, &bfi_image_ct2,
+                &bfi_image_ct2_size, BFAD_FW_FILE_CT2);
+        return bfi_image_ct2;
+    } else if (bfa_asic_id_ct(pdev->device)) {
+        if (bfi_image_ct_size == 0)
+            bfad_read_firmware(pdev, &bfi_image_ct,
+                &bfi_image_ct_size, BFAD_FW_FILE_CT);
+        return bfi_image_ct;
+    } else if (bfa_asic_id_cb(pdev->device)) {
+        if (bfi_image_cb_size == 0)
+            bfad_read_firmware(pdev, &bfi_image_cb,
                &bfi_image_cb_size, BFAD_FW_FILE_CB);
+        return bfi_image_cb;
+    }
 
-error:
     return NULL;
 }
 
-u32 *
-bfad_get_firmware_buf(struct pci_dev *pdev)
+static void
+bfad_free_fwimg(void)
 {
-    if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
-        if (bfi_image_ct_fc_size == 0)
-            bfad_read_firmware(pdev, &bfi_image_ct_fc,
-                &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
-        return bfi_image_ct_fc;
-    } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
-        if (bfi_image_ct_cna_size == 0)
-            bfad_read_firmware(pdev, &bfi_image_ct_cna,
-                &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
-        return bfi_image_ct_cna;
-    } else {
-        if (bfi_image_cb_fc_size == 0)
-            bfad_read_firmware(pdev, &bfi_image_cb_fc,
-                &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
-        return bfi_image_cb_fc;
-    }
+    if (bfi_image_ct2_size && bfi_image_ct2)
+        vfree(bfi_image_ct2);
+    if (bfi_image_ct_size && bfi_image_ct)
+        vfree(bfi_image_ct);
+    if (bfi_image_cb_size && bfi_image_cb)
+        vfree(bfi_image_cb);
 }
 
 module_init(bfad_init);
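Note: the largest addition in this patch hooks bfad into the kernel's PCI error-recovery framework (AER/EEH) through a struct pci_error_handlers wired into the pci_driver. The skeleton below shows the same four-callback pattern in isolation; it is a minimal sketch for orientation only, not bfa code, and the demo_* names are hypothetical.

#include <linux/pci.h>

/* Classify the error; the return value tells the PCI core what to do next. */
static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        switch (state) {
        case pci_channel_io_normal:             /* non-fatal error */
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:             /* fatal error */
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:       /* card is dead */
        default:
                return PCI_ERS_RESULT_DISCONNECT;
        }
}

/* Called after the slot reset: re-enable the device and re-initialize it. */
static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
{
        if (pci_enable_device(pdev))
                return PCI_ERS_RESULT_DISCONNECT;
        pci_restore_state(pdev);
        pci_set_master(pdev);
        /* driver-specific re-init (cf. restart_bfa() above) would go here */
        return PCI_ERS_RESULT_RECOVERED;
}

/* Traffic can flow again: resume normal operation. */
static void demo_resume(struct pci_dev *pdev)
{
        dev_info(&pdev->dev, "recovered, resuming\n");
}

static struct pci_error_handlers demo_err_handler = {
        .error_detected = demo_error_detected,
        .slot_reset     = demo_slot_reset,
        .resume         = demo_resume,
};

static struct pci_driver demo_driver = {
        .name           = "demo",
        .err_handler    = &demo_err_handler,
        /* .id_table, .probe and .remove omitted in this sketch */
};

The division of labor mirrors the patch: error_detected() only classifies the failure and suspends I/O, slot_reset() re-enables and re-initializes the device, and resume() clears the driver's error-busy state, as bfad_pci_resume() does with BFAD_EEH_BUSY.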

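Note: the reworked firmware handling at the end of the patch requests each image once per ASIC family via request_firmware(), copies it into a vmalloc()'d cache, and releases the loader's buffer immediately; bfad_free_fwimg() vfree()s the caches later. A condensed sketch of that load-once pattern follows (single image, minimal error handling; the demo_* names and "demo_fw.bin" are hypothetical):

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static u32 *demo_fw_buf;        /* cached copy, kept for the module's lifetime */
static u32 demo_fw_words;       /* cached size in 32-bit words */

static u32 *demo_load_fwimg(struct pci_dev *pdev)
{
        const struct firmware *fw;

        if (demo_fw_words)              /* already cached: reuse the copy */
                return demo_fw_buf;

        if (request_firmware(&fw, "demo_fw.bin", &pdev->dev))
                return NULL;            /* image not found by the loader */

        demo_fw_buf = vmalloc(fw->size);
        if (demo_fw_buf) {
                memcpy(demo_fw_buf, fw->data, fw->size);
                demo_fw_words = fw->size / sizeof(u32);
        }
        release_firmware(fw);           /* safe: we keep our own copy */
        return demo_fw_buf;
}

The cache matters because the load routine runs once per probed PCI function, and multi-port adapters probe the same image several times; the bfi_image_*_size == 0 checks in bfad_load_fwimg() above serve exactly this purpose.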