Diffstat (limited to 'drivers/scsi/be2iscsi/be_main.c')
-rw-r--r-- | drivers/scsi/be2iscsi/be_main.c | 2818
1 file changed, 1932 insertions, 886 deletions
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index ff73f9500b0..56467df3d6d 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2011 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -47,8 +47,6 @@ static unsigned int be_iopoll_budget = 10; static unsigned int be_max_phys_size = 64; static unsigned int enable_msix = 1; -static unsigned int gcrashmode = 0; -static unsigned int num_hba = 0; MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); @@ -151,13 +149,67 @@ BEISCSI_RW_ATTR(log_enable, 0x00, "\t\t\t\tMiscellaneous Events : 0x04\n" "\t\t\t\tError Handling : 0x08\n" "\t\t\t\tIO Path Events : 0x10\n" - "\t\t\t\tConfiguration Path : 0x20\n"); - + "\t\t\t\tConfiguration Path : 0x20\n" + "\t\t\t\tiSCSI Protocol : 0x40\n"); + +DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); +DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); +DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); +DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL); +DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO, + beiscsi_active_session_disp, NULL); +DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO, + beiscsi_free_session_disp, NULL); struct device_attribute *beiscsi_attrs[] = { &dev_attr_beiscsi_log_enable, + &dev_attr_beiscsi_drvr_ver, + &dev_attr_beiscsi_adapter_family, + &dev_attr_beiscsi_fw_ver, + &dev_attr_beiscsi_active_session_count, + &dev_attr_beiscsi_free_session_count, + &dev_attr_beiscsi_phys_port, NULL, }; +static char const *cqe_desc[] = { + "RESERVED_DESC", + "SOL_CMD_COMPLETE", + "SOL_CMD_KILLED_DATA_DIGEST_ERR", + "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL", + "CXN_KILLED_BURST_LEN_MISMATCH", + "CXN_KILLED_AHS_RCVD", + "CXN_KILLED_HDR_DIGEST_ERR", + "CXN_KILLED_UNKNOWN_HDR", + "CXN_KILLED_STALE_ITT_TTT_RCVD", + "CXN_KILLED_INVALID_ITT_TTT_RCVD", + "CXN_KILLED_RST_RCVD", + "CXN_KILLED_TIMED_OUT", + "CXN_KILLED_RST_SENT", + "CXN_KILLED_FIN_RCVD", + "CXN_KILLED_BAD_UNSOL_PDU_RCVD", + "CXN_KILLED_BAD_WRB_INDEX_ERROR", + "CXN_KILLED_OVER_RUN_RESIDUAL", + "CXN_KILLED_UNDER_RUN_RESIDUAL", + "CMD_KILLED_INVALID_STATSN_RCVD", + "CMD_KILLED_INVALID_R2T_RCVD", + "CMD_CXN_KILLED_LUN_INVALID", + "CMD_CXN_KILLED_ICD_INVALID", + "CMD_CXN_KILLED_ITT_INVALID", + "CMD_CXN_KILLED_SEQ_OUTOFORDER", + "CMD_CXN_KILLED_INVALID_DATASN_RCVD", + "CXN_INVALIDATE_NOTIFY", + "CXN_INVALIDATE_INDEX_NOTIFY", + "CMD_INVALIDATED_NOTIFY", + "UNSOL_HDR_NOTIFY", + "UNSOL_DATA_NOTIFY", + "UNSOL_DATA_DIGEST_ERROR_NOTIFY", + "DRIVERMSG_NOTIFY", + "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN", + "SOL_CMD_KILLED_DIF_ERR", + "CXN_KILLED_SYN_RCVD", + "CXN_KILLED_IMM_DATA_RCVD" +}; + static int beiscsi_slave_configure(struct scsi_device *sdev) { blk_queue_max_segment_size(sdev->request_queue, 65536); @@ -176,24 +228,30 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc) struct invalidate_command_table *inv_tbl; struct be_dma_mem nonemb_cmd; unsigned int cid, tag, num_invalidate; + int rc; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); if (!aborted_task || !aborted_task->sc) { /* we raced */ - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); return SUCCESS; } aborted_io_task = aborted_task->dd_data; if 
(!aborted_io_task->scsi_cmnd) { /* raced or invalid command */ - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); return SUCCESS; } - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); + /* Invalidate WRB Posted for this Task */ + AMAP_SET_BITS(struct amap_iscsi_wrb, invld, + aborted_io_task->pwrb_handle->pwrb, + 1); + conn = aborted_task->conn; beiscsi_conn = conn->dd_data; phba = beiscsi_conn->phba; @@ -226,13 +284,13 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc) nonemb_cmd.va, nonemb_cmd.dma); return FAILED; - } else { - wait_event_interruptible(phba->ctrl.mcc_wait[tag], - phba->ctrl.mcc_numtag[tag]); - free_mcc_tag(&phba->ctrl, tag); } - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, - nonemb_cmd.va, nonemb_cmd.dma); + + rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); + if (rc != -EBUSY) + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + return iscsi_eh_abort(sc); } @@ -248,13 +306,14 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) struct invalidate_command_table *inv_tbl; struct be_dma_mem nonemb_cmd; unsigned int cid, tag, i, num_invalidate; + int rc; /* invalidate iocbs */ cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->frwd_lock); if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) { - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); return FAILED; } conn = session->leadconn; @@ -270,15 +329,20 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) continue; - if (abrt_task->sc->device->lun != abrt_task->sc->device->lun) + if (sc->device->lun != abrt_task->sc->device->lun) continue; + /* Invalidate WRB Posted for this Task */ + AMAP_SET_BITS(struct amap_iscsi_wrb, invld, + abrt_io_task->pwrb_handle->pwrb, + 1); + inv_tbl->cid = cid; inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index; num_invalidate++; inv_tbl++; } - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->frwd_lock); inv_tbl = phba->inv_tbl; nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, @@ -301,13 +365,12 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); return FAILED; - } else { - wait_event_interruptible(phba->ctrl.mcc_wait[tag], - phba->ctrl.mcc_numtag[tag]); - free_mcc_tag(&phba->ctrl, tag); } - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, - nonemb_cmd.va, nonemb_cmd.dma); + + rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); + if (rc != -EBUSY) + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); return iscsi_eh_device_reset(sc); } @@ -482,6 +545,7 @@ static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, + { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) }, { 0 } }; MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); @@ -535,15 +599,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) pci_set_drvdata(pcidev, phba); phba->interface_handle = 0xFFFFFFFF; - if (iscsi_host_add(shost, &phba->pcidev->dev)) - goto free_devices; - return phba; - -free_devices: - pci_dev_put(phba->pcidev); - iscsi_host_free(phba->shost); - return NULL; } static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) @@ -615,8 
+671,19 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev) } pci_set_master(pcidev); - if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) { - ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); + ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); + if (ret) { + ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); + pci_disable_device(pcidev); + return ret; + } else { + ret = pci_set_consistent_dma_mask(pcidev, + DMA_BIT_MASK(32)); + } + } else { + ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64)); if (ret) { dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); pci_disable_device(pcidev); @@ -657,30 +724,85 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) return status; } +/** + * beiscsi_get_params()- Set the config paramters + * @phba: ptr device priv structure + **/ static void beiscsi_get_params(struct beiscsi_hba *phba) { - phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count - - (phba->fw_config.iscsi_cid_count - + BE2_TMFS - + BE2_NOPOUT_REQ)); - phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; - phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2; - phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count; + uint32_t total_cid_count = 0; + uint32_t total_icd_count = 0; + uint8_t ulp_num = 0; + + total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) + + BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1); + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + uint32_t align_mask = 0; + uint32_t icd_post_per_page = 0; + uint32_t icd_count_unavailable = 0; + uint32_t icd_start = 0, icd_count = 0; + uint32_t icd_start_align = 0, icd_count_align = 0; + + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; + icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; + + /* Get ICD count that can be posted on each page */ + icd_post_per_page = (PAGE_SIZE / (BE2_SGE * + sizeof(struct iscsi_sge))); + align_mask = (icd_post_per_page - 1); + + /* Check if icd_start is aligned ICD per page posting */ + if (icd_start % icd_post_per_page) { + icd_start_align = ((icd_start + + icd_post_per_page) & + ~(align_mask)); + phba->fw_config. + iscsi_icd_start[ulp_num] = + icd_start_align; + } + + icd_count_align = (icd_count & ~align_mask); + + /* ICD discarded in the process of alignment */ + if (icd_start_align) + icd_count_unavailable = ((icd_start_align - + icd_start) + + (icd_count - + icd_count_align)); + + /* Updated ICD count available */ + phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count - + icd_count_unavailable); + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : Aligned ICD values\n" + "\t ICD Start : %d\n" + "\t ICD Count : %d\n" + "\t ICD Discarded : %d\n", + phba->fw_config. + iscsi_icd_start[ulp_num], + phba->fw_config. 
+ iscsi_icd_count[ulp_num], + icd_count_unavailable); + break; + } + } + + total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; + phba->params.ios_per_ctrl = (total_icd_count - + (total_cid_count + + BE2_TMFS + BE2_NOPOUT_REQ)); + phba->params.cxns_per_ctrl = total_cid_count; + phba->params.asyncpdus_per_ctrl = total_cid_count; + phba->params.icds_per_ctrl = total_icd_count; phba->params.num_sge_per_io = BE2_SGE; phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; phba->params.eq_timer = 64; - phba->params.num_eq_entries = - (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 - + BE2_TMFS) / 512) + 1) * 512; - phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024) - ? 1024 : phba->params.num_eq_entries; - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : phba->params.num_eq_entries=%d\n", - phba->params.num_eq_entries); - phba->params.num_cq_entries = - (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 - + BE2_TMFS) / 512) + 1) * 512; + phba->params.num_eq_entries = 1024; + phba->params.num_cq_entries = 1024; phba->params.wrbs_per_cxn = 256; } @@ -690,14 +812,23 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba, unsigned char rearm, unsigned char event) { u32 val = 0; - val |= id & DB_EQ_RING_ID_MASK; + if (rearm) val |= 1 << DB_EQ_REARM_SHIFT; if (clr_interrupt) val |= 1 << DB_EQ_CLR_SHIFT; if (event) val |= 1 << DB_EQ_EVNT_SHIFT; + val |= num_processed << DB_EQ_NUM_POPPED_SHIFT; + /* Setting lower order EQ_ID Bits */ + val |= (id & DB_EQ_RING_ID_LOW_MASK); + + /* Setting Higher order EQ_ID Bits */ + val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) & + DB_EQ_RING_ID_HIGH_MASK) + << DB_EQ_HIGH_SET_SHIFT); + iowrite32(val, phba->db_va + DB_EQ_OFFSET); } @@ -730,7 +861,7 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id) resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id) { spin_lock_irqsave(&phba->isr_lock, flags); - phba->todo_mcc_cq = 1; + pbe_eq->todo_mcc_cq = true; spin_unlock_irqrestore(&phba->isr_lock, flags); } AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); @@ -738,8 +869,8 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id) eqe = queue_tail_node(eq); num_eq_processed++; } - if (phba->todo_mcc_cq) - queue_work(phba->wq, &phba->work_cqs); + if (pbe_eq->todo_mcc_cq) + queue_work(phba->wq, &pbe_eq->work_cqs); if (num_eq_processed) hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1); @@ -759,7 +890,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id) struct be_queue_info *cq; unsigned int num_eq_processed; struct be_eq_obj *pbe_eq; - unsigned long flags; pbe_eq = dev_id; eq = &pbe_eq->q; @@ -768,40 +898,21 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id) phba = pbe_eq->phba; num_eq_processed = 0; - if (blk_iopoll_enabled) { - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) - blk_iopoll_sched(&pbe_eq->iopoll); - - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); - eqe = queue_tail_node(eq); - num_eq_processed++; - } - if (num_eq_processed) - hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1); + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] + & EQE_VALID_MASK) { + if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) + blk_iopoll_sched(&pbe_eq->iopoll); - return IRQ_HANDLED; - } else { - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - spin_lock_irqsave(&phba->isr_lock, flags); - phba->todo_cq = 1; - 
spin_unlock_irqrestore(&phba->isr_lock, flags); - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); - eqe = queue_tail_node(eq); - num_eq_processed++; - } - if (phba->todo_cq) - queue_work(phba->wq, &phba->work_cqs); + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + num_eq_processed++; + } - if (num_eq_processed) - hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1); + if (num_eq_processed) + hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1); - return IRQ_HANDLED; - } + return IRQ_HANDLED; } /** @@ -816,7 +927,6 @@ static irqreturn_t be_isr(int irq, void *dev_id) struct hwi_context_memory *phwi_context; struct be_eq_entry *eqe = NULL; struct be_queue_info *eq; - struct be_queue_info *cq; struct be_queue_info *mcc; unsigned long flags, index; unsigned int num_mcceq_processed, num_ioeq_processed; @@ -842,72 +952,40 @@ static irqreturn_t be_isr(int irq, void *dev_id) num_ioeq_processed = 0; num_mcceq_processed = 0; - if (blk_iopoll_enabled) { - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - if (((eqe->dw[offsetof(struct amap_eq_entry, - resource_id) / 32] & - EQE_RESID_MASK) >> 16) == mcc->id) { - spin_lock_irqsave(&phba->isr_lock, flags); - phba->todo_mcc_cq = 1; - spin_unlock_irqrestore(&phba->isr_lock, flags); - num_mcceq_processed++; - } else { - if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) - blk_iopoll_sched(&pbe_eq->iopoll); - num_ioeq_processed++; - } - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); - eqe = queue_tail_node(eq); - } - if (num_ioeq_processed || num_mcceq_processed) { - if (phba->todo_mcc_cq) - queue_work(phba->wq, &phba->work_cqs); - - if ((num_mcceq_processed) && (!num_ioeq_processed)) - hwi_ring_eq_db(phba, eq->id, 0, - (num_ioeq_processed + - num_mcceq_processed) , 1, 1); - else - hwi_ring_eq_db(phba, eq->id, 0, - (num_ioeq_processed + - num_mcceq_processed), 0, 1); - - return IRQ_HANDLED; - } else - return IRQ_NONE; - } else { - cq = &phwi_context->be_cq[0]; - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - - if (((eqe->dw[offsetof(struct amap_eq_entry, - resource_id) / 32] & - EQE_RESID_MASK) >> 16) != cq->id) { - spin_lock_irqsave(&phba->isr_lock, flags); - phba->todo_mcc_cq = 1; - spin_unlock_irqrestore(&phba->isr_lock, flags); - } else { - spin_lock_irqsave(&phba->isr_lock, flags); - phba->todo_cq = 1; - spin_unlock_irqrestore(&phba->isr_lock, flags); - } - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); - eqe = queue_tail_node(eq); + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] + & EQE_VALID_MASK) { + if (((eqe->dw[offsetof(struct amap_eq_entry, + resource_id) / 32] & + EQE_RESID_MASK) >> 16) == mcc->id) { + spin_lock_irqsave(&phba->isr_lock, flags); + pbe_eq->todo_mcc_cq = true; + spin_unlock_irqrestore(&phba->isr_lock, flags); + num_mcceq_processed++; + } else { + if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) + blk_iopoll_sched(&pbe_eq->iopoll); num_ioeq_processed++; } - if (phba->todo_cq || phba->todo_mcc_cq) - queue_work(phba->wq, &phba->work_cqs); + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + } + if (num_ioeq_processed || num_mcceq_processed) { + if (pbe_eq->todo_mcc_cq) + queue_work(phba->wq, &pbe_eq->work_cqs); - if (num_ioeq_processed) { + if ((num_mcceq_processed) && (!num_ioeq_processed)) hwi_ring_eq_db(phba, eq->id, 0, - num_ioeq_processed, 1, 1); - return 
IRQ_HANDLED; - } else - return IRQ_NONE; - } + (num_ioeq_processed + + num_mcceq_processed) , 1, 1); + else + hwi_ring_eq_db(phba, eq->id, 0, + (num_ioeq_processed + + num_mcceq_processed), 0, 1); + + return IRQ_HANDLED; + } else + return IRQ_NONE; } static int beiscsi_init_irqs(struct beiscsi_hba *phba) @@ -982,22 +1060,31 @@ free_msix_irqs: return ret; } -static void hwi_ring_cq_db(struct beiscsi_hba *phba, +void hwi_ring_cq_db(struct beiscsi_hba *phba, unsigned int id, unsigned int num_processed, unsigned char rearm, unsigned char event) { u32 val = 0; - val |= id & DB_CQ_RING_ID_MASK; + if (rearm) val |= 1 << DB_CQ_REARM_SHIFT; + val |= num_processed << DB_CQ_NUM_POPPED_SHIFT; + + /* Setting lower order CQ_ID Bits */ + val |= (id & DB_CQ_RING_ID_LOW_MASK); + + /* Setting Higher order CQ_ID Bits */ + val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) & + DB_CQ_RING_ID_HIGH_MASK) + << DB_CQ_HIGH_SET_SHIFT); + iowrite32(val, phba->db_va + DB_CQ_OFFSET); } static unsigned int beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, struct beiscsi_hba *phba, - unsigned short cid, struct pdu_base *ppdu, unsigned long pdu_len, void *pbuffer, unsigned long buf_len) @@ -1040,9 +1127,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, return 1; } - spin_lock_bh(&session->lock); + spin_lock_bh(&session->back_lock); __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len); - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->back_lock); return 0; } @@ -1109,9 +1196,10 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid) struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; struct wrb_handle *pwrb_handle, *pwrb_handle_tmp; + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[cid]; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (pwrb_context->wrb_handles_available >= 2) { pwrb_handle = pwrb_context->pwrb_handle_base[ pwrb_context->alloc_index]; @@ -1211,7 +1299,8 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) static void be_complete_io(struct beiscsi_conn *beiscsi_conn, - struct iscsi_task *task, struct sol_cqe *psol) + struct iscsi_task *task, + struct common_sol_cqe *csol_cqe) { struct beiscsi_io_task *io_task = task->dd_data; struct be_status_bhs *sts_bhs = @@ -1221,23 +1310,19 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, u32 resid = 0, exp_cmdsn, max_cmdsn; u8 rsp, status, flags; - exp_cmdsn = (psol-> - dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] - & SOL_EXP_CMD_SN_MASK); - max_cmdsn = ((psol-> - dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] - & SOL_EXP_CMD_SN_MASK) + - ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) - / 32] & SOL_CMD_WND_MASK) >> 24) - 1); - rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32] - & SOL_RESP_MASK) >> 16); - status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32] - & SOL_STS_MASK) >> 8); - flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] - & SOL_FLAGS_MASK) >> 24) | 0x80; + exp_cmdsn = csol_cqe->exp_cmdsn; + max_cmdsn = (csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); + rsp = csol_cqe->i_resp; + status = csol_cqe->i_sts; + flags = csol_cqe->i_flags; + resid = csol_cqe->res_cnt; + if (!task->sc) { - if (io_task->scsi_cmnd) + if (io_task->scsi_cmnd) { scsi_dma_unmap(io_task->scsi_cmnd); + io_task->scsi_cmnd = NULL; + } return; } @@ -1249,9 +1334,6 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, /* bidi not initially 
supported */ if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) { - resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / - 32] & SOL_RES_CNT_MASK); - if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW)) task->sc->result = DID_ERROR << 16; @@ -1273,21 +1355,18 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); } - if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) { - if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] - & SOL_RES_CNT_MASK) - conn->rxdata_octets += (psol-> - dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] - & SOL_RES_CNT_MASK); - } + if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) + conn->rxdata_octets += resid; unmap: scsi_dma_unmap(io_task->scsi_cmnd); + io_task->scsi_cmnd = NULL; iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn); } static void be_complete_logout(struct beiscsi_conn *beiscsi_conn, - struct iscsi_task *task, struct sol_cqe *psol) + struct iscsi_task *task, + struct common_sol_cqe *csol_cqe) { struct iscsi_logout_rsp *hdr; struct beiscsi_io_task *io_task = task->dd_data; @@ -1297,18 +1376,12 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn, hdr->opcode = ISCSI_OP_LOGOUT_RSP; hdr->t2wait = 5; hdr->t2retain = 0; - hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] - & SOL_FLAGS_MASK) >> 24) | 0x80; - hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) / - 32] & SOL_RESP_MASK); - hdr->exp_cmdsn = cpu_to_be32(psol-> - dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] - & SOL_EXP_CMD_SN_MASK); - hdr->max_cmdsn = be32_to_cpu((psol-> - dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] - & SOL_EXP_CMD_SN_MASK) + - ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) - / 32] & SOL_CMD_WND_MASK) >> 24) - 1); + hdr->flags = csol_cqe->i_flags; + hdr->response = csol_cqe->i_resp; + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); + hdr->dlength[0] = 0; hdr->dlength[1] = 0; hdr->dlength[2] = 0; @@ -1319,7 +1392,8 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn, static void be_complete_tmf(struct beiscsi_conn *beiscsi_conn, - struct iscsi_task *task, struct sol_cqe *psol) + struct iscsi_task *task, + struct common_sol_cqe *csol_cqe) { struct iscsi_tm_rsp *hdr; struct iscsi_conn *conn = beiscsi_conn->conn; @@ -1327,16 +1401,12 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn, hdr = (struct iscsi_tm_rsp *)task->hdr; hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; - hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] - & SOL_FLAGS_MASK) >> 24) | 0x80; - hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) / - 32] & SOL_RESP_MASK); - hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe, - i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK); - hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe, - i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) + - ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) - / 32] & SOL_CMD_WND_MASK) >> 24) - 1); + hdr->flags = csol_cqe->i_flags; + hdr->response = csol_cqe->i_resp; + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); + hdr->itt = io_task->libiscsi_itt; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); } @@ -1350,50 +1420,105 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, struct hwi_controller *phwi_ctrlr; struct iscsi_task *task; struct beiscsi_io_task 
*io_task; - struct iscsi_conn *conn = beiscsi_conn->conn; - struct iscsi_session *session = conn->session; + uint16_t wrb_index, cid, cri_index; phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[((psol-> - dw[offsetof(struct amap_sol_cqe, cid) / 32] & - SOL_CID_MASK) >> 6) - - phba->fw_config.iscsi_cid_start]; - pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> - dw[offsetof(struct amap_sol_cqe, wrb_index) / - 32] & SOL_WRB_INDEX_MASK) >> 16)]; + if (is_chip_be2_be3r(phba)) { + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + wrb_idx, psol); + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + cid, psol); + } else { + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + wrb_idx, psol); + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + cid, psol); + } + + cri_index = BE_GET_CRI_FROM_CID(cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; task = pwrb_handle->pio_handle; io_task = task->dd_data; - spin_lock_bh(&phba->mgmt_sgl_lock); - free_mgmt_sgl_handle(phba, io_task->psgl_handle); - spin_unlock_bh(&phba->mgmt_sgl_lock); - spin_lock_bh(&session->lock); - free_wrb_handle(phba, pwrb_context, pwrb_handle); - spin_unlock_bh(&session->lock); + memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb)); + iscsi_put_task(task); } static void be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, - struct iscsi_task *task, struct sol_cqe *psol) + struct iscsi_task *task, + struct common_sol_cqe *csol_cqe) { struct iscsi_nopin *hdr; struct iscsi_conn *conn = beiscsi_conn->conn; struct beiscsi_io_task *io_task = task->dd_data; hdr = (struct iscsi_nopin *)task->hdr; - hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] - & SOL_FLAGS_MASK) >> 24) | 0x80; - hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe, - i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK); - hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe, - i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) + - ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) - / 32] & SOL_CMD_WND_MASK) >> 24) - 1); + hdr->flags = csol_cqe->i_flags; + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); + hdr->opcode = ISCSI_OP_NOOP_IN; hdr->itt = io_task->libiscsi_itt; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); } +static void adapter_get_sol_cqe(struct beiscsi_hba *phba, + struct sol_cqe *psol, + struct common_sol_cqe *csol_cqe) +{ + if (is_chip_be2_be3r(phba)) { + csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, + i_exp_cmd_sn, psol); + csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, + i_res_cnt, psol); + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, + i_cmd_wnd, psol); + csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, + wrb_index, psol); + csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, + cid, psol); + csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, + hw_sts, psol); + csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, + i_resp, psol); + csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, + i_sts, psol); + csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, + i_flags, psol); + } else { + csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, + i_exp_cmd_sn, psol); + csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, + i_res_cnt, psol); + csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2, + wrb_index, psol); + csol_cqe->cid = AMAP_GET_BITS(struct 
amap_sol_cqe_v2, + cid, psol); + csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, + hw_sts, psol); + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, + i_cmd_wnd, psol); + if (AMAP_GET_BITS(struct amap_sol_cqe_v2, + cmd_cmpl, psol)) + csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, + i_sts, psol); + else + csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2, + i_sts, psol); + if (AMAP_GET_BITS(struct amap_sol_cqe_v2, + u, psol)) + csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW; + + if (AMAP_GET_BITS(struct amap_sol_cqe_v2, + o, psol)) + csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; + } +} + + static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, struct beiscsi_hba *phba, struct sol_cqe *psol) { @@ -1405,37 +1530,40 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, unsigned int type; struct iscsi_conn *conn = beiscsi_conn->conn; struct iscsi_session *session = conn->session; + struct common_sol_cqe csol_cqe = {0}; + uint16_t cri_index = 0; phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof - (struct amap_sol_cqe, cid) / 32] - & SOL_CID_MASK) >> 6) - - phba->fw_config.iscsi_cid_start]; - pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> - dw[offsetof(struct amap_sol_cqe, wrb_index) / - 32] & SOL_WRB_INDEX_MASK) >> 16)]; + + /* Copy the elements to a common structure */ + adapter_get_sol_cqe(phba, psol, &csol_cqe); + + cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + + pwrb_handle = pwrb_context->pwrb_handle_basestd[ + csol_cqe.wrb_index]; + task = pwrb_handle->pio_handle; pwrb = pwrb_handle->pwrb; - type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] & - WRB_TYPE_MASK) >> 28; + type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->back_lock); switch (type) { case HWH_TYPE_IO: case HWH_TYPE_IO_RD: if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_NOOP_OUT) - be_complete_nopin_resp(beiscsi_conn, task, psol); + be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); else - be_complete_io(beiscsi_conn, task, psol); + be_complete_io(beiscsi_conn, task, &csol_cqe); break; case HWH_TYPE_LOGOUT: if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) - be_complete_logout(beiscsi_conn, task, psol); + be_complete_logout(beiscsi_conn, task, &csol_cqe); else - be_complete_tmf(beiscsi_conn, task, psol); - + be_complete_tmf(beiscsi_conn, task, &csol_cqe); break; case HWH_TYPE_LOGIN: @@ -1446,7 +1574,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, break; case HWH_TYPE_NOP: - be_complete_nopin_resp(beiscsi_conn, task, psol); + be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); break; default: @@ -1454,14 +1582,12 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, "BM_%d : In hwi_complete_cmd, unknown type = %d" "wrb_index 0x%x CID 0x%x\n", type, - ((psol->dw[offsetof(struct amap_iscsi_wrb, - type) / 32] & SOL_WRB_INDEX_MASK) >> 16), - ((psol->dw[offsetof(struct amap_sol_cqe, - cid) / 32] & SOL_CID_MASK) >> 6)); + csol_cqe.wrb_index, + csol_cqe.cid); break; } - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->back_lock); } static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context @@ -1485,13 +1611,26 @@ hwi_get_async_handle(struct beiscsi_hba *phba, struct list_head *pbusy_list; struct async_pdu_handle *pasync_handle = NULL; unsigned char is_header = 0; + unsigned 
int index, dpl; + + if (is_chip_be2_be3r(phba)) { + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + dpl, pdpdu_cqe); + index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + index, pdpdu_cqe); + } else { + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + dpl, pdpdu_cqe); + index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + index, pdpdu_cqe); + } phys_addr.u.a32.address_lo = - pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] - - ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32] - & PDUCQE_DPL_MASK) >> 16); + (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, + db_addr_lo) / 32] - dpl); phys_addr.u.a32.address_hi = - pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32]; + pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, + db_addr_hi) / 32]; phys_addr.u.a64.address = *((unsigned long long *)(&phys_addr.u.a64.address)); @@ -1501,14 +1640,12 @@ hwi_get_async_handle(struct beiscsi_hba *phba, case UNSOL_HDR_NOTIFY: is_header = 1; - pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1, - (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, - index) / 32] & PDUCQE_INDEX_MASK)); + pbusy_list = hwi_get_async_busy_list(pasync_ctx, + is_header, index); break; case UNSOL_DATA_NOTIFY: - pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe-> - dw[offsetof(struct amap_i_t_dpdu_cqe, - index) / 32] & PDUCQE_INDEX_MASK)); + pbusy_list = hwi_get_async_busy_list(pasync_ctx, + is_header, index); break; default: pbusy_list = NULL; @@ -1528,15 +1665,12 @@ hwi_get_async_handle(struct beiscsi_hba *phba, WARN_ON(!pasync_handle); - pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start; + pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); pasync_handle->is_header = is_header; - pasync_handle->buffer_len = ((pdpdu_cqe-> - dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32] - & PDUCQE_DPL_MASK) >> 16); + pasync_handle->buffer_len = dpl; + *pcq_index = index; - *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, - index) / 32] & PDUCQE_INDEX_MASK); return pasync_handle; } @@ -1592,18 +1726,13 @@ hwi_update_async_writables(struct beiscsi_hba *phba, } static void hwi_free_async_msg(struct beiscsi_hba *phba, - unsigned int cri) + struct hwi_async_pdu_context *pasync_ctx, + unsigned int cri) { - struct hwi_controller *phwi_ctrlr; - struct hwi_async_pdu_context *pasync_ctx; struct async_pdu_handle *pasync_handle, *tmp_handle; struct list_head *plist; - phwi_ctrlr = phba->phwi_ctrlr; - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); - plist = &pasync_ctx->async_entry[cri].wait_queue.list; - list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) { list_del(&pasync_handle->link); @@ -1638,7 +1767,7 @@ hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx, } static void hwi_post_async_buffers(struct beiscsi_hba *phba, - unsigned int is_header) + unsigned int is_header, uint8_t ulp_num) { struct hwi_controller *phwi_ctrlr; struct hwi_async_pdu_context *pasync_ctx; @@ -1646,13 +1775,13 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba, struct list_head *pfree_link, *pbusy_list; struct phys_addr *pasync_sge; unsigned int ring_id, num_entries; - unsigned int host_write_num; + unsigned int host_write_num, doorbell_offset; unsigned int writables; unsigned int i = 0; u32 doorbell = 0; phwi_ctrlr = phba->phwi_ctrlr; - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); num_entries = pasync_ctx->num_entries; if 
(is_header) { @@ -1660,13 +1789,17 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba, pasync_ctx->async_header.free_entries); pfree_link = pasync_ctx->async_header.free_list.next; host_write_num = pasync_ctx->async_header.host_write_ptr; - ring_id = phwi_ctrlr->default_pdu_hdr.id; + ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id; + doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num]. + doorbell_offset; } else { writables = min(pasync_ctx->async_data.writables, pasync_ctx->async_data.free_entries); pfree_link = pasync_ctx->async_data.free_list.next; host_write_num = pasync_ctx->async_data.host_write_ptr; - ring_id = phwi_ctrlr->default_pdu_data.id; + ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id; + doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num]. + doorbell_offset; } writables = (writables / 8) * 8; @@ -1714,7 +1847,7 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba, doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT; - iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET); + iowrite32(doorbell, phba->db_va + doorbell_offset); } } @@ -1726,9 +1859,13 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba, struct hwi_async_pdu_context *pasync_ctx; struct async_pdu_handle *pasync_handle = NULL; unsigned int cq_index = -1; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); phwi_ctrlr = phba->phwi_ctrlr; - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, + BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, + cri_index)); pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, pdpdu_cqe, &cq_index); @@ -1737,8 +1874,10 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba, hwi_update_async_writables(phba, pasync_ctx, pasync_handle->is_header, cq_index); - hwi_free_async_msg(phba, pasync_handle->cri); - hwi_post_async_buffers(phba, pasync_handle->is_header); + hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri); + hwi_post_async_buffers(phba, pasync_handle->is_header, + BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, + cri_index)); } static unsigned int @@ -1774,12 +1913,10 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn, } status = beiscsi_process_async_pdu(beiscsi_conn, phba, - (beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start), phdr, hdr_len, pfirst_buffer, offset); - hwi_free_async_msg(phba, cri); + hwi_free_async_msg(phba, pasync_ctx, cri); return 0; } @@ -1795,13 +1932,16 @@ hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn, struct pdu_base *ppdu; phwi_ctrlr = phba->phwi_ctrlr; - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, + BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, + BE_GET_CRI_FROM_CID(beiscsi_conn-> + beiscsi_conn_cid))); list_del(&pasync_handle->link); if (pasync_handle->is_header) { pasync_ctx->async_header.busy_entries--; if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) { - hwi_free_async_msg(phba, cri); + hwi_free_async_msg(phba, pasync_ctx, cri); BUG(); } @@ -1856,9 +1996,14 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, struct hwi_async_pdu_context *pasync_ctx; struct async_pdu_handle *pasync_handle = NULL; unsigned int cq_index = -1; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); phwi_ctrlr = phba->phwi_ctrlr; - pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); + pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, + BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, + cri_index)); + 
pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, pdpdu_cqe, &cq_index); @@ -1867,7 +2012,9 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, pasync_handle->is_header, cq_index); hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle); - hwi_post_async_buffers(phba, pasync_handle->is_header); + hwi_post_async_buffers(phba, pasync_handle->is_header, + BEISCSI_GET_ULP_FROM_CRI( + phwi_ctrlr, cri_index)); } static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) @@ -1914,6 +2061,13 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) } +/** + * beiscsi_process_cq()- Process the Completion Queue + * @pbe_eq: Event Q on which the Completion has come + * + * return + * Number of Completion Entries processed. + **/ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) { struct be_queue_info *cq; @@ -1922,6 +2076,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) unsigned int num_processed = 0; unsigned int tot_nump = 0; unsigned short code = 0, cid = 0; + uint16_t cri_index = 0; struct beiscsi_conn *beiscsi_conn; struct beiscsi_endpoint *beiscsi_ep; struct iscsi_endpoint *ep; @@ -1935,12 +2090,26 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) CQE_VALID_MASK) { be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); - cid = ((sol->dw[offsetof(struct amap_sol_cqe, cid)/32] & - CQE_CID_MASK) >> 6); - code = (sol->dw[offsetof(struct amap_sol_cqe, code)/32] & - CQE_CODE_MASK); - ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start]; + code = (sol->dw[offsetof(struct amap_sol_cqe, code) / + 32] & CQE_CODE_MASK); + + /* Get the CID */ + if (is_chip_be2_be3r(phba)) { + cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); + } else { + if ((code == DRIVERMSG_NOTIFY) || + (code == UNSOL_HDR_NOTIFY) || + (code == UNSOL_DATA_NOTIFY)) + cid = AMAP_GET_BITS( + struct amap_i_t_dpdu_cqe_v2, + cid, sol); + else + cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, + cid, sol); + } + cri_index = BE_GET_CRI_FROM_CID(cid); + ep = phba->ep_array[cri_index]; beiscsi_ep = ep->dd_data; beiscsi_conn = beiscsi_ep->conn; @@ -1958,7 +2127,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) case DRIVERMSG_NOTIFY: beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, - "BM_%d : Received DRIVERMSG_NOTIFY\n"); + "BM_%d : Received %s[%d] on CID : %d\n", + cqe_desc[code], code, cid); dmsg = (struct dmsg_cqe *)sol; hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); @@ -1966,26 +2136,32 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) case UNSOL_HDR_NOTIFY: beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, - "BM_%d : Received UNSOL_HDR_ NOTIFY\n"); + "BM_%d : Received %s[%d] on CID : %d\n", + cqe_desc[code], code, cid); + spin_lock_bh(&phba->async_pdu_lock); hwi_process_default_pdu_ring(beiscsi_conn, phba, (struct i_t_dpdu_cqe *)sol); + spin_unlock_bh(&phba->async_pdu_lock); break; case UNSOL_DATA_NOTIFY: beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, - "BM_%d : Received UNSOL_DATA_NOTIFY\n"); + "BM_%d : Received %s[%d] on CID : %d\n", + cqe_desc[code], code, cid); + spin_lock_bh(&phba->async_pdu_lock); hwi_process_default_pdu_ring(beiscsi_conn, phba, (struct i_t_dpdu_cqe *)sol); + spin_unlock_bh(&phba->async_pdu_lock); break; case CXN_INVALIDATE_INDEX_NOTIFY: case CMD_INVALIDATED_NOTIFY: case CXN_INVALIDATE_NOTIFY: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, - "BM_%d : Ignoring CQ Error notification for" - " cmd/cxn 
invalidate\n"); + "BM_%d : Ignoring %s[%d] on CID : %d\n", + cqe_desc[code], code, cid); break; case SOL_CMD_KILLED_DATA_DIGEST_ERR: case CMD_KILLED_INVALID_STATSN_RCVD: @@ -1997,16 +2173,18 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) case CMD_CXN_KILLED_INVALID_DATASN_RCVD: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, - "BM_%d : CQ Error notification for cmd.. " - "code %d cid 0x%x\n", code, cid); + "BM_%d : Cmd Notification %s[%d] on CID : %d\n", + cqe_desc[code], code, cid); break; case UNSOL_DATA_DIGEST_ERROR_NOTIFY: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, - "BM_%d : Digest error on def pdu ring," - " dropping..\n"); + "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", + cqe_desc[code], code, cid); + spin_lock_bh(&phba->async_pdu_lock); hwi_flush_default_pdu_buffer(phba, beiscsi_conn, (struct i_t_dpdu_cqe *) sol); + spin_unlock_bh(&phba->async_pdu_lock); break; case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: case CXN_KILLED_BURST_LEN_MISMATCH: @@ -2017,6 +2195,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) case CXN_KILLED_INVALID_ITT_TTT_RCVD: case CXN_KILLED_TIMED_OUT: case CXN_KILLED_FIN_RCVD: + case CXN_KILLED_RST_SENT: + case CXN_KILLED_RST_RCVD: case CXN_KILLED_BAD_UNSOL_PDU_RCVD: case CXN_KILLED_BAD_WRB_INDEX_ERROR: case CXN_KILLED_OVER_RUN_RESIDUAL: @@ -2024,19 +2204,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, - "BM_%d : CQ Error %d, reset CID 0x%x...\n", - code, cid); - if (beiscsi_conn) - iscsi_conn_failure(beiscsi_conn->conn, - ISCSI_ERR_CONN_FAILED); - break; - case CXN_KILLED_RST_SENT: - case CXN_KILLED_RST_RCVD: - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, - "BM_%d : CQ Error %d, reset" - "received/sent on CID 0x%x...\n", - code, cid); + "BM_%d : Event %s[%d] received on CID : %d\n", + cqe_desc[code], code, cid); if (beiscsi_conn) iscsi_conn_failure(beiscsi_conn->conn, ISCSI_ERR_CONN_FAILED); @@ -2044,8 +2213,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) default: beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, - "BM_%d : CQ Error Invalid code= %d " - "received on CID 0x%x...\n", + "BM_%d : Invalid CQE Event Received Code : %d" + "CID 0x%x...\n", code, cid); break; } @@ -2068,40 +2237,41 @@ void beiscsi_process_all_cqs(struct work_struct *work) unsigned long flags; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; - struct be_eq_obj *pbe_eq; - struct beiscsi_hba *phba = - container_of(work, struct beiscsi_hba, work_cqs); + struct beiscsi_hba *phba; + struct be_eq_obj *pbe_eq = + container_of(work, struct be_eq_obj, work_cqs); + phba = pbe_eq->phba; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - if (phba->msix_enabled) - pbe_eq = &phwi_context->be_eq[phba->num_cpus]; - else - pbe_eq = &phwi_context->be_eq[0]; - if (phba->todo_mcc_cq) { + if (pbe_eq->todo_mcc_cq) { spin_lock_irqsave(&phba->isr_lock, flags); - phba->todo_mcc_cq = 0; + pbe_eq->todo_mcc_cq = false; spin_unlock_irqrestore(&phba->isr_lock, flags); beiscsi_process_mcc_isr(phba); } - if (phba->todo_cq) { + if (pbe_eq->todo_cq) { spin_lock_irqsave(&phba->isr_lock, flags); - phba->todo_cq = 0; + pbe_eq->todo_cq = false; spin_unlock_irqrestore(&phba->isr_lock, flags); beiscsi_process_cq(pbe_eq); } + + /* rearm EQ for further interrupts */ + hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); } 
static int be_iopoll(struct blk_iopoll *iop, int budget) { - static unsigned int ret; + unsigned int ret; struct beiscsi_hba *phba; struct be_eq_obj *pbe_eq; pbe_eq = container_of(iop, struct be_eq_obj, iopoll); ret = beiscsi_process_cq(pbe_eq); + pbe_eq->cq_count += ret; if (ret < budget) { phba = pbe_eq->phba; blk_iopoll_complete(iop); @@ -2115,6 +2285,101 @@ static int be_iopoll(struct blk_iopoll *iop, int budget) } static void +hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg, + unsigned int num_sg, struct beiscsi_io_task *io_task) +{ + struct iscsi_sge *psgl; + unsigned int sg_len, index; + unsigned int sge_len = 0; + unsigned long long addr; + struct scatterlist *l_sg; + unsigned int offset; + + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb, + io_task->bhs_pa.u.a32.address_lo); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb, + io_task->bhs_pa.u.a32.address_hi); + + l_sg = sg; + for (index = 0; (index < num_sg) && (index < 2); index++, + sg = sg_next(sg)) { + if (index == 0) { + sg_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + sge0_addr_lo, pwrb, + lower_32_bits(addr)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + sge0_addr_hi, pwrb, + upper_32_bits(addr)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + sge0_len, pwrb, + sg_len); + sge_len = sg_len; + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset, + pwrb, sge_len); + sg_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + sge1_addr_lo, pwrb, + lower_32_bits(addr)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + sge1_addr_hi, pwrb, + upper_32_bits(addr)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + sge1_len, pwrb, + sg_len); + } + } + psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; + memset(psgl, 0, sizeof(*psgl) * BE2_SGE); + + AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); + + AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, + io_task->bhs_pa.u.a32.address_hi); + AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, + io_task->bhs_pa.u.a32.address_lo); + + if (num_sg == 1) { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, + 1); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, + 0); + } else if (num_sg == 2) { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, + 0); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, + 1); + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, + 0); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, + 0); + } + + sg = l_sg; + psgl++; + psgl++; + offset = 0; + for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { + sg_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, + lower_32_bits(addr)); + AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, + upper_32_bits(addr)); + AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); + AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); + AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); + offset += sg_len; + } + psgl--; + AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); +} + +static void hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, unsigned int num_sg, struct beiscsi_io_task *io_task) { @@ -2202,13 +2467,18 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); } +/** + * hwi_write_buffer()- Populate the WRB with task 
info + * @pwrb: ptr to the WRB entry + * @task: iscsi task which is to be executed + **/ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) { struct iscsi_sge *psgl; - unsigned long long addr; struct beiscsi_io_task *io_task = task->dd_data; struct beiscsi_conn *beiscsi_conn = io_task->conn; struct beiscsi_hba *phba = beiscsi_conn->phba; + uint8_t dsp_value = 0; io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, @@ -2217,26 +2487,38 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) io_task->bhs_pa.u.a32.address_hi); if (task->data) { - if (task->data_count) { - AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); - addr = (u64) pci_map_single(phba->pcidev, - task->data, - task->data_count, 1); - } else { - AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); - addr = 0; - } + + /* Check for the data_count */ + dsp_value = (task->data_count) ? 1 : 0; + + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, + pwrb, dsp_value); + else + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, + pwrb, dsp_value); + + /* Map addr only if there is data_count */ + if (dsp_value) { + io_task->mtask_addr = pci_map_single(phba->pcidev, + task->data, + task->data_count, + PCI_DMA_TODEVICE); + io_task->mtask_data_count = task->data_count; + } else + io_task->mtask_addr = 0; + AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, - ((u32)(addr & 0xFFFFFFFF))); + lower_32_bits(io_task->mtask_addr)); AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, - ((u32)(addr >> 32))); + upper_32_bits(io_task->mtask_addr)); AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, task->data_count); AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); } else { AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); - addr = 0; + io_task->mtask_addr = 0; } psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; @@ -2259,35 +2541,28 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) psgl++; if (task->data) { AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, - ((u32)(addr & 0xFFFFFFFF))); + lower_32_bits(io_task->mtask_addr)); AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, - ((u32)(addr >> 32))); + upper_32_bits(io_task->mtask_addr)); } AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); } AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); } +/** + * beiscsi_find_mem_req()- Find mem needed + * @phba: ptr to HBA struct + **/ static void beiscsi_find_mem_req(struct beiscsi_hba *phba) { + uint8_t mem_descr_index, ulp_num; unsigned int num_cq_pages, num_async_pdu_buf_pages; unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ sizeof(struct sol_cqe)); - num_async_pdu_buf_pages = - PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ - phba->params.defpdu_hdr_sz); - num_async_pdu_buf_sgl_pages = - PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ - sizeof(struct phys_addr)); - num_async_pdu_data_pages = - PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ - phba->params.defpdu_data_sz); - num_async_pdu_data_sgl_pages = - PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ - sizeof(struct phys_addr)); phba->params.hwi_ws_sz = sizeof(struct hwi_controller); @@ -2309,30 +2584,86 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba) phba->params.icds_per_ctrl; phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 
phba->params.num_sge_per_io * phba->params.icds_per_ctrl; - - phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] = - num_async_pdu_buf_pages * PAGE_SIZE; - phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] = - num_async_pdu_data_pages * PAGE_SIZE; - phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] = - num_async_pdu_buf_sgl_pages * PAGE_SIZE; - phba->mem_req[HWI_MEM_ASYNC_DATA_RING] = - num_async_pdu_data_sgl_pages * PAGE_SIZE; - phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] = - phba->params.asyncpdus_per_ctrl * - sizeof(struct async_pdu_handle); - phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] = - phba->params.asyncpdus_per_ctrl * - sizeof(struct async_pdu_handle); - phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] = - sizeof(struct hwi_async_pdu_context) + - (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry)); + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + + num_async_pdu_buf_sgl_pages = + PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( + phba, ulp_num) * + sizeof(struct phys_addr)); + + num_async_pdu_buf_pages = + PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( + phba, ulp_num) * + phba->params.defpdu_hdr_sz); + + num_async_pdu_data_pages = + PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( + phba, ulp_num) * + phba->params.defpdu_data_sz); + + num_async_pdu_data_sgl_pages = + PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( + phba, ulp_num) * + sizeof(struct phys_addr)); + + mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + BEISCSI_GET_CID_COUNT(phba, ulp_num) * + BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + num_async_pdu_buf_pages * + PAGE_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + num_async_pdu_data_pages * + PAGE_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + num_async_pdu_buf_sgl_pages * + PAGE_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + num_async_pdu_data_sgl_pages * + PAGE_SIZE; + + mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + BEISCSI_GET_CID_COUNT(phba, ulp_num) * + sizeof(struct async_pdu_handle); + + mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + BEISCSI_GET_CID_COUNT(phba, ulp_num) * + sizeof(struct async_pdu_handle); + + mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + phba->mem_req[mem_descr_index] = + sizeof(struct hwi_async_pdu_context) + + (BEISCSI_GET_CID_COUNT(phba, ulp_num) * + sizeof(struct hwi_async_entry)); + } + } } static int beiscsi_alloc_mem(struct beiscsi_hba *phba) { - struct be_mem_descriptor *mem_descr; dma_addr_t bus_add; + struct hwi_controller *phwi_ctrlr; + struct be_mem_descriptor *mem_descr; struct mem_array *mem_arr, *mem_arr_orig; unsigned int i, j, alloc_size, curr_alloc_size; @@ -2340,9 +2671,18 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) if (!phba->phwi_ctrlr) return -ENOMEM; + /* Allocate memory for wrb_context */ + phwi_ctrlr = phba->phwi_ctrlr; + phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) * + phba->params.cxns_per_ctrl, + GFP_KERNEL); + if (!phwi_ctrlr->wrb_context) + return -ENOMEM; + 
phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), GFP_KERNEL); if (!phba->init_mem) { + kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2351,12 +2691,19 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) GFP_KERNEL); if (!mem_arr_orig) { kfree(phba->init_mem); + kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } mem_descr = phba->init_mem; for (i = 0; i < SE_MEM_MAX; i++) { + if (!phba->mem_req[i]) { + mem_descr->mem_array = NULL; + mem_descr++; + continue; + } + j = 0; mem_arr = mem_arr_orig; alloc_size = phba->mem_req[i]; @@ -2421,6 +2768,7 @@ free_mem: } kfree(mem_arr_orig); kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2459,6 +2807,7 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba) static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) { struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; + struct hwi_context_memory *phwi_ctxt; struct wrb_handle *pwrb_handle = NULL; struct hwi_controller *phwi_ctrlr; struct hwi_wrb_context *pwrb_context; @@ -2473,7 +2822,18 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) mem_descr_wrb += HWI_MEM_WRB; phwi_ctrlr = phba->phwi_ctrlr; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + /* Allocate memory for WRBQ */ + phwi_ctxt = phwi_ctrlr->phwi_ctxt; + phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * + phba->params.cxns_per_ctrl, + GFP_KERNEL); + if (!phwi_ctxt->be_wrbq) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : WRBQ Mem Alloc Failed\n"); + return -ENOMEM; + } + + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; pwrb_context->pwrb_handle_base = kzalloc(sizeof(struct wrb_handle *) * @@ -2516,7 +2876,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) } } idx = 0; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; if (!num_cxn_wrb) { pwrb = mem_descr_wrb->mem_array[idx].virtual_address; @@ -2545,8 +2905,9 @@ init_wrb_hndl_failed: return -ENOMEM; } -static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) +static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) { + uint8_t ulp_num; struct hwi_controller *phwi_ctrlr; struct hba_parameters *p = &phba->params; struct hwi_async_pdu_context *pasync_ctx; @@ -2554,146 +2915,150 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) unsigned int index, idx, num_per_mem, num_async_data; struct be_mem_descriptor *mem_descr; - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT; - - phwi_ctrlr = phba->phwi_ctrlr; - phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *) + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + + (ulp_num * MEM_DESCR_OFFSET)); + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = + (struct hwi_async_pdu_context *) + mem_descr->mem_array[0].virtual_address; + + pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; + memset(pasync_ctx, 0, sizeof(*pasync_ctx)); + + pasync_ctx->async_entry = + (struct hwi_async_entry *) + ((long unsigned int)pasync_ctx + + sizeof(struct 
hwi_async_pdu_context)); + + pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba, + ulp_num); + pasync_ctx->buffer_size = p->defpdu_hdr_sz; + + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. + virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + pasync_ctx->async_header.va_base = mem_descr->mem_array[0].virtual_address; - pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; - memset(pasync_ctx, 0, sizeof(*pasync_ctx)); - - pasync_ctx->num_entries = p->asyncpdus_per_ctrl; - pasync_ctx->buffer_size = p->defpdu_hdr_sz; - - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_HEADER_BUF; - if (mem_descr->mem_array[0].virtual_address) { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : hwi_init_async_pdu_ctx" - " HWI_MEM_ASYNC_HEADER_BUF va=%p\n", - mem_descr->mem_array[0].virtual_address); - } else - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : No Virtual address\n"); - - pasync_ctx->async_header.va_base = - mem_descr->mem_array[0].virtual_address; - - pasync_ctx->async_header.pa_base.u.a64.address = - mem_descr->mem_array[0].bus_address.u.a64.address; - - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_HEADER_RING; - if (mem_descr->mem_array[0].virtual_address) { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : hwi_init_async_pdu_ctx" - " HWI_MEM_ASYNC_HEADER_RING va=%p\n", - mem_descr->mem_array[0].virtual_address); - } else - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : No Virtual address\n"); - - pasync_ctx->async_header.ring_base = - mem_descr->mem_array[0].virtual_address; - - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE; - if (mem_descr->mem_array[0].virtual_address) { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : hwi_init_async_pdu_ctx" - " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n", - mem_descr->mem_array[0].virtual_address); - } else - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : No Virtual address\n"); - - pasync_ctx->async_header.handle_base = - mem_descr->mem_array[0].virtual_address; - pasync_ctx->async_header.writables = 0; - INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); - - - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_DATA_RING; - if (mem_descr->mem_array[0].virtual_address) { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : hwi_init_async_pdu_ctx" - " HWI_MEM_ASYNC_DATA_RING va=%p\n", - mem_descr->mem_array[0].virtual_address); - } else - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : No Virtual address\n"); - pasync_ctx->async_data.ring_base = - mem_descr->mem_array[0].virtual_address; + pasync_ctx->async_header.pa_base.u.a64.address = + mem_descr->mem_array[0]. 
+ bus_address.u.a64.address; - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_DATA_HANDLE; - if (!mem_descr->mem_array[0].virtual_address) - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : No Virtual address\n"); + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. + virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + pasync_ctx->async_header.ring_base = + mem_descr->mem_array[0].virtual_address; - pasync_ctx->async_data.handle_base = - mem_descr->mem_array[0].virtual_address; - pasync_ctx->async_data.writables = 0; - INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. + virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + pasync_ctx->async_header.handle_base = + mem_descr->mem_array[0].virtual_address; + pasync_ctx->async_header.writables = 0; + INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); + + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. 
+ virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + pasync_ctx->async_data.ring_base = + mem_descr->mem_array[0].virtual_address; - pasync_header_h = - (struct async_pdu_handle *)pasync_ctx->async_header.handle_base; - pasync_data_h = - (struct async_pdu_handle *)pasync_ctx->async_data.handle_base; + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (!mem_descr->mem_array[0].virtual_address) + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); - mem_descr = (struct be_mem_descriptor *)phba->init_mem; - mem_descr += HWI_MEM_ASYNC_DATA_BUF; - if (mem_descr->mem_array[0].virtual_address) { - beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : hwi_init_async_pdu_ctx" - " HWI_MEM_ASYNC_DATA_BUF va=%p\n", - mem_descr->mem_array[0].virtual_address); - } else - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : No Virtual address\n"); - - idx = 0; - pasync_ctx->async_data.va_base = - mem_descr->mem_array[idx].virtual_address; - pasync_ctx->async_data.pa_base.u.a64.address = - mem_descr->mem_array[idx].bus_address.u.a64.address; - - num_async_data = ((mem_descr->mem_array[idx].size) / - phba->params.defpdu_data_sz); - num_per_mem = 0; - - for (index = 0; index < p->asyncpdus_per_ctrl; index++) { - pasync_header_h->cri = -1; - pasync_header_h->index = (char)index; - INIT_LIST_HEAD(&pasync_header_h->link); - pasync_header_h->pbuffer = - (void *)((unsigned long) - (pasync_ctx->async_header.va_base) + - (p->defpdu_hdr_sz * index)); - - pasync_header_h->pa.u.a64.address = - pasync_ctx->async_header.pa_base.u.a64.address + - (p->defpdu_hdr_sz * index); - - list_add_tail(&pasync_header_h->link, - &pasync_ctx->async_header.free_list); - pasync_header_h++; - pasync_ctx->async_header.free_entries++; - pasync_ctx->async_header.writables++; - - INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list); - INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. - header_busy_list); - pasync_data_h->cri = -1; - pasync_data_h->index = (char)index; - INIT_LIST_HEAD(&pasync_data_h->link); - - if (!num_async_data) { - num_per_mem = 0; - idx++; + pasync_ctx->async_data.handle_base = + mem_descr->mem_array[0].virtual_address; + pasync_ctx->async_data.writables = 0; + INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); + + pasync_header_h = + (struct async_pdu_handle *) + pasync_ctx->async_header.handle_base; + pasync_data_h = + (struct async_pdu_handle *) + pasync_ctx->async_data.handle_base; + + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + if (mem_descr->mem_array[0].virtual_address) { + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx" + " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", + ulp_num, + mem_descr->mem_array[0]. 
+ virtual_address); + } else + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : No Virtual address for ULP : %d\n", + ulp_num); + + idx = 0; pasync_ctx->async_data.va_base = mem_descr->mem_array[idx].virtual_address; pasync_ctx->async_data.pa_base.u.a64.address = @@ -2702,31 +3067,84 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) num_async_data = ((mem_descr->mem_array[idx].size) / phba->params.defpdu_data_sz); + num_per_mem = 0; + + for (index = 0; index < BEISCSI_GET_CID_COUNT + (phba, ulp_num); index++) { + pasync_header_h->cri = -1; + pasync_header_h->index = (char)index; + INIT_LIST_HEAD(&pasync_header_h->link); + pasync_header_h->pbuffer = + (void *)((unsigned long) + (pasync_ctx-> + async_header.va_base) + + (p->defpdu_hdr_sz * index)); + + pasync_header_h->pa.u.a64.address = + pasync_ctx->async_header.pa_base.u.a64. + address + (p->defpdu_hdr_sz * index); + + list_add_tail(&pasync_header_h->link, + &pasync_ctx->async_header. + free_list); + pasync_header_h++; + pasync_ctx->async_header.free_entries++; + pasync_ctx->async_header.writables++; + + INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. + wait_queue.list); + INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. + header_busy_list); + pasync_data_h->cri = -1; + pasync_data_h->index = (char)index; + INIT_LIST_HEAD(&pasync_data_h->link); + + if (!num_async_data) { + num_per_mem = 0; + idx++; + pasync_ctx->async_data.va_base = + mem_descr->mem_array[idx]. + virtual_address; + pasync_ctx->async_data.pa_base.u. + a64.address = + mem_descr->mem_array[idx]. + bus_address.u.a64.address; + num_async_data = + ((mem_descr->mem_array[idx]. + size) / + phba->params.defpdu_data_sz); + } + pasync_data_h->pbuffer = + (void *)((unsigned long) + (pasync_ctx->async_data.va_base) + + (p->defpdu_data_sz * num_per_mem)); + + pasync_data_h->pa.u.a64.address = + pasync_ctx->async_data.pa_base.u.a64. + address + (p->defpdu_data_sz * + num_per_mem); + num_per_mem++; + num_async_data--; + + list_add_tail(&pasync_data_h->link, + &pasync_ctx->async_data. + free_list); + pasync_data_h++; + pasync_ctx->async_data.free_entries++; + pasync_ctx->async_data.writables++; + + INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 
+ data_busy_list); + } + + pasync_ctx->async_header.host_write_ptr = 0; + pasync_ctx->async_header.ep_read_ptr = -1; + pasync_ctx->async_data.host_write_ptr = 0; + pasync_ctx->async_data.ep_read_ptr = -1; } - pasync_data_h->pbuffer = - (void *)((unsigned long) - (pasync_ctx->async_data.va_base) + - (p->defpdu_data_sz * num_per_mem)); - - pasync_data_h->pa.u.a64.address = - pasync_ctx->async_data.pa_base.u.a64.address + - (p->defpdu_data_sz * num_per_mem); - num_per_mem++; - num_async_data--; - - list_add_tail(&pasync_data_h->link, - &pasync_ctx->async_data.free_list); - pasync_data_h++; - pasync_ctx->async_data.free_entries++; - pasync_ctx->async_data.writables++; - - INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list); - } - - pasync_ctx->async_header.host_write_ptr = 0; - pasync_ctx->async_header.ep_read_ptr = -1; - pasync_ctx->async_data.host_write_ptr = 0; - pasync_ctx->async_data.ep_read_ptr = -1; + } + + return 0; } static int @@ -2843,7 +3261,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba, } return 0; create_eq_error: - for (i = 0; i < (phba->num_cpus + 1); i++) { + for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { eq = &phwi_context->be_eq[i].q; mem = &eq->dma_mem; if (mem->va) @@ -2921,7 +3339,7 @@ static int beiscsi_create_def_hdr(struct beiscsi_hba *phba, struct hwi_context_memory *phwi_context, struct hwi_controller *phwi_ctrlr, - unsigned int def_pdu_ring_sz) + unsigned int def_pdu_ring_sz, uint8_t ulp_num) { unsigned int idx; int ret; @@ -2931,36 +3349,42 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba, void *dq_vaddress; idx = 0; - dq = &phwi_context->be_def_hdrq; + dq = &phwi_context->be_def_hdrq[ulp_num]; cq = &phwi_context->be_cq[0]; mem = &dq->dma_mem; mem_descr = phba->init_mem; - mem_descr += HWI_MEM_ASYNC_HEADER_RING; + mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); dq_vaddress = mem_descr->mem_array[idx].virtual_address; ret = be_fill_queue(dq, mem_descr->mem_array[0].size / sizeof(struct phys_addr), sizeof(struct phys_addr), dq_vaddress); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : be_fill_queue Failed for DEF PDU HDR\n"); + "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", + ulp_num); + return ret; } mem->dma = (unsigned long)mem_descr->mem_array[idx]. 
			bus_address.u.a64.address;
 	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
 					      def_pdu_ring_sz,
-					      phba->params.defpdu_hdr_sz);
+					      phba->params.defpdu_hdr_sz,
+					      BEISCSI_DEFQ_HDR, ulp_num);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n");
+			    "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
+			    ulp_num);
+
 		return ret;
 	}
-	phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
-	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-		    "BM_%d : iscsi def pdu id is %d\n",
-		    phwi_context->be_def_hdrq.id);
-	hwi_post_async_buffers(phba, 1);
+	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+		    "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
+		    ulp_num,
+		    phwi_context->be_def_hdrq[ulp_num].id);
+	hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
 	return 0;
 }
@@ -2968,7 +3392,7 @@ static int
 beiscsi_create_def_data(struct beiscsi_hba *phba,
 			struct hwi_context_memory *phwi_context,
 			struct hwi_controller *phwi_ctrlr,
-			unsigned int def_pdu_ring_sz)
+			unsigned int def_pdu_ring_sz, uint8_t ulp_num)
 {
 	unsigned int idx;
 	int ret;
@@ -2978,40 +3402,83 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
 	void *dq_vaddress;
 
 	idx = 0;
-	dataq = &phwi_context->be_def_dataq;
+	dataq = &phwi_context->be_def_dataq[ulp_num];
 	cq = &phwi_context->be_cq[0];
 	mem = &dataq->dma_mem;
 	mem_descr = phba->init_mem;
-	mem_descr += HWI_MEM_ASYNC_DATA_RING;
+	mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
+		     (ulp_num * MEM_DESCR_OFFSET);
 	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
 	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
 			    sizeof(struct phys_addr),
 			    sizeof(struct phys_addr), dq_vaddress);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : be_fill_queue Failed for DEF PDU DATA\n");
+			    "BM_%d : be_fill_queue Failed for DEF PDU "
+			    "DATA on ULP : %d\n",
+			    ulp_num);
+
 		return ret;
 	}
 	mem->dma = (unsigned long)mem_descr->mem_array[idx].
bus_address.u.a64.address; ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, def_pdu_ring_sz, - phba->params.defpdu_data_sz); + phba->params.defpdu_data_sz, + BEISCSI_DEFQ_DATA, ulp_num); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d be_cmd_create_default_pdu_queue" - " Failed for DEF PDU DATA\n"); + " Failed for DEF PDU DATA on ULP : %d\n", + ulp_num); return ret; } - phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id; + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : iscsi def data id is %d\n", - phwi_context->be_def_dataq.id); + "BM_%d : iscsi def data id on ULP : %d is %d\n", + ulp_num, + phwi_context->be_def_dataq[ulp_num].id); - hwi_post_async_buffers(phba, 0); + hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, - "BM_%d : DEFAULT PDU DATA RING CREATED\n"); + "BM_%d : DEFAULT PDU DATA RING CREATED" + "on ULP : %d\n", ulp_num); + + return 0; +} + + +static int +beiscsi_post_template_hdr(struct beiscsi_hba *phba) +{ + struct be_mem_descriptor *mem_descr; + struct mem_array *pm_arr; + struct be_dma_mem sgl; + int status, ulp_num; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + mem_descr = (struct be_mem_descriptor *)phba->init_mem; + mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + + (ulp_num * MEM_DESCR_OFFSET); + pm_arr = mem_descr->mem_array; + + hwi_build_be_sgl_arr(phba, pm_arr, &sgl); + status = be_cmd_iscsi_post_template_hdr( + &phba->ctrl, &sgl); + + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Post Template HDR Failed for" + "ULP_%d\n", ulp_num); + return status; + } + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : Template HDR Pages Posted for" + "ULP_%d\n", ulp_num); + } + } return 0; } @@ -3022,14 +3489,18 @@ beiscsi_post_pages(struct beiscsi_hba *phba) struct mem_array *pm_arr; unsigned int page_offset, i; struct be_dma_mem sgl; - int status; + int status, ulp_num = 0; mem_descr = phba->init_mem; mem_descr += HWI_MEM_SGE; pm_arr = mem_descr->mem_array; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) + break; + page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * - phba->fw_config.iscsi_icd_start) / PAGE_SIZE; + phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; for (i = 0; i < mem_descr->num_elements; i++) { hwi_build_be_sgl_arr(phba, pm_arr, &sgl); status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, @@ -3081,12 +3552,15 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, { unsigned int wrb_mem_index, offset, size, num_wrb_rings; u64 pa_addr_lo; - unsigned int idx, num, i; + unsigned int idx, num, i, ulp_num; struct mem_array *pwrb_arr; void *wrb_vaddr; struct be_dma_mem sgl; struct be_mem_descriptor *mem_descr; + struct hwi_wrb_context *pwrb_context; int status; + uint8_t ulp_count = 0, ulp_base_num = 0; + uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; idx = 0; mem_descr = phba->init_mem; @@ -3130,22 +3604,45 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, num_wrb_rings--; } } + + /* Get the ULP Count */ + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + ulp_count++; + ulp_base_num = ulp_num; + cid_count_ulp[ulp_num] = + BEISCSI_GET_CID_COUNT(phba, ulp_num); + } + for (i = 0; i < phba->params.cxns_per_ctrl; i++) { wrb_mem_index = 0; offset = 0; size = 0; + if (ulp_count > 1) { + 
ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; + + if (!cid_count_ulp[ulp_base_num]) + ulp_base_num = (ulp_base_num + 1) % + BEISCSI_ULP_COUNT; + + cid_count_ulp[ulp_base_num]--; + } + + hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); status = be_cmd_wrbq_create(&phba->ctrl, &sgl, - &phwi_context->be_wrbq[i]); + &phwi_context->be_wrbq[i], + &phwi_ctrlr->wrb_context[i], + ulp_base_num); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : wrbq create failed."); kfree(pwrb_arr); return status; } - phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i]. - id; + pwrb_context = &phwi_ctrlr->wrb_context[i]; + BE_SET_CID_TO_CRI(i, pwrb_context->cid); } kfree(pwrb_arr); return 0; @@ -3158,7 +3655,7 @@ static void free_wrb_handles(struct beiscsi_hba *phba) struct hwi_wrb_context *pwrb_context; phwi_ctrlr = phba->phwi_ctrlr; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; kfree(pwrb_context->pwrb_handle_base); kfree(pwrb_context->pwrb_handle_basestd); @@ -3187,24 +3684,36 @@ static void hwi_cleanup(struct beiscsi_hba *phba) struct be_ctrl_info *ctrl = &phba->ctrl; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; - int i, eq_num; + struct hwi_async_pdu_context *pasync_ctx; + int i, eq_for_mcc, ulp_num; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; + + be_cmd_iscsi_remove_template_hdr(ctrl); + for (i = 0; i < phba->params.cxns_per_ctrl; i++) { q = &phwi_context->be_wrbq[i]; if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); } + kfree(phwi_context->be_wrbq); free_wrb_handles(phba); - q = &phwi_context->be_def_hdrq; - if (q->created) - beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { - q = &phwi_context->be_def_dataq; - if (q->created) - beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + q = &phwi_context->be_def_hdrq[ulp_num]; + if (q->created) + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + + q = &phwi_context->be_def_dataq[ulp_num]; + if (q->created) + beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + + pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; + } + } beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); @@ -3213,16 +3722,18 @@ static void hwi_cleanup(struct beiscsi_hba *phba) if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); } + + be_mcc_queues_destroy(phba); if (phba->msix_enabled) - eq_num = 1; + eq_for_mcc = 1; else - eq_num = 0; - for (i = 0; i < (phba->num_cpus + eq_num); i++) { + eq_for_mcc = 0; + for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { q = &phwi_context->be_eq[i].q; if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); } - be_mcc_queues_destroy(phba); + be_cmd_fw_uninit(ctrl); } static int be_mcc_queues_create(struct beiscsi_hba *phba, @@ -3268,15 +3779,42 @@ err: return -ENOMEM; } -static int find_num_cpus(void) +/** + * find_num_cpus()- Get the CPU online count + * @phba: ptr to priv structure + * + * CPU count is used for creating EQ. + **/ +static void find_num_cpus(struct beiscsi_hba *phba) { int num_cpus = 0; num_cpus = num_online_cpus(); - if (num_cpus >= MAX_CPUS) - num_cpus = MAX_CPUS - 1; - return num_cpus; + switch (phba->generation) { + case BE_GEN2: + case BE_GEN3: + phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ? 
+ BEISCSI_MAX_NUM_CPUS : num_cpus; + break; + case BE_GEN4: + /* + * If eqid_count == 1 fall back to + * INTX mechanism + **/ + if (phba->fw_config.eqid_count == 1) { + enable_msix = 0; + phba->num_cpus = 1; + return; + } + + phba->num_cpus = + (num_cpus > (phba->fw_config.eqid_count - 1)) ? + (phba->fw_config.eqid_count - 1) : num_cpus; + break; + default: + phba->num_cpus = 1; + } } static int hwi_init_port(struct beiscsi_hba *phba) @@ -3285,15 +3823,13 @@ static int hwi_init_port(struct beiscsi_hba *phba) struct hwi_context_memory *phwi_context; unsigned int def_pdu_ring_sz; struct be_ctrl_info *ctrl = &phba->ctrl; - int status; + int status, ulp_num; - def_pdu_ring_sz = - phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr); phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - phwi_context->max_eqd = 0; + phwi_context->max_eqd = 128; phwi_context->min_eqd = 0; - phwi_context->cur_eqd = 64; + phwi_context->cur_eqd = 0; be_cmd_fw_initialize(&phba->ctrl); status = beiscsi_create_eqs(phba, phwi_context); @@ -3321,27 +3857,48 @@ static int hwi_init_port(struct beiscsi_hba *phba) goto error; } - status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr, - def_pdu_ring_sz); - if (status != 0) { - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Default Header not created\n"); - goto error; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + + def_pdu_ring_sz = + BEISCSI_GET_CID_COUNT(phba, ulp_num) * + sizeof(struct phys_addr); + + status = beiscsi_create_def_hdr(phba, phwi_context, + phwi_ctrlr, + def_pdu_ring_sz, + ulp_num); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Default Header not created for ULP : %d\n", + ulp_num); + goto error; + } + + status = beiscsi_create_def_data(phba, phwi_context, + phwi_ctrlr, + def_pdu_ring_sz, + ulp_num); + if (status != 0) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Default Data not created for ULP : %d\n", + ulp_num); + goto error; + } + } } - status = beiscsi_create_def_data(phba, phwi_context, - phwi_ctrlr, def_pdu_ring_sz); + status = beiscsi_post_pages(phba); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Default Data not created\n"); + "BM_%d : Post SGL Pages Failed\n"); goto error; } - status = beiscsi_post_pages(phba); + status = beiscsi_post_template_hdr(phba); if (status != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Post SGL Pages Failed\n"); - goto error; + "BM_%d : Template HDR Posting for CXN Failed\n"); } status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); @@ -3351,6 +3908,26 @@ static int hwi_init_port(struct beiscsi_hba *phba) goto error; } + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + uint16_t async_arr_idx = 0; + + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + uint16_t cri = 0; + struct hwi_async_pdu_context *pasync_ctx; + + pasync_ctx = HWI_GET_ASYNC_PDU_CTX( + phwi_ctrlr, ulp_num); + for (cri = 0; cri < + phba->params.cxns_per_ctrl; cri++) { + if (ulp_num == BEISCSI_GET_ULP_FROM_CRI + (phwi_ctrlr, cri)) + pasync_ctx->cid_to_async_cri_map[ + phwi_ctrlr->wrb_context[cri].cid] = + async_arr_idx++; + } + } + } + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : hwi_init_port success\n"); return 0; @@ -3384,7 +3961,12 @@ static int hwi_init_controller(struct beiscsi_hba *phba) if (beiscsi_init_wrb_handle(phba)) return -ENOMEM; - hwi_init_async_pdu_ctx(phba); + if 
(hwi_init_async_pdu_ctx(phba)) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx failed\n"); + return -ENOMEM; + } + if (hwi_init_port(phba) != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : hwi_init_controller failed\n"); @@ -3410,10 +3992,12 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba) (unsigned long)mem_descr->mem_array[j - 1]. bus_address.u.a64.address); } + kfree(mem_descr->mem_array); mem_descr++; } kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); } @@ -3448,6 +4032,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) struct sgl_handle *psgl_handle; struct iscsi_sge *pfrag; unsigned int arr_index, i, idx; + unsigned int ulp_icd_start, ulp_num = 0; phba->io_sgl_hndl_avbl = 0; phba->eh_sgl_hndl_avbl = 0; @@ -3514,6 +4099,12 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) "\n BM_%d : mem_descr_sg->num_elements=%d\n", mem_descr_sg->num_elements); + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) + if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) + break; + + ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; + arr_index = 0; idx = 0; while (idx < mem_descr_sg->num_elements) { @@ -3532,8 +4123,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); pfrag += phba->params.num_sge_per_io; - psgl_handle->sgl_index = - phba->fw_config.iscsi_icd_start + arr_index++; + psgl_handle->sgl_index = ulp_icd_start + arr_index++; } idx++; } @@ -3546,32 +4136,105 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) static int hba_setup_cid_tbls(struct beiscsi_hba *phba) { - int i, new_cid; + int ret; + uint16_t i, ulp_num; + struct ulp_cid_info *ptr_cid_info = NULL; - phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, - GFP_KERNEL); - if (!phba->cid_array) { - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, - "BM_%d : Failed to allocate memory in " - "hba_setup_cid_tbls\n"); - return -ENOMEM; + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), + GFP_KERNEL); + + if (!ptr_cid_info) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to allocate memory" + "for ULP_CID_INFO for ULP : %d\n", + ulp_num); + ret = -ENOMEM; + goto free_memory; + + } + + /* Allocate memory for CID array */ + ptr_cid_info->cid_array = kzalloc(sizeof(void *) * + BEISCSI_GET_CID_COUNT(phba, + ulp_num), GFP_KERNEL); + if (!ptr_cid_info->cid_array) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to allocate memory" + "for CID_ARRAY for ULP : %d\n", + ulp_num); + kfree(ptr_cid_info); + ptr_cid_info = NULL; + ret = -ENOMEM; + + goto free_memory; + } + ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( + phba, ulp_num); + + /* Save the cid_info_array ptr */ + phba->cid_array_info[ulp_num] = ptr_cid_info; + } } phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * - phba->params.cxns_per_ctrl * 2, GFP_KERNEL); + phba->params.cxns_per_ctrl, GFP_KERNEL); if (!phba->ep_array) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Failed to allocate memory in " "hba_setup_cid_tbls\n"); - kfree(phba->cid_array); - return -ENOMEM; + ret = -ENOMEM; + + goto free_memory; + } + + phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * + phba->params.cxns_per_ctrl, GFP_KERNEL); 
+ if (!phba->conn_table) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to allocate memory in" + "hba_setup_cid_tbls\n"); + + kfree(phba->ep_array); + phba->ep_array = NULL; + ret = -ENOMEM; + + goto free_memory; } - new_cid = phba->fw_config.iscsi_cid_start; + for (i = 0; i < phba->params.cxns_per_ctrl; i++) { - phba->cid_array[i] = new_cid; - new_cid += 2; + ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; + + ptr_cid_info = phba->cid_array_info[ulp_num]; + ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = + phba->phwi_ctrlr->wrb_context[i].cid; + + } + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = phba->cid_array_info[ulp_num]; + + ptr_cid_info->cid_alloc = 0; + ptr_cid_info->cid_free = 0; + } } - phba->avlbl_cids = phba->params.cxns_per_ctrl; return 0; + +free_memory: + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + ptr_cid_info = phba->cid_array_info[ulp_num]; + + if (ptr_cid_info) { + kfree(ptr_cid_info->cid_array); + kfree(ptr_cid_info); + phba->cid_array_info[ulp_num] = NULL; + } + } + } + + return ret; } static void hwi_enable_intr(struct beiscsi_hba *phba) @@ -3644,12 +4307,9 @@ static void hwi_disable_intr(struct beiscsi_hba *phba) static int beiscsi_get_boot_info(struct beiscsi_hba *phba) { struct be_cmd_get_session_resp *session_resp; - struct be_mcc_wrb *wrb; struct be_dma_mem nonemb_cmd; - unsigned int tag, wrb_num; - unsigned short status, extd_status; + unsigned int tag; unsigned int s_handle; - struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; int ret = -ENOMEM; /* Get the session handle of the boot target */ @@ -3682,25 +4342,20 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba) " Failed\n"); goto boot_freemem; - } else - wait_event_interruptible(phba->ctrl.mcc_wait[tag], - phba->ctrl.mcc_numtag[tag]); + } - wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16; - extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; - status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; - if (status || extd_status) { + ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); + if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, - "BM_%d : beiscsi_get_session_info Failed" - " status = %d extd_status = %d\n", - status, extd_status); + "BM_%d : beiscsi_get_session_info Failed"); - free_mcc_tag(&phba->ctrl, tag); - goto boot_freemem; + if (ret != -EBUSY) + goto boot_freemem; + else + return ret; } - wrb = queue_get_wrb(mccq, wrb_num); - free_mcc_tag(&phba->ctrl, tag); + session_resp = nonemb_cmd.va ; memcpy(&phba->boot_sess, &session_resp->session_info, @@ -3838,21 +4493,92 @@ static void hwi_purge_eq(struct beiscsi_hba *phba) static void beiscsi_clean_port(struct beiscsi_hba *phba) { - int mgmt_status; - - mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0); - if (mgmt_status) - beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, - "BM_%d : mgmt_epfw_cleanup FAILED\n"); + int mgmt_status, ulp_num; + struct ulp_cid_info *ptr_cid_info = NULL; + + for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { + if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { + mgmt_status = mgmt_epfw_cleanup(phba, ulp_num); + if (mgmt_status) + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : mgmt_epfw_cleanup FAILED" + " for ULP_%d\n", ulp_num); + } + } hwi_purge_eq(phba); hwi_cleanup(phba); kfree(phba->io_sgl_hndl_base); 
 	kfree(phba->eh_sgl_hndl_base);
-	kfree(phba->cid_array);
 	kfree(phba->ep_array);
+	kfree(phba->conn_table);
+
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+			ptr_cid_info = phba->cid_array_info[ulp_num];
+
+			if (ptr_cid_info) {
+				kfree(ptr_cid_info->cid_array);
+				kfree(ptr_cid_info);
+				phba->cid_array_info[ulp_num] = NULL;
+			}
+		}
+	}
+
+}
+
+/**
+ * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
+ * @beiscsi_conn: ptr to the conn to be cleaned up
+ * @task: ptr to iscsi_task resource to be freed.
+ *
+ * Free driver mgmt resources bound to CXN.
+ **/
+void
+beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
+				struct iscsi_task *task)
+{
+	struct beiscsi_io_task *io_task;
+	struct beiscsi_hba *phba = beiscsi_conn->phba;
+	struct hwi_wrb_context *pwrb_context;
+	struct hwi_controller *phwi_ctrlr;
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(
+			     beiscsi_conn->beiscsi_conn_cid);
+
+	phwi_ctrlr = phba->phwi_ctrlr;
+	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+
+	io_task = task->dd_data;
+
+	if (io_task->pwrb_handle) {
+		memset(io_task->pwrb_handle->pwrb, 0,
+		       sizeof(struct iscsi_wrb));
+		free_wrb_handle(phba, pwrb_context,
+				io_task->pwrb_handle);
+		io_task->pwrb_handle = NULL;
+	}
+
+	if (io_task->psgl_handle) {
+		spin_lock_bh(&phba->mgmt_sgl_lock);
+		free_mgmt_sgl_handle(phba,
+				     io_task->psgl_handle);
+		io_task->psgl_handle = NULL;
+		spin_unlock_bh(&phba->mgmt_sgl_lock);
+	}
+
+	if (io_task->mtask_addr)
+		pci_unmap_single(phba->pcidev,
+				 io_task->mtask_addr,
+				 io_task->mtask_data_count,
+				 PCI_DMA_TODEVICE);
 }
 
+/**
+ * beiscsi_cleanup_task()- Free driver resources of the task
+ * @task: ptr to the iscsi task
+ *
+ **/
 static void beiscsi_cleanup_task(struct iscsi_task *task)
 {
 	struct beiscsi_io_task *io_task = task->dd_data;
@@ -3862,10 +4588,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
 	struct hwi_wrb_context *pwrb_context;
 	struct hwi_controller *phwi_ctrlr;
+	uint16_t cri_index = BE_GET_CRI_FROM_CID(
+			     beiscsi_conn->beiscsi_conn_cid);
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
-			- phba->fw_config.iscsi_cid_start];
+	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 
 	if (io_task->cmd_bhs) {
 		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
@@ -3886,21 +4613,14 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
 			spin_unlock(&phba->io_sgl_lock);
 			io_task->psgl_handle = NULL;
 		}
-	} else {
-		if (!beiscsi_conn->login_in_progress) {
-			if (io_task->pwrb_handle) {
-				free_wrb_handle(phba, pwrb_context,
-						io_task->pwrb_handle);
-				io_task->pwrb_handle = NULL;
-			}
-			if (io_task->psgl_handle) {
-				spin_lock(&phba->mgmt_sgl_lock);
-				free_mgmt_sgl_handle(phba,
-						     io_task->psgl_handle);
-				spin_unlock(&phba->mgmt_sgl_lock);
-				io_task->psgl_handle = NULL;
-			}
+
+		if (io_task->scsi_cmnd) {
+			scsi_dma_unmap(io_task->scsi_cmnd);
+			io_task->scsi_cmnd = NULL;
 		}
+	} else {
+		if (!beiscsi_conn->login_in_progress)
+			beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
 	}
 }
 
@@ -3909,8 +4629,6 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
 			   struct beiscsi_offload_params *params)
 {
 	struct wrb_handle *pwrb_handle;
-	struct iscsi_target_context_update_wrb *pwrb = NULL;
-	struct be_mem_descriptor *mem_descr;
 	struct beiscsi_hba *phba = beiscsi_conn->phba;
 	struct iscsi_task *task = beiscsi_conn->task;
 	struct iscsi_session *session =
task->conn->session; @@ -3921,80 +4639,28 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, * login/startup related tasks. */ beiscsi_conn->login_in_progress = 0; - spin_lock_bh(&session->lock); + spin_lock_bh(&session->back_lock); beiscsi_cleanup_task(task); - spin_unlock_bh(&session->lock); + spin_unlock_bh(&session->back_lock); - pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start)); - pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb; - memset(pwrb, 0, sizeof(*pwrb)); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, - max_burst_length, pwrb, params->dw[offsetof - (struct amap_beiscsi_offload_params, - max_burst_length) / 32]); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, - max_send_data_segment_length, pwrb, - params->dw[offsetof(struct amap_beiscsi_offload_params, - max_send_data_segment_length) / 32]); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, - first_burst_length, - pwrb, - params->dw[offsetof(struct amap_beiscsi_offload_params, - first_burst_length) / 32]); - - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb, - (params->dw[offsetof(struct amap_beiscsi_offload_params, - erl) / 32] & OFFLD_PARAMS_ERL)); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb, - (params->dw[offsetof(struct amap_beiscsi_offload_params, - dde) / 32] & OFFLD_PARAMS_DDE) >> 2); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb, - (params->dw[offsetof(struct amap_beiscsi_offload_params, - hde) / 32] & OFFLD_PARAMS_HDE) >> 3); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb, - (params->dw[offsetof(struct amap_beiscsi_offload_params, - ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb, - (params->dw[offsetof(struct amap_beiscsi_offload_params, - imd) / 32] & OFFLD_PARAMS_IMD) >> 5); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn, - pwrb, - (params->dw[offsetof(struct amap_beiscsi_offload_params, - exp_statsn) / 32] + 1)); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb, - 0x7); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx, - pwrb, pwrb_handle->wrb_index); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb, - pwrb, pwrb_handle->nxt_wrb_index); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, - session_state, pwrb, 0); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack, - pwrb, 1); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq, - pwrb, 0); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb, - 0); + pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid); - mem_descr = phba->init_mem; - mem_descr += ISCSI_MEM_GLOBAL_HEADER; - - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, - pad_buffer_addr_hi, pwrb, - mem_descr->mem_array[0].bus_address.u.a32.address_hi); - AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, - pad_buffer_addr_lo, pwrb, - mem_descr->mem_array[0].bus_address.u.a32.address_lo); + /* Check for the adapter family */ + if (is_chip_be2_be3r(phba)) + beiscsi_offload_cxn_v0(params, pwrb_handle, + phba->init_mem); + else + beiscsi_offload_cxn_v2(params, pwrb_handle); - be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb)); + be_dws_le_to_cpu(pwrb_handle->pwrb, + sizeof(struct iscsi_target_context_update_wrb)); 
doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; - - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); } static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, @@ -4024,6 +4690,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; itt_t itt; + uint16_t cri_index = 0; struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; dma_addr_t paddr; @@ -4044,33 +4711,60 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) spin_lock(&phba->io_sgl_lock); io_task->psgl_handle = alloc_io_sgl_handle(phba); spin_unlock(&phba->io_sgl_lock); - if (!io_task->psgl_handle) + if (!io_task->psgl_handle) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : Alloc of IO_SGL_ICD Failed" + "for the CID : %d\n", + beiscsi_conn->beiscsi_conn_cid); goto free_hndls; + } io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); - if (!io_task->pwrb_handle) + beiscsi_conn->beiscsi_conn_cid); + if (!io_task->pwrb_handle) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : Alloc of WRB_HANDLE Failed" + "for the CID : %d\n", + beiscsi_conn->beiscsi_conn_cid); goto free_io_hndls; + } } else { io_task->scsi_cmnd = NULL; if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { + beiscsi_conn->task = task; if (!beiscsi_conn->login_in_progress) { spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = (struct sgl_handle *) alloc_mgmt_sgl_handle(phba); spin_unlock(&phba->mgmt_sgl_lock); - if (!io_task->psgl_handle) + if (!io_task->psgl_handle) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | + BEISCSI_LOG_CONFIG, + "BM_%d : Alloc of MGMT_SGL_ICD Failed" + "for the CID : %d\n", + beiscsi_conn-> + beiscsi_conn_cid); goto free_hndls; + } beiscsi_conn->login_in_progress = 1; beiscsi_conn->plogin_sgl_handle = io_task->psgl_handle; io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); - if (!io_task->pwrb_handle) - goto free_io_hndls; + beiscsi_conn->beiscsi_conn_cid); + if (!io_task->pwrb_handle) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | + BEISCSI_LOG_CONFIG, + "BM_%d : Alloc of WRB_HANDLE Failed" + "for the CID : %d\n", + beiscsi_conn-> + beiscsi_conn_cid); + goto free_mgmt_hndls; + } beiscsi_conn->plogin_wrb_handle = io_task->pwrb_handle; @@ -4080,19 +4774,31 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) io_task->pwrb_handle = beiscsi_conn->plogin_wrb_handle; } - beiscsi_conn->task = task; } else { spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); spin_unlock(&phba->mgmt_sgl_lock); - if (!io_task->psgl_handle) + if (!io_task->psgl_handle) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | + BEISCSI_LOG_CONFIG, + "BM_%d : Alloc of MGMT_SGL_ICD Failed" + "for the CID : %d\n", + beiscsi_conn-> + beiscsi_conn_cid); goto free_hndls; + } io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); - if (!io_task->pwrb_handle) + beiscsi_conn->beiscsi_conn_cid); + if (!io_task->pwrb_handle) { + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, + "BM_%d : Alloc of 
WRB_HANDLE Failed" + "for the CID : %d\n", + beiscsi_conn->beiscsi_conn_cid); goto free_mgmt_hndls; + } } } @@ -4112,23 +4818,77 @@ free_io_hndls: free_mgmt_hndls: spin_lock(&phba->mgmt_sgl_lock); free_mgmt_sgl_handle(phba, io_task->psgl_handle); + io_task->psgl_handle = NULL; spin_unlock(&phba->mgmt_sgl_lock); free_hndls: phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[ - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (io_task->pwrb_handle) free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); io_task->pwrb_handle = NULL; pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, io_task->bhs_pa.u.a64.address); io_task->cmd_bhs = NULL; - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, - "BM_%d : Alloc of SGL_ICD Failed\n"); return -ENOMEM; } +int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, + unsigned int num_sg, unsigned int xferlen, + unsigned int writedir) +{ + + struct beiscsi_io_task *io_task = task->dd_data; + struct iscsi_conn *conn = task->conn; + struct beiscsi_conn *beiscsi_conn = conn->dd_data; + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct iscsi_wrb *pwrb = NULL; + unsigned int doorbell = 0; + + pwrb = io_task->pwrb_handle->pwrb; + + io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; + io_task->bhs_len = sizeof(struct be_cmd_bhs); + + if (writedir) { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, + INI_WR_CMD); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, + INI_RD_CMD); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); + } + + io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, + type, pwrb); + + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, + cpu_to_be16(*(unsigned short *) + &io_task->cmd_bhs->iscsi_hdr.lun)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, + io_task->pwrb_handle->wrb_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, + be32_to_cpu(task->cmdsn)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, + io_task->psgl_handle->sgl_index); + + hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, + io_task->pwrb_handle->nxt_wrb_index); + + be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); + + doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; + doorbell |= (io_task->pwrb_handle->wrb_index & + DB_DEF_PDU_WRB_INDEX_MASK) << + DB_DEF_PDU_WRB_INDEX_SHIFT; + doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); + return 0; +} static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, unsigned int num_sg, unsigned int xferlen, @@ -4156,6 +4916,9 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); } + io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, + type, pwrb); + AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, cpu_to_be16(*(unsigned short *) &io_task->cmd_bhs->iscsi_hdr.lun)); @@ -4178,7 +4941,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; - iowrite32(doorbell, phba->db_va + 
DB_TXULP0_OFFSET); + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); return 0; } @@ -4191,55 +4955,75 @@ static int beiscsi_mtask(struct iscsi_task *task) struct iscsi_wrb *pwrb = NULL; unsigned int doorbell = 0; unsigned int cid; + unsigned int pwrb_typeoffset = 0; cid = beiscsi_conn->beiscsi_conn_cid; pwrb = io_task->pwrb_handle->pwrb; memset(pwrb, 0, sizeof(*pwrb)); - AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, - be32_to_cpu(task->cmdsn)); - AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, - io_task->pwrb_handle->wrb_index); - AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, - io_task->psgl_handle->sgl_index); + + if (is_chip_be2_be3r(phba)) { + AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, + be32_to_cpu(task->cmdsn)); + AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, + io_task->pwrb_handle->wrb_index); + AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, + io_task->psgl_handle->sgl_index); + AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, + task->data_count); + AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, + io_task->pwrb_handle->nxt_wrb_index); + pwrb_typeoffset = BE_WRB_TYPE_OFFSET; + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, + be32_to_cpu(task->cmdsn)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, + io_task->pwrb_handle->wrb_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, + io_task->psgl_handle->sgl_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, + task->data_count); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, + io_task->pwrb_handle->nxt_wrb_index); + pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; + } + switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { case ISCSI_OP_LOGIN: - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, - TGT_DM_CMD); - AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); + ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); hwi_write_buffer(pwrb, task); break; case ISCSI_OP_NOOP_OUT: if (task->hdr->ttt != ISCSI_RESERVED_TAG) { - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, - TGT_DM_CMD); - AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, - pwrb, 0); - AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); + ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, + dmsg, pwrb, 1); + else + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + dmsg, pwrb, 1); } else { - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, - INI_RD_CMD); - AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); + ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, + dmsg, pwrb, 0); + else + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + dmsg, pwrb, 0); } hwi_write_buffer(pwrb, task); break; case ISCSI_OP_TEXT: - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, - TGT_DM_CMD); - AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); + ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); hwi_write_buffer(pwrb, task); break; case ISCSI_OP_SCSI_TMFUNC: - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, - INI_TMF_CMD); - AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); + ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); hwi_write_buffer(pwrb, task); break; case ISCSI_OP_LOGOUT: - AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); - AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, - HWH_TYPE_LOGOUT); + ADAPTER_SET_WRB_TYPE(pwrb, 
HWH_TYPE_LOGOUT, pwrb_typeoffset); hwi_write_buffer(pwrb, task); break; @@ -4251,17 +5035,17 @@ static int beiscsi_mtask(struct iscsi_task *task) return -EINVAL; } - AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, - task->data_count); - AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, - io_task->pwrb_handle->nxt_wrb_index); - be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); + /* Set the task type */ + io_task->wrb_type = (is_chip_be2_be3r(phba)) ? + AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : + AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); doorbell |= cid & DB_WRB_POST_CID_MASK; doorbell |= (io_task->pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; - iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); + iowrite32(doorbell, phba->db_va + + beiscsi_conn->doorbell_offset); return 0; } @@ -4269,10 +5053,13 @@ static int beiscsi_task_xmit(struct iscsi_task *task) { struct beiscsi_io_task *io_task = task->dd_data; struct scsi_cmnd *sc = task->sc; + struct beiscsi_hba *phba = NULL; struct scatterlist *sg; int num_sg; unsigned int writedir = 0, xferlen = 0; + phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba; + if (!sc) return beiscsi_mtask(task); @@ -4283,8 +5070,12 @@ static int beiscsi_task_xmit(struct iscsi_task *task) struct beiscsi_hba *phba = NULL; phba = ((struct beiscsi_conn *)conn->dd_data)->phba; - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO, - "BM_%d : scsi_dma_map Failed\n"); + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, + "BM_%d : scsi_dma_map Failed " + "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", + be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), + io_task->libiscsi_itt, scsi_bufflen(sc)); return num_sg; } @@ -4295,7 +5086,7 @@ static int beiscsi_task_xmit(struct iscsi_task *task) else writedir = 0; - return beiscsi_iotask(task, sg, num_sg, xferlen, writedir); + return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); } /** @@ -4326,20 +5117,24 @@ static int beiscsi_bsg_request(struct bsg_job *job) beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BM_%d : Failed to allocate memory for " "beiscsi_bsg_request\n"); - return -EIO; + return -ENOMEM; } tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, &nonemb_cmd); if (!tag) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, - "BM_%d : be_cmd_get_mac_addr Failed\n"); + "BM_%d : MBX Tag Allocation Failed\n"); pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); return -EAGAIN; - } else - wait_event_interruptible(phba->ctrl.mcc_wait[tag], - phba->ctrl.mcc_numtag[tag]); + } + + rc = wait_event_interruptible_timeout( + phba->ctrl.mcc_wait[tag], + phba->ctrl.mcc_numtag[tag], + msecs_to_jiffies( + BEISCSI_HOST_MBX_TIMEOUT)); extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; free_mcc_tag(&phba->ctrl, tag); @@ -4356,11 +5151,13 @@ static int beiscsi_bsg_request(struct bsg_job *job) nonemb_cmd.va, nonemb_cmd.dma); if (status || extd_status) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, - "BM_%d : be_cmd_get_mac_addr Failed" + "BM_%d : MBX Cmd Failed" " status = %d extd_status = %d\n", status, extd_status); return -EIO; + } else { + rc = 0; } break; @@ -4380,14 +5177,20 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) beiscsi_log_enable_init(phba, beiscsi_log_enable); } -static void beiscsi_quiesce(struct beiscsi_hba *phba) +/* + * beiscsi_quiesce()- Cleanup Driver resources + * @phba: Instance 
Priv structure
+ * @unload_state: Clean or EEH unload state
+ *
+ * Free the OS and HW resources held by the driver
+ **/
+static void beiscsi_quiesce(struct beiscsi_hba *phba,
+		uint32_t unload_state)
 {
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_context_memory *phwi_context;
 	struct be_eq_obj *pbe_eq;
 	unsigned int i, msix_vec;
-	u8 *real_offset = 0;
-	u32 value = 0;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -4395,35 +5198,38 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba)
 	if (phba->msix_enabled) {
 		for (i = 0; i <= phba->num_cpus; i++) {
 			msix_vec = phba->msix_entries[i].vector;
+			synchronize_irq(msix_vec);
 			free_irq(msix_vec, &phwi_context->be_eq[i]);
 			kfree(phba->msi_name[i]);
 		}
 	} else
-		if (phba->pcidev->irq)
+		if (phba->pcidev->irq) {
+			synchronize_irq(phba->pcidev->irq);
 			free_irq(phba->pcidev->irq, phba);
-	pci_disable_msix(phba->pcidev);
-	destroy_workqueue(phba->wq);
-	if (blk_iopoll_enabled)
-		for (i = 0; i < phba->num_cpus; i++) {
-			pbe_eq = &phwi_context->be_eq[i];
-			blk_iopoll_disable(&pbe_eq->iopoll);
 		}
+	pci_disable_msix(phba->pcidev);
 
-	beiscsi_clean_port(phba);
-	beiscsi_free_mem(phba);
-	real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
+	for (i = 0; i < phba->num_cpus; i++) {
+		pbe_eq = &phwi_context->be_eq[i];
+		blk_iopoll_disable(&pbe_eq->iopoll);
+	}
 
-	value = readl((void *)real_offset);
+	if (unload_state == BEISCSI_CLEAN_UNLOAD) {
+		destroy_workqueue(phba->wq);
+		beiscsi_clean_port(phba);
+		beiscsi_free_mem(phba);
 
-	if (value & 0x00010000) {
-		value &= 0xfffeffff;
-		writel(value, (void *)real_offset);
+		beiscsi_unmap_pci_function(phba);
+		pci_free_consistent(phba->pcidev,
+				    phba->ctrl.mbox_mem_alloced.size,
+				    phba->ctrl.mbox_mem_alloced.va,
+				    phba->ctrl.mbox_mem_alloced.dma);
+	} else {
+		hwi_purge_eq(phba);
+		hwi_cleanup(phba);
 	}
-	beiscsi_unmap_pci_function(phba);
-	pci_free_consistent(phba->pcidev,
-			    phba->ctrl.mbox_mem_alloced.size,
-			    phba->ctrl.mbox_mem_alloced.va,
-			    phba->ctrl.mbox_mem_alloced.dma);
+
+	cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
 }
 
 static void beiscsi_remove(struct pci_dev *pcidev)
@@ -4438,11 +5244,13 @@ static void beiscsi_remove(struct pci_dev *pcidev)
 	}
 
 	beiscsi_destroy_def_ifaces(phba);
-	beiscsi_quiesce(phba);
+	beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
 	iscsi_boot_destroy_kset(phba->boot_kset);
 	iscsi_host_remove(phba->shost);
 	pci_dev_put(phba->pcidev);
 	iscsi_host_free(phba->shost);
+	pci_disable_pcie_error_reporting(pcidev);
+	pci_set_drvdata(pcidev, NULL);
 	pci_disable_device(pcidev);
 }
 
@@ -4457,7 +5265,9 @@ static void beiscsi_shutdown(struct pci_dev *pcidev)
 		return;
 	}
 
-	beiscsi_quiesce(phba);
+	phba->state = BE_ADAPTER_STATE_SHUTDOWN;
+	iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session);
+	beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
 	pci_disable_device(pcidev);
 }
 
@@ -4476,16 +5286,233 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba)
 	return;
 }
 
-static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
-				       const struct pci_device_id *id)
+static void be_eqd_update(struct beiscsi_hba *phba)
+{
+	struct be_set_eqd set_eqd[MAX_CPUS];
+	struct be_aic_obj *aic;
+	struct be_eq_obj *pbe_eq;
+	struct hwi_controller *phwi_ctrlr;
+	struct hwi_context_memory *phwi_context;
+	int eqd, i, num = 0;
+	ulong now;
+	u32 pps, delta;
+	unsigned int tag;
+
+	phwi_ctrlr = phba->phwi_ctrlr;
+	phwi_context = phwi_ctrlr->phwi_ctxt;
+
+	for (i = 0; i <= phba->num_cpus; i++) {
+		aic = &phba->aic_obj[i];
+		pbe_eq = &phwi_context->be_eq[i];
+		now = jiffies;
+		if (!aic->jiffs || time_before(now, aic->jiffs) ||
+		    pbe_eq->cq_count < aic->eq_prev) {
+			aic->jiffs = now;
+			aic->eq_prev = pbe_eq->cq_count;
+			continue;
+		}
+		delta = jiffies_to_msecs(now - aic->jiffs);
+		pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
+		eqd = (pps / 1500) << 2;
+
+		if (eqd < 8)
+			eqd = 0;
+		eqd = min_t(u32, eqd, phwi_context->max_eqd);
+		eqd = max_t(u32, eqd, phwi_context->min_eqd);
+
+		aic->jiffs = now;
+		aic->eq_prev = pbe_eq->cq_count;
+
+		if (eqd != aic->prev_eqd) {
+			set_eqd[num].delay_multiplier = (eqd * 65)/100;
+			set_eqd[num].eq_id = pbe_eq->q.id;
+			aic->prev_eqd = eqd;
+			num++;
+		}
+	}
+	if (num) {
+		tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
+		if (tag)
+			beiscsi_mccq_compl(phba, tag, NULL, NULL);
+	}
+}
+
+/*
+ * beiscsi_hw_health_check()- Check adapter health
+ * @work: work item to check HW health
+ *
+ * Check if adapter in an unrecoverable state or not.
+ **/
+static void
+beiscsi_hw_health_check(struct work_struct *work)
+{
+	struct beiscsi_hba *phba =
+		container_of(work, struct beiscsi_hba,
+			     beiscsi_hw_check_task.work);
+
+	be_eqd_update(phba);
+
+	beiscsi_ue_detect(phba);
+
+	schedule_delayed_work(&phba->beiscsi_hw_check_task,
+			      msecs_to_jiffies(1000));
+}
+
+
+static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
+		pci_channel_state_t state)
+{
+	struct beiscsi_hba *phba = NULL;
+
+	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+	phba->state |= BE_ADAPTER_PCI_ERR;
+
+	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+		    "BM_%d : EEH error detected\n");
+
+	beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);
+
+	if (state == pci_channel_io_perm_failure) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : EEH : State PERM Failure");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_disable_device(pdev);
+
+	/* The error could cause the FW to trigger a flash debug dump.
+	 * Resetting the card while flash dump is in progress
+	 * can cause it not to recover; wait for it to finish.
+	 * Wait only for first function as it is needed only once per
+	 * adapter.
+	 **/
+	if (pdev->devfn == 0)
+		ssleep(30);
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
+{
+	struct beiscsi_hba *phba = NULL;
+	int status = 0;
+
+	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+
+	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+		    "BM_%d : EEH Reset\n");
+
+	status = pci_enable_device(pdev);
+	if (status)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	pci_set_master(pdev);
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	/* Wait for the CHIP Reset to complete */
+	status = be_chk_reset_complete(phba);
+	if (!status) {
+		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+			    "BM_%d : EEH Reset Completed\n");
+	} else {
+		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+			    "BM_%d : EEH Reset Completion Failure\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void beiscsi_eeh_resume(struct pci_dev *pdev)
+{
+	int ret = 0, i;
+	struct be_eq_obj *pbe_eq;
+	struct beiscsi_hba *phba = NULL;
+	struct hwi_controller *phwi_ctrlr;
+	struct hwi_context_memory *phwi_context;
+
+	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
+	pci_save_state(pdev);
+
+	if (enable_msix)
+		find_num_cpus(phba);
+	else
+		phba->num_cpus = 1;
+
+	if (enable_msix) {
+		beiscsi_msix_enable(phba);
+		if (!phba->msix_enabled)
+			phba->num_cpus = 1;
+	}
+
+	ret = beiscsi_cmd_reset_function(phba);
+	if (ret) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : Reset Failed\n");
+		goto ret_err;
+	}
+
+	ret = be_chk_reset_complete(phba);
+	if (ret) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : Failed to get out of reset.\n");
+		goto ret_err;
+	}
+
+	beiscsi_get_params(phba);
+	phba->shost->max_id = phba->params.cxns_per_ctrl;
+	phba->shost->can_queue = phba->params.ios_per_ctrl;
+	ret = hwi_init_controller(phba);
+
+	for (i = 0; i < MAX_MCC_CMD; i++) {
+		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
+		phba->ctrl.mcc_tag[i] = i + 1;
+		phba->ctrl.mcc_numtag[i + 1] = 0;
+		phba->ctrl.mcc_tag_available++;
+	}
+
+	phwi_ctrlr = phba->phwi_ctrlr;
+	phwi_context = phwi_ctrlr->phwi_ctxt;
+
+	for (i = 0; i < phba->num_cpus; i++) {
+		pbe_eq = &phwi_context->be_eq[i];
+		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+				be_iopoll);
+		blk_iopoll_enable(&pbe_eq->iopoll);
+	}
+
+	i = (phba->msix_enabled) ? i : 0;
+	/* Work item for MCC handling */
+	pbe_eq = &phwi_context->be_eq[i];
+	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
+	ret = beiscsi_init_irqs(phba);
+	if (ret < 0) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : beiscsi_eeh_resume - "
+			    "Failed to beiscsi_init_irqs\n");
+		goto ret_err;
+	}
+
+	hwi_enable_intr(phba);
+	phba->state &= ~BE_ADAPTER_PCI_ERR;
+
+	return;
+ret_err:
+	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+		    "BM_%d : AER EEH Resume Failed\n");
+}
+
+static int beiscsi_dev_probe(struct pci_dev *pcidev,
+			     const struct pci_device_id *id)
{
	struct beiscsi_hba *phba = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
-	int ret, num_cpus, i;
-	u8 *real_offset = 0;
-	u32 value = 0;
+	int ret = 0, i;

	ret = beiscsi_enable_pci(pcidev);
	if (ret < 0) {
@@ -4501,37 +5528,42 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
		goto disable_pci;
	}

+	/* Enable EEH reporting */
+	ret = pci_enable_pcie_error_reporting(pcidev);
+	if (ret)
+		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+			    "BM_%d : PCIe Error Reporting "
+			    "Enabling Failed\n");
+
+	pci_save_state(pcidev);
+
	/* Initialize Driver configuration Paramters */
	beiscsi_hba_attrs_init(phba);

+	phba->fw_timeout = false;
+	phba->mac_addr_set = false;
+
+
	switch (pcidev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
	case OC_DEVICE_ID2:
		phba->generation = BE_GEN2;
+		phba->iotask_fn = beiscsi_iotask;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID3:
		phba->generation = BE_GEN3;
+		phba->iotask_fn = beiscsi_iotask;
+		break;
+	case OC_SKH_ID1:
+		phba->generation = BE_GEN4;
+		phba->iotask_fn = beiscsi_iotask_v2;
		break;
	default:
		phba->generation = 0;
	}

-	if (enable_msix)
-		num_cpus = find_num_cpus();
-	else
-		num_cpus = 1;
-	phba->num_cpus = num_cpus;
-	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-		    "BM_%d : num_cpus = %d\n",
-		    phba->num_cpus);
-
-	if (enable_msix) {
-		beiscsi_msix_enable(phba);
-		if (!phba->msix_enabled)
-			phba->num_cpus = 1;
-	}
	ret = be_ctrl_init(phba, pcidev);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -4540,43 +5572,46 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
		goto hba_free;
	}

-	if (!num_hba) {
-		real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
-		value = readl((void *)real_offset);
-		if (value & 0x00010000) {
-			gcrashmode++;
-			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-				    "BM_%d : Loading Driver in crashdump mode\n");
-			ret = beiscsi_cmd_reset_function(phba);
-			if (ret) {
-				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-					    "BM_%d : Reset Failed. Aborting Crashdump\n");
-				goto hba_free;
-			}
-			ret = be_chk_reset_complete(phba);
-			if (ret) {
-				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-					    "BM_%d : Failed to get out of reset."
- "Aborting Crashdump\n"); - goto hba_free; - } - } else { - value |= 0x00010000; - writel(value, (void *)real_offset); - num_hba++; - } + ret = beiscsi_cmd_reset_function(phba); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Reset Failed\n"); + goto hba_free; + } + ret = be_chk_reset_complete(phba); + if (ret) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to get out of reset.\n"); + goto hba_free; } spin_lock_init(&phba->io_sgl_lock); spin_lock_init(&phba->mgmt_sgl_lock); spin_lock_init(&phba->isr_lock); + spin_lock_init(&phba->async_pdu_lock); ret = mgmt_get_fw_config(&phba->ctrl, phba); if (ret != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Error getting fw config\n"); goto free_port; } - phba->shost->max_id = phba->fw_config.iscsi_cid_count; + + if (enable_msix) + find_num_cpus(phba); + else + phba->num_cpus = 1; + + beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, + "BM_%d : num_cpus = %d\n", + phba->num_cpus); + + if (enable_msix) { + beiscsi_msix_enable(phba); + if (!phba->msix_enabled) + phba->num_cpus = 1; + } + + phba->shost->max_id = phba->params.cxns_per_ctrl; beiscsi_get_params(phba); phba->shost->can_queue = phba->params.ios_per_ctrl; ret = beiscsi_init_port(phba); @@ -4587,18 +5622,20 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, goto free_port; } - for (i = 0; i < MAX_MCC_CMD ; i++) { + for (i = 0; i < MAX_MCC_CMD; i++) { init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); phba->ctrl.mcc_tag[i] = i + 1; phba->ctrl.mcc_numtag[i + 1] = 0; phba->ctrl.mcc_tag_available++; + memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, + sizeof(struct be_dma_mem)); } phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; - snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", + snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq", phba->shost->host_no); - phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1); + phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name); if (!phba->wq) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : beiscsi_dev_probe-" @@ -4606,18 +5643,24 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, goto free_twq; } - INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs); + INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task, + beiscsi_hw_health_check); phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - if (blk_iopoll_enabled) { - for (i = 0; i < phba->num_cpus; i++) { - pbe_eq = &phwi_context->be_eq[i]; - blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, - be_iopoll); - blk_iopoll_enable(&pbe_eq->iopoll); - } + + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, + be_iopoll); + blk_iopoll_enable(&pbe_eq->iopoll); } + + i = (phba->msix_enabled) ? 
+	/* Work item for MCC handling */
+	pbe_eq = &phwi_context->be_eq[i];
+	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -4627,6 +5670,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
	}
	hwi_enable_intr(phba);

+	if (iscsi_host_add(phba->shost, &phba->pcidev->dev))
+		goto free_blkenbld;
+
	if (beiscsi_setup_boot_info(phba))
		/*
		 * log error but continue, because we may not be using
@@ -4637,30 +5683,23 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
			    "iSCSI boot info.\n");

	beiscsi_create_def_ifaces(phba);
+	schedule_delayed_work(&phba->beiscsi_hw_check_task,
+			      msecs_to_jiffies(1000));
+
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

free_blkenbld:
	destroy_workqueue(phba->wq);
-	if (blk_iopoll_enabled)
-		for (i = 0; i < phba->num_cpus; i++) {
-			pbe_eq = &phwi_context->be_eq[i];
-			blk_iopoll_disable(&pbe_eq->iopoll);
-		}
+	for (i = 0; i < phba->num_cpus; i++) {
+		pbe_eq = &phwi_context->be_eq[i];
+		blk_iopoll_disable(&pbe_eq->iopoll);
+	}
free_twq:
	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
free_port:
-	real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
-
-	value = readl((void *)real_offset);
-
-	if (value & 0x00010000) {
-		value &= 0xfffeffff;
-		writel(value, (void *)real_offset);
-	}
-
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
@@ -4677,6 +5716,12 @@ disable_pci:
	return ret;
}

+static struct pci_error_handlers beiscsi_eeh_handlers = {
+	.error_detected = beiscsi_eeh_err_detected,
+	.slot_reset = beiscsi_eeh_reset,
+	.resume = beiscsi_eeh_resume,
+};
+
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
@@ -4715,7 +5760,8 @@ static struct pci_driver beiscsi_pci_driver = {
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.shutdown = beiscsi_shutdown,
-	.id_table = beiscsi_pci_id_table
+	.id_table = beiscsi_pci_id_table,
+	.err_handler = &beiscsi_eeh_handlers
};
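
Editor's note: the be_eqd_update() hunk above derives the adaptive interrupt-coalescing delay from the completion rate seen on each event queue since the last one-second poll: pps = completions * 1000 / elapsed_ms, eqd = (pps / 1500) << 2 (zeroed below 8 and clamped to the EQ's min/max), and the value handed to firmware is delay_multiplier = (eqd * 65) / 100. The stand-alone sketch below only reproduces that arithmetic so the scaling is easy to check; the helper name and the clamp values used in main() are illustrative and not taken from the patch.

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the eqd math in be_eqd_update(); min_eqd/max_eqd stand in
     * for phwi_context->min_eqd and phwi_context->max_eqd.
     */
    static uint32_t eqd_delay_multiplier(uint32_t completions, uint32_t delta_ms,
                                         uint32_t min_eqd, uint32_t max_eqd)
    {
        uint32_t pps, eqd;

        if (delta_ms == 0)
            return 0;                           /* nothing elapsed, keep current delay */
        pps = (completions * 1000u) / delta_ms; /* completions per second */
        eqd = (pps / 1500u) << 2;               /* scale into EQ delay units */
        if (eqd < 8)
            eqd = 0;                            /* low rate: no coalescing delay */
        if (eqd > max_eqd)
            eqd = max_eqd;
        if (eqd < min_eqd)
            eqd = min_eqd;
        return (eqd * 65u) / 100u;              /* value programmed per EQ */
    }

    int main(void)
    {
        /* 90000 completions in 1000 ms: pps = 90000, eqd = 240, clamped to 120 */
        printf("delay_multiplier = %u\n",
               eqd_delay_multiplier(90000u, 1000u, 0u, 120u));
        return 0;
    }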

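Editor's note: the final hunks hang the three new EEH callbacks off a struct pci_error_handlers and point the driver's .err_handler field at it, which is how the PCI core reaches the driver after an EEH event (error_detected quiesces the adapter, slot_reset re-enables the function, resume rebuilds queues and IRQs). The skeleton below is a minimal sketch of that wiring for a hypothetical my_* driver, shown only to make the callback shapes explicit; it is not the be2iscsi implementation.

    #include <linux/pci.h>

    /* Hypothetical callbacks with the same signatures the patch registers. */
    static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
    {
        /* Stop I/O and release HW resources, as beiscsi_quiesce() does for EEH. */
        if (state == pci_channel_io_perm_failure)
            return PCI_ERS_RESULT_DISCONNECT;
        pci_disable_device(pdev);
        return PCI_ERS_RESULT_NEED_RESET;
    }

    static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
    {
        /* Re-enable the function after the slot has been reset. */
        if (pci_enable_device(pdev))
            return PCI_ERS_RESULT_DISCONNECT;
        pci_set_master(pdev);
        pci_restore_state(pdev);
        return PCI_ERS_RESULT_RECOVERED;
    }

    static void my_resume(struct pci_dev *pdev)
    {
        pci_save_state(pdev);
        /* Re-create queues, re-request IRQs and re-enable interrupts here. */
    }

    static const struct pci_error_handlers my_eeh_handlers = {
        .error_detected = my_error_detected,
        .slot_reset     = my_slot_reset,
        .resume         = my_resume,
    };

    /* Hooked up the same way the patch does for beiscsi_pci_driver:
     *     static struct pci_driver my_driver = {
     *             ...
     *             .err_handler = &my_eeh_handlers,
     *     };
     */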