Diffstat (limited to 'drivers/scsi/qla2xxx/qla_inline.h')
-rw-r--r--   drivers/scsi/qla2xxx/qla_inline.h   336
1 file changed, 223 insertions, 113 deletions
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index ecc3741a452..b3b1d6fc2d6 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,11 +1,33 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2005 QLogic Corporation
+ * Copyright (c) 2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 
-static __inline__ uint16_t qla2x00_debounce_register(volatile uint16_t __iomem *);
+#include "qla_target.h"
+/**
+ * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @dsds: number of data segment decriptors needed
+ *
+ * Returns the number of IOCB entries needed to store @dsds.
+ */
+static inline uint16_t
+qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
+{
+	uint16_t iocbs;
+
+	iocbs = 1;
+	if (dsds > 1) {
+		iocbs += (dsds - 1) / 5;
+		if ((dsds - 1) % 5)
+			iocbs++;
+	}
+	return iocbs;
+}
+
 /*
  * qla2x00_debounce_register
  *	Debounce register.
@@ -32,140 +54,228 @@ qla2x00_debounce_register(volatile uint16_t __iomem *addr)
 	return (first);
 }
 
-static __inline__ int qla2x00_normalize_dma_addr(
-    dma_addr_t *e_addr, uint32_t *e_len,
-    dma_addr_t *ne_addr, uint32_t *ne_len);
+static inline void
+qla2x00_poll(struct rsp_que *rsp)
+{
+	unsigned long flags;
+	struct qla_hw_data *ha = rsp->hw;
+	local_irq_save(flags);
+	if (IS_P3P_TYPE(ha))
+		qla82xx_poll(0, rsp);
+	else
+		ha->isp_ops->intr_handler(0, rsp);
+	local_irq_restore(flags);
+}
 
-/**
- * qla2x00_normalize_dma_addr() - Normalize an DMA address.
- * @e_addr:  Raw DMA address
- * @e_len:   Raw DMA length
- * @ne_addr: Normalized second DMA address
- * @ne_len:  Normalized second DMA length
- *
- * If the address does not span a 4GB page boundary, the contents of @ne_addr
- * and @ne_len are undefined.  @e_len is updated to reflect a normalization.
- *
- * Example:
- *
- *	ffffabc0ffffeeee	(e_addr) start of DMA address
- *	0000000020000000	(e_len)  length of DMA transfer
- *	ffffabc11fffeeed	end of DMA transfer
- *
- * Is the 4GB boundary crossed?
- *
- *	ffffabc0ffffeeee	(e_addr)
- *	ffffabc11fffeeed	(e_addr + e_len - 1)
- *	00000001e0000003	((e_addr ^ (e_addr + e_len - 1))
- *	0000000100000000	((e_addr ^ (e_addr + e_len - 1)) & ~(0xffffffff)
- *
- * Compute start of second DMA segment:
- *
- *	ffffabc0ffffeeee	(e_addr)
- *	ffffabc1ffffeeee	(0x100000000 + e_addr)
- *	ffffabc100000000	(0x100000000 + e_addr) & ~(0xffffffff)
- *	ffffabc100000000	(ne_addr)
- *
- * Compute length of second DMA segment:
- *
- *	00000000ffffeeee	(e_addr & 0xffffffff)
- *	0000000000001112	(0x100000000 - (e_addr & 0xffffffff))
- *	000000001fffeeee	(e_len - (0x100000000 - (e_addr & 0xffffffff))
- *	000000001fffeeee	(ne_len)
- *
- * Adjust length of first DMA segment
- *
- *	0000000020000000	(e_len)
- *	0000000000001112	(e_len - ne_len)
- *	0000000000001112	(e_len)
- *
- * Returns non-zero if the specified address was normalized, else zero.
- */
-static __inline__ int
-qla2x00_normalize_dma_addr(
-    dma_addr_t *e_addr, uint32_t *e_len,
-    dma_addr_t *ne_addr, uint32_t *ne_len)
+static inline uint8_t *
+host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
 {
-	int normalized;
+	uint32_t *ifcp = (uint32_t *) fcp;
+	uint32_t *ofcp = (uint32_t *) fcp;
+	uint32_t iter = bsize >> 2;
 
-	normalized = 0;
-	if ((*e_addr ^ (*e_addr + *e_len - 1)) & ~(0xFFFFFFFFULL)) {
-		/* Compute normalized crossed address and len */
-		*ne_addr = (0x100000000ULL + *e_addr) & ~(0xFFFFFFFFULL);
-		*ne_len = *e_len - (0x100000000ULL - (*e_addr & 0xFFFFFFFFULL));
-		*e_len -= *ne_len;
+	for (; iter ; iter--)
+		*ofcp++ = swab32(*ifcp++);
 
-		normalized++;
-	}
-	return (normalized);
+	return fcp;
 }
 
-static __inline__ void qla2x00_poll(scsi_qla_host_t *);
 static inline void
-qla2x00_poll(scsi_qla_host_t *ha)
+host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
 {
-	ha->isp_ops.intr_handler(0, ha, NULL);
+	uint32_t *isrc = (uint32_t *) src;
+	__le32 *odest = (__le32 *) dst;
+	uint32_t iter = bsize >> 2;
+
+	for (; iter ; iter--)
+		*odest++ = cpu_to_le32(*isrc++);
 }
 
-static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *);
-/*
- * This routine will wait for fabric devices for
- * the reset delay.
- */
-static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *ha)
+static inline void
+qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
 {
-	uint16_t	fw_state;
+	int i;
+
+	if (IS_FWI2_CAPABLE(ha))
+		return;
 
-	qla2x00_get_firmware_state(ha, &fw_state);
+	for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
+		set_bit(i, ha->loop_id_map);
+	set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
+	set_bit(BROADCAST, ha->loop_id_map);
+}
+
+static inline int
+qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
+{
+	struct qla_hw_data *ha = vha->hw;
+	if (IS_FWI2_CAPABLE(ha))
+		return (loop_id > NPH_LAST_HANDLE);
+
+	return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
+	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
+}
+
+static inline void
+qla2x00_clear_loop_id(fc_port_t *fcport) {
+	struct qla_hw_data *ha = fcport->vha->hw;
+
+	if (fcport->loop_id == FC_NO_LOOP_ID ||
+	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
+		return;
+
+	clear_bit(fcport->loop_id, ha->loop_id_map);
+	fcport->loop_id = FC_NO_LOOP_ID;
+}
+
+static inline void
+qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp,
+	struct qla_tgt_cmd *tc)
+{
+	struct dsd_dma *dsd_ptr, *tdsd_ptr;
+	struct crc_context *ctx;
+
+	if (sp)
+		ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
+	else if (tc)
+		ctx = (struct crc_context *)tc->ctx;
+	else {
+		BUG();
+		return;
+	}
+
+	/* clean up allocated prev pool */
+	list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+	    &ctx->dsd_list, list) {
+		dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+		    dsd_ptr->dsd_list_dma);
+		list_del(&dsd_ptr->list);
+		kfree(dsd_ptr);
+	}
+	INIT_LIST_HEAD(&ctx->dsd_list);
+}
+
+static inline void
+qla2x00_set_fcport_state(fc_port_t *fcport, int state)
+{
+	int old_state;
+
+	old_state = atomic_read(&fcport->state);
+	atomic_set(&fcport->state, state);
+
+	/* Don't print state transitions during initial allocation of fcport */
+	if (old_state && old_state != state) {
+		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
+		    "FCPort state transitioned from %s to %s - "
+		    "portid=%02x%02x%02x.\n",
+		    port_state_str[old_state], port_state_str[state],
+		    fcport->d_id.b.domain, fcport->d_id.b.area,
+		    fcport->d_id.b.al_pa);
+	}
 }
 
-/**
- * qla2x00_issue_marker() - Issue a Marker IOCB if necessary.
- * @ha: HA context
- * @ha_locked: is function called with the hardware lock
- *
- * Returns non-zero if a failure occured, else zero.
- */
 static inline int
-qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)
-{
-	/* Send marker if required */
-	if (ha->marker_needed != 0) {
-		if (ha_locked) {
-			if (__qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) !=
-			    QLA_SUCCESS)
-				return (QLA_FUNCTION_FAILED);
-		} else {
-			if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) !=
-			    QLA_SUCCESS)
-				return (QLA_FUNCTION_FAILED);
-		}
-		ha->marker_needed = 0;
+qla2x00_hba_err_chk_enabled(srb_t *sp)
+{
+	/*
+	 * Uncomment when corresponding SCSI changes are done.
+	 *
+	if (!sp->cmd->prot_chk)
+		return 0;
+	 *
+	 */
+	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
+	case SCSI_PROT_READ_STRIP:
+	case SCSI_PROT_WRITE_INSERT:
+		if (ql2xenablehba_err_chk >= 1)
+			return 1;
+		break;
+	case SCSI_PROT_READ_PASS:
+	case SCSI_PROT_WRITE_PASS:
+		if (ql2xenablehba_err_chk >= 2)
+			return 1;
+		break;
+	case SCSI_PROT_READ_INSERT:
+	case SCSI_PROT_WRITE_STRIP:
+		return 1;
 	}
-	return (QLA_SUCCESS);
+	return 0;
 }
 
-static inline uint8_t *host_to_fcp_swap(uint8_t *, uint32_t);
-static inline uint8_t *
-host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
+static inline int
+qla2x00_reset_active(scsi_qla_host_t *vha)
 {
-	uint32_t *ifcp = (uint32_t *) fcp;
-	uint32_t *ofcp = (uint32_t *) fcp;
-	uint32_t iter = bsize >> 2;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
 
-	for (; iter ; iter--)
-		*ofcp++ = swab32(*ifcp++);
+	/* Test appropriate base-vha and vha flags. */
+	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
+	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+}
 
-	return fcp;
+static inline srb_t *
+qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
+{
+	srb_t *sp = NULL;
+	struct qla_hw_data *ha = vha->hw;
+	uint8_t bail;
+
+	QLA_VHA_MARK_BUSY(vha, bail);
+	if (unlikely(bail))
+		return NULL;
+
+	sp = mempool_alloc(ha->srb_mempool, flag);
+	if (!sp)
+		goto done;
+
+	memset(sp, 0, sizeof(*sp));
+	sp->fcport = fcport;
+	sp->iocbs = 1;
+done:
+	if (!sp)
+		QLA_VHA_MARK_NOT_BUSY(vha);
+	return sp;
+}
+
+static inline void
+qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
+{
+	mempool_free(sp, vha->hw->srb_mempool);
+	QLA_VHA_MARK_NOT_BUSY(vha);
+}
+
+static inline void
+qla2x00_init_timer(srb_t *sp, unsigned long tmo)
+{
+	init_timer(&sp->u.iocb_cmd.timer);
+	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
+	sp->u.iocb_cmd.timer.data = (unsigned long)sp;
+	sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
+	add_timer(&sp->u.iocb_cmd.timer);
+	sp->free = qla2x00_sp_free;
+	if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
+	    (sp->type == SRB_FXIOCB_DCMD))
+		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
 }
 
-static inline int qla2x00_is_reserved_id(scsi_qla_host_t *, uint16_t);
 static inline int
-qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id)
+qla2x00_gid_list_size(struct qla_hw_data *ha)
 {
-	if (IS_QLA24XX(ha) || IS_QLA25XX(ha))
-		return (loop_id > NPH_LAST_HANDLE);
+	if (IS_QLAFX00(ha))
+		return sizeof(uint32_t) * 32;
+	else
+		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
+}
 
-	return ((loop_id > ha->last_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
-	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
-};
+static inline void
+qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
+{
+	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+		complete(&ha->mbx_intr_comp);
+	}
+}
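
The qla24xx_calc_iocbs() hunk above is a ceiling division in disguise: the command IOCB carries the first data segment descriptor and each Continuation Type 1 IOCB carries up to five more. Below is a standalone userspace sketch of the same arithmetic; calc_iocbs and the sample counts are illustrative, not driver code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same formula as qla24xx_calc_iocbs(): one command IOCB for the first
 * descriptor, then one Continuation Type 1 IOCB per five further ones. */
static uint16_t calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs = 1;

	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

int main(void)
{
	assert(calc_iocbs(1) == 1);   /* fits in the command IOCB alone */
	assert(calc_iocbs(6) == 2);   /* 1 + 5 -> one continuation IOCB */
	assert(calc_iocbs(7) == 3);   /* 7th dsd spills into a second one */
	assert(calc_iocbs(16) == 4);  /* 1 + 3*5, no remainder */
	printf("calc_iocbs(7) = %u\n", (unsigned)calc_iocbs(7));
	return 0;
}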
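host_to_fcp_swap() and host_to_adap() above rework a buffer one 32-bit word at a time between CPU byte order and the order the firmware expects. The userspace sketch below mimics the in-place word swap; swab32_u is a hand-rolled stand-in for the kernel's swab32(), and the buffer contents are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Hand-rolled stand-in for the kernel's swab32(). */
static uint32_t swab32_u(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

/* Same shape as host_to_fcp_swap(): byte-swap the buffer in place,
 * one 32-bit word per iteration; bsize is assumed a multiple of 4. */
static uint8_t *fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *p = (uint32_t *)fcp;
	uint32_t iter = bsize >> 2;

	for (; iter; iter--, p++)
		*p = swab32_u(*p);
	return fcp;
}

int main(void)
{
	/* The union keeps the byte view 4-byte aligned for the word accesses. */
	union {
		uint8_t b[8];
		uint32_t w[2];
	} buf = { .b = { 0x01, 0x02, 0x03, 0x04, 0xaa, 0xbb, 0xcc, 0xdd } };

	fcp_swap(buf.b, sizeof(buf.b));
	for (unsigned i = 0; i < sizeof(buf.b); i++)
		printf("%02x ", buf.b[i]);
	printf("\n");	/* prints: 04 03 02 01 dd cc bb aa */
	return 0;
}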
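qla2x00_clean_dsd_pool() above frees every node while walking the list, which is why it uses list_for_each_entry_safe() rather than the plain iterator: the current entry is gone before the loop advances. A minimal userspace sketch of that walk-and-free pattern follows, with an ordinary singly linked list standing in for the kernel's intrusive list_head and DMA pool; all names here are illustrative.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the dsd bookkeeping nodes released by qla2x00_clean_dsd_pool(). */
struct dsd_node {
	int id;
	struct dsd_node *next;
};

/* Save the successor before freeing the current node, for the same
 * reason the driver uses the _safe list iterator. */
static void clean_dsd_list(struct dsd_node **head)
{
	struct dsd_node *cur = *head, *tmp;

	while (cur) {
		tmp = cur->next;	/* grab successor first */
		printf("freeing dsd %d\n", cur->id);
		free(cur);
		cur = tmp;
	}
	*head = NULL;			/* analogous to INIT_LIST_HEAD() */
}

int main(void)
{
	struct dsd_node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct dsd_node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->id = i;
		n->next = head;
		head = n;
	}
	clean_dsd_list(&head);
	return 0;
}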
