Diffstat (limited to 'drivers/scsi/bnx2fc/bnx2fc_io.c')
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c  96
1 file changed, 60 insertions(+), 36 deletions(-)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 4f7453b9e41..7bc47fc7c68 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1,7 +1,7 @@
/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
* IO manager and SCSI IO processing.
*
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -239,8 +239,7 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
sc_cmd->scsi_done(sc_cmd);
}
-struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
- u16 min_xid, u16 max_xid)
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
{
struct bnx2fc_cmd_mgr *cmgr;
struct io_bdt *bdt_info;
@@ -252,6 +251,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
int num_ios, num_pri_ios;
size_t bd_tbl_sz;
int arr_sz = num_possible_cpus() + 1;
+ u16 min_xid = BNX2FC_MIN_XID;
+ u16 max_xid = hba->max_xid;
if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \
@@ -281,6 +282,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
arr_sz, GFP_KERNEL);
if (!cmgr->free_list_lock) {
printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
+ kfree(cmgr->free_list);
+ cmgr->free_list = NULL;
goto mem_err;
}
@@ -298,7 +301,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
* of slow path requests.
*/
xid = BNX2FC_MIN_XID;
- num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
+ num_pri_ios = num_ios - hba->elstm_xids;
for (i = 0; i < num_ios; i++) {
io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
@@ -367,7 +370,7 @@ void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
struct bnx2fc_hba *hba = cmgr->hba;
size_t bd_tbl_sz;
u16 min_xid = BNX2FC_MIN_XID;
- u16 max_xid = BNX2FC_MAX_XID;
+ u16 max_xid = hba->max_xid;
int num_ios;
int i;
@@ -405,11 +408,10 @@ free_cmd_pool:
goto free_cmgr;
for (i = 0; i < num_possible_cpus() + 1; i++) {
- struct list_head *list;
- struct list_head *tmp;
+ struct bnx2fc_cmd *tmp, *io_req;
- list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
- struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
+ list_for_each_entry_safe(io_req, tmp,
+ &cmgr->free_list[i], link) {
list_del(&io_req->link);
kfree(io_req);
}
@@ -594,13 +596,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
mp_req->mp_resp_bd = NULL;
}
if (mp_req->req_buf) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
mp_req->req_buf,
mp_req->req_buf_dma);
mp_req->req_buf = NULL;
}
if (mp_req->resp_buf) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
mp_req->resp_buf,
mp_req->resp_buf_dma);
mp_req->resp_buf = NULL;
@@ -622,7 +624,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
mp_req->req_len = sizeof(struct fcp_cmnd);
io_req->data_xfer_len = mp_req->req_len;
- mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&mp_req->req_buf_dma,
GFP_ATOMIC);
if (!mp_req->req_buf) {
@@ -631,7 +633,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
return FAILED;
}
- mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&mp_req->resp_buf_dma,
GFP_ATOMIC);
if (!mp_req->resp_buf) {
@@ -639,8 +641,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
bnx2fc_free_mp_resc(io_req);
return FAILED;
}
- memset(mp_req->req_buf, 0, PAGE_SIZE);
- memset(mp_req->resp_buf, 0, PAGE_SIZE);
+ memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
+ memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);
/* Allocate and map mp_req_bd and mp_resp_bd */
sz = sizeof(struct fcoe_bd_ctx);
@@ -655,7 +657,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
&mp_req->mp_resp_bd_dma,
GFP_ATOMIC);
- if (!mp_req->mp_req_bd) {
+ if (!mp_req->mp_resp_bd) {
printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
bnx2fc_free_mp_resc(io_req);
return FAILED;
@@ -665,7 +667,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
mp_req_bd = mp_req->mp_req_bd;
mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
- mp_req_bd->buf_len = PAGE_SIZE;
+ mp_req_bd->buf_len = CNIC_PAGE_SIZE;
mp_req_bd->flags = 0;
/*
@@ -677,7 +679,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
addr = mp_req->resp_buf_dma;
mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
- mp_resp_bd->buf_len = PAGE_SIZE;
+ mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
mp_resp_bd->flags = 0;
return SUCCESS;
@@ -686,8 +688,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
struct fc_lport *lport;
- struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
- struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rp;
struct fcoe_port *port;
struct bnx2fc_interface *interface;
struct bnx2fc_rport *tgt;
@@ -705,6 +707,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
unsigned long start = jiffies;
lport = shost_priv(host);
+ rport = starget_to_rport(scsi_target(sc_cmd->device));
port = lport_priv(lport);
interface = port->priv;
@@ -713,6 +716,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
rc = FAILED;
goto tmf_err;
}
+ rp = rport->dd_data;
rc = fc_block_scsi_eh(sc_cmd);
if (rc)
@@ -1244,6 +1248,12 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
rc = bnx2fc_expl_logo(lport, io_req);
+ /* This only occurs when a task abort was requested while ABTS
+  is in progress. Setting the IO_CLEANUP flag will skip the
+  RRQ process in the case when the fw generated SCSI_CMD cmpl
+  was a result of the ABTS request rather than the CLEANUP
+  request */
+ set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
goto out;
}
@@ -1268,8 +1278,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
- if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
- &io_req->req_flags))) {
+ if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
+ rc = SUCCESS;
+ } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags))) {
/* Let the scsi-ml try to recover this command */
printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
io_req->xid);
@@ -1436,9 +1449,7 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct bnx2fc_rport *tgt = io_req->tgt;
- struct list_head *list;
- struct list_head *tmp;
- struct bnx2fc_cmd *cmd;
+ struct bnx2fc_cmd *cmd, *tmp;
int tm_lun = sc_cmd->device->lun;
int rc = 0;
int lun;
@@ -1449,9 +1460,8 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
* Walk thru the active_ios queue and ABORT the IO
* that matches with the LUN that was reset
*/
- list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
- cmd = (struct bnx2fc_cmd *)list;
lun = cmd->sc_cmd->device->lun;
if (lun == tm_lun) {
/* Initiate ABTS on this cmd */
@@ -1476,9 +1486,7 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
struct bnx2fc_rport *tgt = io_req->tgt;
- struct list_head *list;
- struct list_head *tmp;
- struct bnx2fc_cmd *cmd;
+ struct bnx2fc_cmd *cmd, *tmp;
int rc = 0;
/* called with tgt_lock held */
@@ -1487,9 +1495,8 @@ static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
* Walk thru the active_ios queue and ABORT the IO
* that matches with the LUN that was reset
*/
- list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
- cmd = (struct bnx2fc_cmd *)list;
/* Initiate ABTS */
if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
&cmd->req_flags)) {
@@ -1814,7 +1821,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
}
- memset(sc_cmd->sense_buffer, 0, sizeof(sc_cmd->sense_buffer));
+ memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
if (fcp_sns_len)
memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
@@ -1866,7 +1873,15 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
rc = SCSI_MLQUEUE_TARGET_BUSY;
goto exit_qcmd;
}
-
+ if (tgt->retry_delay_timestamp) {
+ if (time_after(jiffies, tgt->retry_delay_timestamp)) {
+ tgt->retry_delay_timestamp = 0;
+ } else {
+ /* If retry_delay timer is active, flow off the ML */
+ rc = SCSI_MLQUEUE_TARGET_BUSY;
+ goto exit_qcmd;
+ }
+ }
io_req = bnx2fc_cmd_alloc(tgt);
if (!io_req) {
rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -1956,6 +1971,15 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
" fcp_resid = 0x%x\n",
io_req->cdb_status, io_req->fcp_resid);
sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+
+ if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
+ io_req->cdb_status == SAM_STAT_BUSY) {
+ /* Set the retry delay timestamp to jiffies +
+  retry_delay_timer * 100ms for the rport/tgt */
+ tgt->retry_delay_timestamp = jiffies +
+ fcp_rsp->retry_delay_timer * HZ / 10;
+ }
+
}
if (io_req->fcp_resid)
scsi_set_resid(sc_cmd, io_req->fcp_resid);
@@ -1980,7 +2004,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
struct fc_lport *lport = port->lport;
- struct fcoe_dev_stats *stats;
+ struct fc_stats *stats;
int task_idx, index;
u16 xid;
@@ -1991,7 +2015,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
sc_cmd->SCp.ptr = (char *)io_req;
- stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats = per_cpu_ptr(lport->stats, get_cpu());
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
io_req->io_req_flags = BNX2FC_READ;
stats->InputRequests++;
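
The most visible behavioural change in this diff is the retry-delay flow control added to bnx2fc_queuecommand() and bnx2fc_process_scsi_cmd_compl(). The sketch below restates that logic in isolation so the two halves can be read together. It is a minimal illustration only: the stub structure (bnx2fc_tgt_stub) and helper names (note_retry_delay, should_flow_off) do not exist in the driver, and it assumes the FCP retry_delay_timer value is expressed in 100 ms units, as the patch's HZ / 10 scaling implies.

#include <linux/jiffies.h>	/* jiffies, time_after(), HZ */
#include <linux/types.h>	/* u16, bool */

/* Illustrative stand-in for the single bnx2fc_rport field the patch adds. */
struct bnx2fc_tgt_stub {
	unsigned long retry_delay_timestamp;	/* 0 means no delay in effect */
};

/* Completion path: on SAM_STAT_TASK_SET_FULL or SAM_STAT_BUSY, remember
 * how long the target asked us to back off (retry_delay_timer is in
 * 100 ms units, hence the HZ / 10 scaling).
 */
static void note_retry_delay(struct bnx2fc_tgt_stub *tgt, u16 retry_delay_timer)
{
	tgt->retry_delay_timestamp = jiffies + retry_delay_timer * HZ / 10;
}

/* Queuecommand path: returns true when the command should be flowed off,
 * i.e. the caller should return SCSI_MLQUEUE_TARGET_BUSY to the midlayer.
 */
static bool should_flow_off(struct bnx2fc_tgt_stub *tgt)
{
	if (!tgt->retry_delay_timestamp)
		return false;			/* no delay requested */
	if (time_after(jiffies, tgt->retry_delay_timestamp)) {
		tgt->retry_delay_timestamp = 0;	/* delay expired, resume I/O */
		return false;
	}
	return true;				/* still inside the delay window */
}

Clearing retry_delay_timestamp once the window has passed is what lets normal queueing resume without any dedicated timer; the check is evaluated opportunistically on the next queuecommand call.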