Diffstat (limited to 'drivers/scsi/libfc/fc_fcp.c')
-rw-r--r--	drivers/scsi/libfc/fc_fcp.c	2131
1 files changed, 2131 insertions, 0 deletions
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
new file mode 100644
index 00000000000..404e63ff46b
--- /dev/null
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -0,0 +1,2131 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
+ * Copyright(c) 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/scatterlist.h>
+#include <linux/err.h>
+#include <linux/crc32.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <scsi/fc/fc_fc2.h>
+
+#include <scsi/libfc.h>
+#include <scsi/fc_encode.h>
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("libfc");
+MODULE_LICENSE("GPL");
+
+static int fc_fcp_debug;
+
+#define FC_DEBUG_FCP(fmt...) \
+ do { \
+ if (fc_fcp_debug) \
+ FC_DBG(fmt); \
+ } while (0)
+
+static struct kmem_cache *scsi_pkt_cachep;
+
+/* SRB state definitions */
+#define FC_SRB_FREE 0 /* cmd is free */
+#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
+#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
+#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
+#define FC_SRB_ABORTED (1 << 3) /* abort acknowledged */
+#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
+#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
+#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
+#define FC_SRB_NOMEM (1 << 7) /* dropped due to out of memory */
+
+#define FC_SRB_READ (1 << 1)
+#define FC_SRB_WRITE (1 << 0)
+
+/*
+ * The SCp.ptr should be tested and set under the host lock. NULL indicates
+ * that the command has been returned to the scsi layer.
+ */
+#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
+#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
+#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
+#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
+#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
+
+struct fc_fcp_internal {
+ mempool_t *scsi_pkt_pool;
+ struct list_head scsi_pkt_queue;
+ u8 throttled;
+};
+
+#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
+
+/*
+ * function prototypes
+ * FC scsi I/O related functions
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
+static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
+static void fc_timeout_error(struct fc_fcp_pkt *);
+static void fc_fcp_timeout(unsigned long data);
+static void fc_fcp_rec(struct fc_fcp_pkt *);
+static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_io_compl(struct fc_fcp_pkt *);
+
+static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
+static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
+
+/*
+ * command status codes
+ */
+#define FC_COMPLETE 0
+#define FC_CMD_ABORTED 1
+#define FC_CMD_RESET 2
+#define FC_CMD_PLOGO 3
+#define FC_SNS_RCV 4
+#define FC_TRANS_ERR 5
+#define FC_DATA_OVRRUN 6
+#define FC_DATA_UNDRUN 7
+#define FC_ERROR 8
+#define FC_HRD_ERROR 9
+#define FC_CMD_TIME_OUT 10
+
+/*
+ * Error recovery timeout values.
+ */
+#define FC_SCSI_ER_TIMEOUT (10 * HZ)
+#define FC_SCSI_TM_TOV (10 * HZ)
+#define FC_SCSI_REC_TOV (2 * HZ)
+#define FC_HOST_RESET_TIMEOUT (30 * HZ)
+
+#define FC_MAX_ERROR_CNT 5
+#define FC_MAX_RECOV_RETRY 3
+
+#define FC_FCP_DFLT_QUEUE_DEPTH 32
+
+/**
+ * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet
+ * @lp: fc lport struct
+ * @gfp: gfp flags for allocation
+ *
+ * This is used by the upper layer scsi driver.
+ * Return Value : scsi_pkt structure or NULL on allocation failure.
+ * Context : called from process context. No locking required.
+ */
+static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp, gfp_t gfp)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+ struct fc_fcp_pkt *fsp;
+
+ fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
+ if (fsp) {
+ memset(fsp, 0, sizeof(*fsp));
+ fsp->lp = lp;
+ atomic_set(&fsp->ref_cnt, 1);
+ init_timer(&fsp->timer);
+ INIT_LIST_HEAD(&fsp->list);
+ spin_lock_init(&fsp->scsi_pkt_lock);
+ }
+ return fsp;
+}
+
+/**
+ * fc_fcp_pkt_release - release hold on scsi_pkt packet
+ * @fsp: fcp packet struct
+ *
+ * This is used by the upper layer scsi driver.
+ * Context : called from process and interrupt context.
+ * No locking required.
+ */
+static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
+{
+ if (atomic_dec_and_test(&fsp->ref_cnt)) {
+ struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);
+
+ mempool_free(fsp, si->scsi_pkt_pool);
+ }
+}
+
+static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
+{
+ atomic_inc(&fsp->ref_cnt);
+}
+
+/**
+ * fc_fcp_pkt_destroy - release hold on scsi_pkt packet
+ *
+ * @seq: exchange sequence
+ * @fsp: fcp packet struct
+ *
+ * Release the hold on the scsi_pkt packet that was taken to keep
+ * it until the EM layer exchange resource is freed.
+ * Context : called from the EM layer.
+ * No locking required.
+ */
+static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
+{
+ fc_fcp_pkt_release(fsp);
+}
+
+/**
+ * fc_fcp_lock_pkt - lock a packet and get a ref to it.
+ * @fsp: fcp packet
+ *
+ * We should only return an error if we return a command to scsi-ml before
+ * getting a response. This can happen when we send an abort but do not
+ * wait for the response, and the abort and the command pass each other on
+ * the wire/network-layer.
+ *
+ * Note: this function locks the packet and gets a reference to allow
+ * callers to call the completion function while the lock is held and
+ * not have to worry about the packet's refcount.
+ *
+ * TODO: Maybe we should just have callers grab/release the lock and
+ * have a function that they call to verify the fsp and grab a ref if
+ * needed.
+ */
+static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
+{
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (fsp->state & FC_SRB_COMPL) {
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ return -EPERM;
+ }
+
+ fc_fcp_pkt_hold(fsp);
+ return 0;
+}
+
+static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
+{
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ fc_fcp_pkt_release(fsp);
+}
+
+static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
+{
+ if (!(fsp->state & FC_SRB_COMPL))
+ mod_timer(&fsp->timer, jiffies + delay);
+}
+
+static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
+{
+ if (!fsp->seq_ptr)
+ return -EINVAL;
+
+ fsp->state |= FC_SRB_ABORT_PENDING;
+ return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
+}
+
+/*
+ * Retry command.
+ * An abort isn't needed.
+ */
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
+{
+ if (fsp->seq_ptr) {
+ fsp->lp->tt.exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+ fsp->io_status = SUGGEST_RETRY << 24;
+ fsp->status_code = FC_ERROR;
+ fc_fcp_complete_locked(fsp);
+}
+
+/*
+ * Receive SCSI data from target.
+ * Called after receiving solicited data.
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ struct scsi_cmnd *sc = fsp->cmd;
+ struct fc_lport *lp = fsp->lp;
+ struct fcoe_dev_stats *stats;
+ struct fc_frame_header *fh;
+ size_t start_offset;
+ size_t offset;
+ u32 crc;
+ u32 copy_len = 0;
+ size_t len;
+ void *buf;
+ struct scatterlist *sg;
+ size_t remaining;
+
+ fh = fc_frame_header_get(fp);
+ offset = ntohl(fh->fh_parm_offset);
+ start_offset = offset;
+ len = fr_len(fp) - sizeof(*fh);
+ buf = fc_frame_payload_get(fp, 0);
+
+ if (offset + len > fsp->data_len) {
+ /*
+ * this should never happen
+ */
+ if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
+ fc_frame_crc_check(fp))
+ goto crc_err;
+ FC_DEBUG_FCP("data received past end. len %zx offset %zx "
+ "data_len %x\n", len, offset, fsp->data_len);
+ fc_fcp_retry_cmd(fsp);
+ return;
+ }
+ if (offset != fsp->xfer_len)
+ fsp->state |= FC_SRB_DISCONTIG;
+
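+ /*
+ * If the LLD has not already verified the CRC, recompute it here
+ * starting with the FC header, so the running value can later be
+ * compared against the CRC received with the frame.
+ */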
+ crc = 0;
+ if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
+ crc = crc32(~0, (u8 *) fh, sizeof(*fh));
+
+ sg = scsi_sglist(sc);
+ remaining = len;
+
+ while (remaining > 0 && sg) {
+ size_t off;
+ void *page_addr;
+ size_t sg_bytes;
+
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+ sg_bytes = min(remaining, sg->length - offset);
+
+ /*
+ * The scatterlist item may be bigger than PAGE_SIZE,
+ * but we are limited to mapping PAGE_SIZE at a time.
+ */
+ off = offset + sg->offset;
+ sg_bytes = min(sg_bytes, (size_t)
+ (PAGE_SIZE - (off & ~PAGE_MASK)));
+ page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
+ KM_SOFTIRQ0);
+ if (!page_addr)
+ break; /* XXX panic? */
+
+ if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
+ crc = crc32(crc, buf, sg_bytes);
+ memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
+ sg_bytes);
+
+ kunmap_atomic(page_addr, KM_SOFTIRQ0);
+ buf += sg_bytes;
+ offset += sg_bytes;
+ remaining -= sg_bytes;
+ copy_len += sg_bytes;
+ }
+
+ if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+ buf = fc_frame_payload_get(fp, 0);
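+ /*
+ * FC frame payloads are padded out to a 4-byte boundary; fold the
+ * pad bytes into the CRC so it can be compared with the CRC
+ * received for the frame.
+ */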
+ if (len % 4) {
+ crc = crc32(crc, buf + len, 4 - (len % 4));
+ len += 4 - (len % 4);
+ }
+
+ if (~crc != le32_to_cpu(fr_crc(fp))) {
+crc_err:
+ stats = lp->dev_stats[smp_processor_id()];
+ stats->ErrorFrames++;
+ if (stats->InvalidCRCCount++ < 5)
+ FC_DBG("CRC error on data frame\n");
+ /*
+ * Assume the frame is total garbage.
+ * We may have copied it over the good part
+ * of the buffer.
+ * If so, we need to retry the entire operation.
+ * Otherwise, ignore it.
+ */
+ if (fsp->state & FC_SRB_DISCONTIG)
+ fc_fcp_retry_cmd(fsp);
+ return;
+ }
+ }
+
+ if (fsp->xfer_contig_end == start_offset)
+ fsp->xfer_contig_end += copy_len;
+ fsp->xfer_len += copy_len;
+
+ /*
+ * In the very rare event that this data arrived after the response
+ * and completes the transfer, call the completion handler.
+ */
+ if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
+ fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
+ fc_fcp_complete_locked(fsp);
+}
+
+/*
+ * fc_fcp_send_data - Send SCSI data to target.
+ * @fsp: ptr to fc_fcp_pkt
+ * @seq: ptr to this sequence
+ * @offset: starting offset for this data request
+ * @seq_blen: the burst length for this data request
+ *
+ * Called after receiving a Transfer Ready data descriptor.
+ * If the LLD is capable of sequence offload, send seq_blen bytes of
+ * data in a single frame; otherwise send multiple FC frames of at
+ * most the max FC frame payload supported by the target port.
+ *
+ * Returns : 0 for success.
+ */
+static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
+ size_t offset, size_t seq_blen)
+{
+ struct fc_exch *ep;
+ struct scsi_cmnd *sc;
+ struct scatterlist *sg;
+ struct fc_frame *fp = NULL;
+ struct fc_lport *lp = fsp->lp;
+ size_t remaining;
+ size_t t_blen;
+ size_t tlen;
+ size_t sg_bytes;
+ size_t frame_offset, fh_parm_offset;
+ int error;
+ void *data = NULL;
+ void *page_addr;
+ int using_sg = lp->sg_supp;
+ u32 f_ctl;
+
+ WARN_ON(seq_blen <= 0);
+ if (unlikely(offset + seq_blen > fsp->data_len)) {
+ /* this should never happen */
+ FC_DEBUG_FCP("xfer-ready past end. seq_blen %zx offset %zx\n",
+ seq_blen, offset);
+ fc_fcp_send_abort(fsp);
+ return 0;
+ } else if (offset != fsp->xfer_len) {
+ /* Out of Order Data Request - no problem, but unexpected. */
+ FC_DEBUG_FCP("xfer-ready non-contiguous. "
+ "seq_blen %zx offset %zx\n", seq_blen, offset);
+ }
+
+ /*
+ * If the LLD is capable of seq_offload, set the transport
+ * burst length (t_blen) to seq_blen; otherwise set t_blen
+ * to the max FC frame payload previously set in fsp->max_payload.
+ */
+ t_blen = lp->seq_offload ? seq_blen : fsp->max_payload;
+ WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
+ if (t_blen > 512)
+ t_blen &= ~(512 - 1); /* round down to block size */
+ WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */
+ sc = fsp->cmd;
+
+ remaining = seq_blen;
+ fh_parm_offset = frame_offset = offset;
+ tlen = 0;
+ seq = lp->tt.seq_start_next(seq);
+ f_ctl = FC_FC_REL_OFF;
+ WARN_ON(!seq);
+
+ /*
+ * If a get_page()/put_page() will fail, don't use sg lists
+ * in the fc_frame structure.
+ *
+ * The put_page() may be long after the I/O has completed
+ * in the case of FCoE, since the network driver does it
+ * via free_skb(). See the test in free_pages_check().
+ *
+ * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
+ */
+ if (using_sg) {
+ for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
+ if (page_count(sg_page(sg)) == 0 ||
+ (sg_page(sg)->flags & (1 << PG_lru |
+ 1 << PG_private |
+ 1 << PG_locked |
+ 1 << PG_active |
+ 1 << PG_slab |
+ 1 << PG_swapcache |
+ 1 << PG_writeback |
+ 1 << PG_reserved |
+ 1 << PG_buddy))) {
+ using_sg = 0;
+ break;
+ }
+ }
+ }
+ sg = scsi_sglist(sc);
+
+ while (remaining > 0 && sg) {
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+ if (!fp) {
+ tlen = min(t_blen, remaining);
+
+ /*
+ * TODO. Temporary workaround. fc_seq_send() can't
+ * handle odd lengths in non-linear skbs.
+ * This will be the final fragment only.
+ */
+ if (tlen % 4)
+ using_sg = 0;
+ if (using_sg) {
+ fp = _fc_frame_alloc(lp, 0);
+ if (!fp)
+ return -ENOMEM;
+ } else {
+ fp = fc_frame_alloc(lp, tlen);
+ if (!fp)
+ return -ENOMEM;
+
+ data = (void *)(fr_hdr(fp)) +
+ sizeof(struct fc_frame_header);
+ }
+ fh_parm_offset = frame_offset;
+ fr_max_payload(fp) = fsp->max_payload;
+ }
+ sg_bytes = min(tlen, sg->length - offset);
+ if (using_sg) {
+ WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
+ FC_FRAME_SG_LEN);
+ get_page(sg_page(sg));
+ skb_fill_page_desc(fp_skb(fp),
+ skb_shinfo(fp_skb(fp))->nr_frags,
+ sg_page(sg), sg->offset + offset,
+ sg_bytes);
+ fp_skb(fp)->data_len += sg_bytes;
+ fr_len(fp) += sg_bytes;
+ fp_skb(fp)->truesize += PAGE_SIZE;
+ } else {
+ size_t off = offset + sg->offset;
+
+ /*
+ * The scatterlist item may be bigger than PAGE_SIZE,
+ * but we must not cross pages inside the kmap.
+ */
+ sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
+ (off & ~PAGE_MASK)));
+ page_addr = kmap_atomic(sg_page(sg) +
+ (off >> PAGE_SHIFT),
+ KM_SOFTIRQ0);
+ memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
+ sg_bytes);
+ kunmap_atomic(page_addr, KM_SOFTIRQ0);
+ data += sg_bytes;
+ }
+ offset += sg_bytes;
+ frame_offset += sg_bytes;
+ tlen -= sg_bytes;
+ remaining -= sg_bytes;
+
+ if (tlen)
+ continue;
+
+ /*
+ * Send the sequence with transfer sequence initiative if this
+ * is the last FCP frame of the sequence.
+ */
+ if (remaining == 0)
+ f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
+
+ ep = fc_seq_exch(seq);
+ fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
+ FC_TYPE_FCP, f_ctl, fh_parm_offset);
+
+ /*
+ * Send this fragment on the sequence.
+ */
+ error = lp->tt.seq_send(lp, seq, fp);
+ if (error) {
+ WARN_ON(1); /* send error should be rare */
+ fc_fcp_retry_cmd(fsp);
+ return 0;
+ }
+ fp = NULL;
+ }
+ fsp->xfer_len += seq_blen; /* premature count? */
+ return 0;
+}
+
+static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ int ba_done = 1;
+ struct fc_ba_rjt *brp;
+ struct fc_frame_header *fh;
+
+ fh = fc_frame_header_get(fp);
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_BA_ACC:
+ break;
+ case FC_RCTL_BA_RJT:
+ brp = fc_frame_payload_get(fp, sizeof(*brp));
+ if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
+ break;
+ /* fall thru */
+ default:
+ /*
+ * We will let the command time out and scsi-ml recover
+ * in this case, therefore clear the ba_done flag.
+ */
+ ba_done = 0;
+ }
+
+ if (ba_done) {
+ fsp->state |= FC_SRB_ABORTED;
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+
+ if (fsp->wait_for_comp)
+ complete(&fsp->tm_done);
+ else
+ fc_fcp_complete_locked(fsp);
+ }
+}
+
+/*
+ * fc_fcp_reduce_can_queue - drop can_queue
+ * @lp: lport to drop queueing for
+ *
+ * If we are getting memory allocation failures, then we may
+ * be trying to execute too many commands. We let the running
+ * commands complete or time out, then try again with a reduced
+ * can_queue. Eventually we will hit the point where we run
+ * only on the reserved structs.
+ */
+static void fc_fcp_reduce_can_queue(struct fc_lport *lp)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+ unsigned long flags;
+ int can_queue;
+
+ spin_lock_irqsave(lp->host->host_lock, flags);
+ if (si->throttled)
+ goto done;
+ si->throttled = 1;
+
+ can_queue = lp->host->can_queue;
+ can_queue >>= 1;
+ if (!can_queue)
+ can_queue = 1;
+ lp->host->can_queue = can_queue;
+ shost_printk(KERN_ERR, lp->host, "Could not allocate frame.\n"
+ "Reducing can_queue to %d.\n", can_queue);
+done:
+ spin_unlock_irqrestore(lp->host->host_lock, flags);
+}
+
+/*
+ * The exchange manager calls this routine to process SCSI
+ * exchanges.
+ *
+ * Return : None
+ * Context : called from soft IRQ context;
+ * must not be called while holding the list lock
+ */
+static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
+ struct fc_lport *lp;
+ struct fc_frame_header *fh;
+ struct fcp_txrdy *dd;
+ u8 r_ctl;
+ int rc = 0;
+
+ if (IS_ERR(fp))
+ goto errout;
+
+ fh = fc_frame_header_get(fp);
+ r_ctl = fh->fh_r_ctl;
+ lp = fsp->lp;
+
+ if (!(lp->state & LPORT_ST_READY))
+ goto out;
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+ fsp->last_pkt_time = jiffies;
+
+ if (fh->fh_type == FC_TYPE_BLS) {
+ fc_fcp_abts_resp(fsp, fp);
+ goto unlock;
+ }
+
+ if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
+ goto unlock;
+
+ if (r_ctl == FC_RCTL_DD_DATA_DESC) {
+ /*
+ * Received XFER RDY from the target;
+ * we need to send data to the target.
+ */
+ WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+ dd = fc_frame_payload_get(fp, sizeof(*dd));
+ WARN_ON(!dd);
+
+ rc = fc_fcp_send_data(fsp, seq,
+ (size_t) ntohl(dd->ft_data_ro),
+ (size_t) ntohl(dd->ft_burst_len));
+ if (!rc)
+ seq->rec_data = fsp->xfer_len;
+ else if (rc == -ENOMEM)
+ fsp->state |= FC_SRB_NOMEM;
+ } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
+ /*
+ * Received a DATA frame;
+ * next we will copy the data to the system buffer.
+ */
+ WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */
+ fc_fcp_recv_data(fsp, fp);
+ seq->rec_data = fsp->xfer_contig_end;
+ } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
+ WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+
+ fc_fcp_resp(fsp, fp);
+ } else {
+ FC_DBG("unexpected frame. r_ctl %x\n", r_ctl);
+ }
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_frame_free(fp);
+errout:
+ if (IS_ERR(fp))
+ fc_fcp_error(fsp, fp);
+ else if (rc == -ENOMEM)
+ fc_fcp_reduce_can_queue(lp);
+}
+
+static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fcp_resp *fc_rp;
+ struct fcp_resp_ext *rp_ex;
+ struct fcp_resp_rsp_info *fc_rp_info;
+ u32 plen;
+ u32 expected_len;
+ u32 respl = 0;
+ u32 snsl = 0;
+ u8 flags = 0;
+
+ plen = fr_len(fp);
+ fh = (struct fc_frame_header *)fr_hdr(fp);
+ if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
+ goto len_err;
+ plen -= sizeof(*fh);
+ fc_rp = (struct fcp_resp *)(fh + 1);
+ fsp->cdb_status = fc_rp->fr_status;
+ flags = fc_rp->fr_flags;
+ fsp->scsi_comp_flags = flags;
+ expected_len = fsp->data_len;
+
+ if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
+ rp_ex = (void *)(fc_rp + 1);
+ if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
+ if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
+ goto len_err;
+ fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
+ if (flags & FCP_RSP_LEN_VAL) {
+ respl = ntohl(rp_ex->fr_rsp_len);
+ if (respl != sizeof(*fc_rp_info))
+ goto len_err;
+ if (fsp->wait_for_comp) {
+ /* Abuse cdb_status for rsp code */
+ fsp->cdb_status = fc_rp_info->rsp_code;
+ complete(&fsp->tm_done);
+ /*
+ * TMFs will not have any scsi cmd, so
+ * exit here.
+ */
+ return;
+ } else
+ goto err;
+ }
+ if (flags & FCP_SNS_LEN_VAL) {
+ snsl = ntohl(rp_ex->fr_sns_len);
+ if (snsl > SCSI_SENSE_BUFFERSIZE)
+ snsl = SCSI_SENSE_BUFFERSIZE;
+ memcpy(fsp->cmd->sense_buffer,
+ (char *)fc_rp_info + respl, snsl);
+ }
+ }
+ if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
+ if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
+ goto len_err;
+ if (flags & FCP_RESID_UNDER) {
+ fsp->scsi_resid = ntohl(rp_ex->fr_resid);
+ /*
+ * The cmnd->underflow is the minimum number of
+ * bytes that must be transferred for this
+ * command. Provided a sense condition is not
+ * present, make sure the actual amount
+ * transferred is at least the underflow value
+ * or fail.
+ */
+ if (!(flags & FCP_SNS_LEN_VAL) &&
+ (fc_rp->fr_status == 0) &&
+ (scsi_bufflen(fsp->cmd) -
+ fsp->scsi_resid) < fsp->cmd->underflow)
+ goto err;
+ expected_len -= fsp->scsi_resid;
+ } else {
+ fsp->status_code = FC_ERROR;
+ }
+ }
+ }
+ fsp->state |= FC_SRB_RCV_STATUS;
+
+ /*
+ * Check for missing or extra data frames.
+ */
+ if (unlikely(fsp->xfer_len != expected_len)) {
+ if (fsp->xfer_len < expected_len) {
+ /*
+ * Some data may be queued locally;
+ * wait at least one jiffy to see if it is delivered.
+ * If this expires without data, we may do an SRR.
+ */
+ fc_fcp_timer_set(fsp, 2);
+ return;
+ }
+ fsp->status_code = FC_DATA_OVRRUN;
+ FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
+ "data len %x\n",
+ fsp->rport->port_id,
+ fsp->xfer_len, expected_len, fsp->data_len);
+ }
+ fc_fcp_complete_locked(fsp);
+ return;
+
+len_err:
+ FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
+ flags, fr_len(fp), respl, snsl);
+err:
+ fsp->status_code = FC_ERROR;
+ fc_fcp_complete_locked(fsp);
+}
+
+/**
+ * fc_fcp_complete_locked - complete processing of a fcp packet
+ * @fsp: fcp packet
+ *
+ * This function may sleep if a timer is pending. The packet lock must be
+ * held, and the host lock must not be held.
+ */
+static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
+{
+ struct fc_lport *lp = fsp->lp;
+ struct fc_seq *seq;
+ struct fc_exch *ep;
+ u32 f_ctl;
+
+ if (fsp->state & FC_SRB_ABORT_PENDING)
+ return;
+
+ if (fsp->state & FC_SRB_ABORTED) {
+ if (!fsp->status_code)
+ fsp->status_code = FC_CMD_ABORTED;
+ } else {
+ /*
+ * Test for transport underrun, independent of response
+ * underrun status.
+ */
+ if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
+ (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
+ fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+ fsp->status_code = FC_DATA_UNDRUN;
+ fsp->io_status = SUGGEST_RETRY << 24;
+ }
+ }
+
+ seq = fsp->seq_ptr;
+ if (seq) {
+ fsp->seq_ptr = NULL;
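+ /*
+ * The response carried FCP_CONF_REQ: the target wants a
+ * confirmation (FCP_CONF) sent on the exchange before it
+ * is completed.
+ */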
+ if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
+ struct fc_frame *conf_frame;
+ struct fc_seq *csp;
+
+ csp = lp->tt.seq_start_next(seq);
+ conf_frame = fc_frame_alloc(fsp->lp, 0);
+ if (conf_frame) {
+ f_ctl = FC_FC_SEQ_INIT;
+ f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+ ep = fc_seq_exch(seq);
+ fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
+ ep->did, ep->sid,
+ FC_TYPE_FCP, f_ctl, 0);
+ lp->tt.seq_send(lp, csp, conf_frame);
+ }
+ }
+ lp->tt.exch_done(seq);
+ }
+ fc_io_compl(fsp);
+}
+
+static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
+{
+ struct fc_lport *lp = fsp->lp;
+
+ if (fsp->seq_ptr) {
+ lp->tt.exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+ fsp->status_code = error;
+}
+
+/**
+ * fc_fcp_cleanup_each_cmd - run fn on each active command
+ * @lp: logical port
+ * @id: target id
+ * @lun: lun
+ * @error: fsp status code
+ *
+ * If lun or id is -1, they are ignored.
+ */
+static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
+ unsigned int lun, int error)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+ struct fc_fcp_pkt *fsp;
+ struct scsi_cmnd *sc_cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(lp->host->host_lock, flags);
+restart:
+ list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
+ sc_cmd = fsp->cmd;
+ if (id != -1 && scmd_id(sc_cmd) != id)
+ continue;
+
+ if (lun != -1 && sc_cmd->device->lun != lun)
+ continue;
+
+ fc_fcp_pkt_hold(fsp);
+ spin_unlock_irqrestore(lp->host->host_lock, flags);
+
+ if (!fc_fcp_lock_pkt(fsp)) {
+ fc_fcp_cleanup_cmd(fsp, error);
+ fc_io_compl(fsp);
+ fc_fcp_unlock_pkt(fsp);
+ }
+
+ fc_fcp_pkt_release(fsp);
+ spin_lock_irqsave(lp->host->host_lock, flags);
+ /*
+ * While we dropped the lock, multiple pkts could
+ * have been released, so we have to start over.
+ */
+ goto restart;
+ }
+ spin_unlock_irqrestore(lp->host->host_lock, flags);
+}
+
+static void fc_fcp_abort_io(struct fc_lport *lp)
+{
+ fc_fcp_cleanup_each_cmd(lp, -1, -1, FC_HRD_ERROR);
+}
+
+/**
+ * fc_fcp_pkt_send - send a fcp packet to the lower level.
+ * @lp: fc lport
+ * @fsp: fc packet.
+ *
+ * This is called by the upper layer protocol.
+ * Return : zero for success and -1 for failure
+ * Context : called from queuecommand, which can be called from process
+ * context or scsi soft irq.
+ * Locks : called with the host lock held and irqs disabled.
+ */
+static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+ int rc;
+
+ fsp->cmd->SCp.ptr = (char *)fsp;
+ fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+ fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
+
+ int_to_scsilun(fsp->cmd->device->lun,
+ (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+ memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
+ list_add_tail(&fsp->list, &si->scsi_pkt_queue);
+
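+ /*
+ * Drop the host lock around the send; the lower-level send path
+ * takes the packet lock and may allocate memory for the frame.
+ */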
+ spin_unlock_irq(lp->host->host_lock);
+ rc = lp->tt.fcp_cmd_send(lp, fsp, fc_fcp_recv);
+ spin_lock_irq(lp->host->host_lock);
+ if (rc)
+ list_del(&fsp->list);
+
+ return rc;
+}
+
+static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg))
+{
+ struct fc_frame *fp;
+ struct fc_seq *seq;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rp;
+ const size_t len = sizeof(fsp->cdb_cmd);
+ int rc = 0;
+
+ if (fc_fcp_lock_pkt(fsp))
+ return 0;
+
+ fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
+ if (!fp) {
+ rc = -1;
+ goto unlock;
+ }
+
+ memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
+ fr_cmd(fp) = fsp->cmd;
+ rport = fsp->rport;
+ fsp->max_payload = rport->maxframe_size;
+ rp = rport->dd_data;
+
+ fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
+ fc_host_port_id(rp->local_port->host), FC_TYPE_FCP,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
+ if (!seq) {
+ fc_frame_free(fp);
+ rc = -1;
+ goto unlock;
+ }
+ fsp->last_pkt_time = jiffies;
+ fsp->seq_ptr = seq;
+ fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
+
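+ /*
+ * Arm the command timer: use the shorter REC tolerance value when
+ * the target supports REC (Read Exchange Concise), otherwise fall
+ * back to the generic error-recovery timeout.
+ */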
+ setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+ fc_fcp_timer_set(fsp,
+ (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
+ FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+ return rc;
+}
+
+/*
+ * transport error handler
+ */
+static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ int error = PTR_ERR(fp);
+
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+
+ switch (error) {
+ case -FC_EX_CLOSED:
+ fc_fcp_retry_cmd(fsp);
+ goto unlock;
+ default:
+ FC_DBG("unknown error %ld\n", PTR_ERR(fp));
+ }
+ /*
+ * clear abort pending, because the lower layer
+ * decided to force completion.
+ */
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+ fsp->status_code = FC_CMD_PLOGO;
+ fc_fcp_complete_locked(fsp);
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+}
+
+/*
+ * SCSI abort handler - sends an abort
+ * and then waits for the abort to complete.
+ */
+static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
+{
+ int rc = FAILED;
+
+ if (fc_fcp_send_abort(fsp))
+ return FAILED;
+
+ init_completion(&fsp->tm_done);
+ fsp->wait_for_comp = 1;
+
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ fsp->wait_for_comp = 0;
+
+ if (!rc) {
+ FC_DBG("target abort cmd failed\n");
+ rc = FAILED;
+ } else if (fsp->state & FC_SRB_ABORTED) {
+ FC_DBG("target abort cmd passed\n");
+ rc = SUCCESS;
+ fc_fcp_complete_locked(fsp);
+ }
+
+ return rc;
+}
+
+/*
+ * Send a LUN RESET; re-armed via a timer to retry if the send fails.
+ */
+static void fc_lun_reset_send(unsigned long data)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
+ struct fc_lport *lp = fsp->lp;
+ if (lp->tt.fcp_cmd_send(lp, fsp, fc_tm_done)) {
+ if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
+ return;
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+ setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
+ fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+ fc_fcp_unlock_pkt(fsp);
+ }
+}
+
+/*
+ * SCSI device reset handler - sends a LUN RESET to the device
+ * and waits for the reset reply.
+ */
+static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
+ unsigned int id, unsigned int lun)
+{
+ int rc;
+
+ fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+ fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
+ int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+
+ fsp->wait_for_comp = 1;
+ init_completion(&fsp->tm_done);
+
+ fc_lun_reset_send((unsigned long)fsp);
+
+ /*
+ * Wait for completion of the reset;
+ * after that make sure all commands are terminated.
+ */
+ rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+
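+ /*
+ * Mark the packet complete so the timer cannot be re-armed, then
+ * make sure any pending timer has finished before tearing down
+ * the exchange.
+ */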
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ fsp->state |= FC_SRB_COMPL;
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
+ del_timer_sync(&fsp->timer);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (fsp->seq_ptr) {
+ lp->tt.exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+ fsp->wait_for_comp = 0;
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
+ if (!rc) {
+ FC_DBG("lun reset failed\n");
+ return FAILED;
+ }
+
+ /* cdb_status holds the tmf's rsp code */
+ if (fsp->cdb_status != FCP_TMF_CMPL)
+ return FAILED;
+
+ FC_DBG("lun reset to lun %u completed\n", lun);
+ fc_fcp_cleanup_each_cmd(lp, id, lun, FC_CMD_ABORTED);
+ return SUCCESS;
+}
+
+/*
+ * Task Management response handler
+ */
+static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = arg;
+ struct fc_frame_header *fh;
+
+ if (IS_ERR(fp)) {
+ /*
+ * If there is an error, just let it time out or wait
+ * for the TMF to be aborted if it timed out.
+ *
+ * scsi-eh will escalate when either happens.
+ */
+ return;
+ }
+
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+
+ /*
+ * raced with eh timeout handler.
+ */
+ if (!fsp->seq_ptr || !fsp->wait_for_comp) {
+