Diffstat (limited to 'drivers/scsi/megaraid')
-rw-r--r--  drivers/scsi/megaraid/Makefile                    2
-rw-r--r--  drivers/scsi/megaraid/mbox_defs.h                12
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c            92
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c              32
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c           3445
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h            714
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c      6325
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c        1241
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c    2676
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h     766
10 files changed, 11756 insertions(+), 3549 deletions(-)
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile
index f469915b97c..5826ed509e3 100644
--- a/drivers/scsi/megaraid/Makefile
+++ b/drivers/scsi/megaraid/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o
obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o
+megaraid_sas-objs := megaraid_sas_base.o megaraid_sas_fusion.o \
+ megaraid_sas_fp.o
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h
index 170399ef06f..e01c6f7c2ca 100644
--- a/drivers/scsi/megaraid/mbox_defs.h
+++ b/drivers/scsi/megaraid/mbox_defs.h
@@ -497,7 +497,7 @@ typedef struct {
* @inserted_drive : channel:Id of inserted drive
* @battery_status : bit 0: battery module missing
* bit 1: VBAD
- * bit 2: temprature high
+ * bit 2: temperature high
* bit 3: battery pack missing
* bit 4,5:
* 00 - charge complete
@@ -660,7 +660,7 @@ typedef struct {
* @lparam : logical drives parameters
* @span : span
*
- * 8-LD logical drive with upto 8 spans
+ * 8-LD logical drive with up to 8 spans
*/
typedef struct {
logdrv_param_t lparam;
@@ -673,7 +673,7 @@ typedef struct {
* @lparam : logical drives parameters
* @span : span
*
- * 8-LD logical drive with upto 4 spans
+ * 8-LD logical drive with up to 4 spans
*/
typedef struct {
logdrv_param_t lparam;
@@ -686,7 +686,7 @@ typedef struct {
* @type : Type of the device
* @cur_status : current status of the device
* @tag_depth : Level of tagging
- * @sync_neg : sync negotiation - ENABLE or DISBALE
+ * @sync_neg : sync negotiation - ENABLE or DISABLE
* @size : configurable size in terms of 512 byte
*/
typedef struct {
@@ -720,7 +720,7 @@ typedef struct {
* @ldrv : logical drives information
* @pdrv : physical drives information
*
- * Disk array for 8LD logical drives with upto 8 spans
+ * Disk array for 8LD logical drives with up to 8 spans
*/
typedef struct {
uint8_t numldrv;
@@ -737,7 +737,7 @@ typedef struct {
* @ldrv : logical drives information
* @pdrv : physical drives information
*
- * Disk array for 8LD logical drives with upto 4 spans
+ * Disk array for 8LD logical drives with up to 4 spans
*/
typedef struct {
uint8_t numldrv;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 805bb61dde1..e2237a97cb9 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -67,9 +67,11 @@
*
* NEC MegaRAID PCI Express ROMB 1000 0408 1033 8287
*
- * For history of changes, see Documentation/ChangeLog.megaraid
+ * For history of changes, see Documentation/scsi/ChangeLog.megaraid
*/
+#include <linux/slab.h>
+#include <linux/module.h>
#include "megaraid_mbox.h"
static int megaraid_init(void);
@@ -112,8 +114,7 @@ static int megaraid_mbox_fire_sync_cmd(adapter_t *);
static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
static void megaraid_mbox_setup_device_map(adapter_t *);
-static int megaraid_queue_command(struct scsi_cmnd *,
- void (*)(struct scsi_cmnd *));
+static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
static void megaraid_mbox_runpendq(adapter_t *, scb_t *);
static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
@@ -304,7 +305,7 @@ static struct pci_driver megaraid_pci_driver = {
.name = "megaraid",
.id_table = pci_id_table_g,
.probe = megaraid_probe_one,
- .remove = __devexit_p(megaraid_detach_one),
+ .remove = megaraid_detach_one,
.shutdown = megaraid_mbox_shutdown,
};
@@ -335,12 +336,17 @@ static struct device_attribute *megaraid_sdev_attrs[] = {
* megaraid_change_queue_depth - Change the device's queue depth
* @sdev: scsi device struct
* @qdepth: depth to set
+ * @reason: calling context
*
* Return value:
* actual depth set
*/
-static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth)
+static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth,
+ int reason)
{
+ if (reason != SCSI_QDEPTH_DEFAULT)
+ return -EOPNOTSUPP;
+
if (qdepth > MBOX_MAX_SCSI_CMDS)
qdepth = MBOX_MAX_SCSI_CMDS;
scsi_adjust_queue_depth(sdev, 0, qdepth);
@@ -361,6 +367,7 @@ static struct scsi_host_template megaraid_template_g = {
.eh_host_reset_handler = megaraid_reset_handler,
.change_queue_depth = megaraid_change_queue_depth,
.use_clustering = ENABLE_CLUSTERING,
+ .no_write_same = 1,
.sdev_attrs = megaraid_sdev_attrs,
.shost_attrs = megaraid_shost_attrs,
};
@@ -428,7 +435,7 @@ megaraid_exit(void)
* This routine should be called whenever a new adapter is detected by the
* PCI hotplug susbsystem.
*/
-static int __devinit
+static int
megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
adapter_t *adapter;
@@ -473,7 +480,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
// Setup the default DMA mask. This would be changed later on
// depending on hardware capabilities
- if (pci_set_dma_mask(adapter->pdev, DMA_32BIT_MASK) != 0) {
+ if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
@@ -528,7 +535,6 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out_cmm_unreg:
- pci_set_drvdata(pdev, NULL);
megaraid_cmm_unregister(adapter);
out_fini_mbox:
megaraid_fini_mbox(adapter);
@@ -543,7 +549,7 @@ out_probe_one:
/**
* megaraid_detach_one - release framework resources and call LLD release routine
- * @pdev : handle for our PCI cofiguration space
+ * @pdev : handle for our PCI configuration space
*
* This routine is called during driver unload. We free all the allocated
* resources and call the corresponding LLD so that it can also release all
@@ -588,11 +594,6 @@ megaraid_detach_one(struct pci_dev *pdev)
// detach from the IO sub-system
megaraid_io_detach(adapter);
- // reset the device state in the PCI structure. We check this
- // condition when we enter here. If the device state is NULL,
- // that would mean the device has already been removed
- pci_set_drvdata(pdev, NULL);
-
// Unregister from common management module
//
// FIXME: this must return success or failure for conditions if there
@@ -729,7 +730,7 @@ megaraid_io_detach(adapter_t *adapter)
* - Allocate memory required for all the commands
* - Use internal library of FW routines, build up complete soft state
*/
-static int __devinit
+static int
megaraid_init_mbox(adapter_t *adapter)
{
struct pci_dev *pdev;
@@ -900,11 +901,11 @@ megaraid_init_mbox(adapter_t *adapter)
adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
- if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) {
+ if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: DMA mask for 64-bit failed\n"));
- if (pci_set_dma_mask (adapter->pdev, DMA_32BIT_MASK)) {
+ if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: 32-bit DMA mask failed\n"));
goto out_free_sysfs_res;
@@ -973,7 +974,7 @@ megaraid_fini_mbox(adapter_t *adapter)
* @adapter : soft state of the raid controller
*
* Allocate and align the shared mailbox. This maibox is used to issue
- * all the commands. For IO based controllers, the mailbox is also regsitered
+ * all the commands. For IO based controllers, the mailbox is also registered
* with the FW. Allocate memory for all commands as well.
* This is our big allocator.
*/
@@ -1478,7 +1479,7 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
* Queue entry point for mailbox based controllers.
*/
static int
-megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
+megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
{
adapter_t *adapter;
scb_t *scb;
@@ -1507,6 +1508,8 @@ megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
return if_busy;
}
+static DEF_SCSI_QCMD(megaraid_queue_command)
+
/**
* megaraid_mbox_build_cmd - transform the mid-layer scsi commands
* @adapter : controller's soft state
@@ -2019,7 +2022,7 @@ megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
* @scb : scsi control block
* @scp : scsi command from the mid-layer
*
- * Prepare a command for the scsi physical devices. This rountine prepares
+ * Prepare a command for the scsi physical devices. This routine prepares
* commands for devices which can take extended CDBs (>10 bytes).
*/
static void
@@ -2308,8 +2311,8 @@ megaraid_mbox_dpc(unsigned long devp)
// Was an abort issued for this command earlier
if (scb->state & SCB_ABORT) {
con_log(CL_ANN, (KERN_NOTICE
- "megaraid: aborted cmd %lx[%x] completed\n",
- scp->serial_number, scb->sno));
+ "megaraid: aborted cmd [%x] completed\n",
+ scb->sno));
}
/*
@@ -2465,8 +2468,8 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
raid_dev = ADAP2RAIDDEV(adapter);
con_log(CL_ANN, (KERN_WARNING
- "megaraid: aborting-%ld cmd=%x <c=%d t=%d l=%d>\n",
- scp->serial_number, scp->cmnd[0], SCP2CHANNEL(scp),
+ "megaraid: aborting cmd=%x <c=%d t=%d l=%d>\n",
+ scp->cmnd[0], SCP2CHANNEL(scp),
SCP2TARGET(scp), SCP2LUN(scp)));
// If FW has stopped responding, simply return failure
@@ -2489,9 +2492,8 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
list_del_init(&scb->list); // from completed list
con_log(CL_ANN, (KERN_WARNING
- "megaraid: %ld:%d[%d:%d], abort from completed list\n",
- scp->serial_number, scb->sno,
- scb->dev_channel, scb->dev_target));
+ "megaraid: %d[%d:%d], abort from completed list\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
scp->result = (DID_ABORT << 16);
scp->scsi_done(scp);
@@ -2520,9 +2522,8 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
ASSERT(!(scb->state & SCB_ISSUED));
con_log(CL_ANN, (KERN_WARNING
- "megaraid abort: %ld[%d:%d], driver owner\n",
- scp->serial_number, scb->dev_channel,
- scb->dev_target));
+ "megaraid abort: [%d:%d], driver owner\n",
+ scb->dev_channel, scb->dev_target));
scp->result = (DID_ABORT << 16);
scp->scsi_done(scp);
@@ -2553,25 +2554,21 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
if (!(scb->state & SCB_ISSUED)) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid abort: %ld%d[%d:%d], invalid state\n",
- scp->serial_number, scb->sno, scb->dev_channel,
- scb->dev_target));
+ "megaraid abort: %d[%d:%d], invalid state\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
BUG();
}
else {
con_log(CL_ANN, (KERN_WARNING
- "megaraid abort: %ld:%d[%d:%d], fw owner\n",
- scp->serial_number, scb->sno, scb->dev_channel,
- scb->dev_target));
+ "megaraid abort: %d[%d:%d], fw owner\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
}
}
}
spin_unlock_irq(&adapter->lock);
if (!found) {
- con_log(CL_ANN, (KERN_WARNING
- "megaraid abort: scsi cmd:%ld, do now own\n",
- scp->serial_number));
+ con_log(CL_ANN, (KERN_WARNING "megaraid abort: do now own\n"));
// FIXME: Should there be a callback for this command?
return SUCCESS;
@@ -2584,7 +2581,7 @@ megaraid_abort_handler(struct scsi_cmnd *scp)
}
/**
- * megaraid_reset_handler - device reset hadler for mailbox based driver
+ * megaraid_reset_handler - device reset handler for mailbox based driver
* @scp : reference command
*
* Reset handler for the mailbox based controller. First try to find out if
@@ -2642,9 +2639,8 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
} else {
if (scb->scp == scp) { // Found command
con_log(CL_ANN, (KERN_WARNING
- "megaraid: %ld:%d[%d:%d], reset from pending list\n",
- scp->serial_number, scb->sno,
- scb->dev_channel, scb->dev_target));
+ "megaraid: %d[%d:%d], reset from pending list\n",
+ scb->sno, scb->dev_channel, scb->dev_target));
} else {
con_log(CL_ANN, (KERN_WARNING
"megaraid: IO packet with %d[%d:%d] being reset\n",
@@ -2682,7 +2678,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
(MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i));
}
- // bailout if no recovery happended in reset time
+ // bailout if no recovery happened in reset time
if (adapter->outstanding_cmds == 0) {
break;
}
@@ -2704,7 +2700,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
}
else {
con_log(CL_ANN, (KERN_NOTICE
- "megaraid mbox: reset sequence completed sucessfully\n"));
+ "megaraid mbox: reset sequence completed successfully\n"));
}
@@ -2730,7 +2726,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
}
out:
- spin_unlock_irq(&adapter->lock);
+ spin_unlock(&adapter->lock);
return rval;
}
@@ -3445,7 +3441,7 @@ megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
* megaraid_mbox_setup_device_map - manage device ids
* @adapter : Driver's soft state
*
- * Manange the device ids to have an appropraite mapping between the kernel
+ * Manage the device ids to have an appropriate mapping between the kernel
* scsi addresses and megaraid scsi and logical drive addresses. We export
* scsi devices on their actual addresses, whereas the logical drives are
* exported on a virtual scsi channel.
@@ -3966,7 +3962,7 @@ megaraid_sysfs_get_ldmap_timeout(unsigned long data)
* NOTE: The commands issuance functionality is not generalized and
* implemented in context of "get ld map" command only. If required, the
* command issuance logical can be trivially pulled out and implemented as a
- * standalone libary. For now, this should suffice since there is no other
+ * standalone library. For now, this should suffice since there is no other
* user of this interface.
*
* Return 0 on success.
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index f680561d2c6..a70692779a1 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -15,13 +15,15 @@
* Common management module
*/
#include <linux/sched.h>
-#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
#include "megaraid_mm.h"
// Entry points for char node driver
+static DEFINE_MUTEX(mraid_mm_mutex);
static int mraid_mm_open(struct inode *, struct file *);
-static int mraid_mm_ioctl(struct inode *, struct file *, uint, unsigned long);
+static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);
// routines to convert to and from the old the format
@@ -69,11 +71,12 @@ static wait_queue_head_t wait_q;
static const struct file_operations lsi_fops = {
.open = mraid_mm_open,
- .ioctl = mraid_mm_ioctl,
+ .unlocked_ioctl = mraid_mm_unlocked_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mraid_mm_compat_ioctl,
#endif
.owner = THIS_MODULE,
+ .llseek = noop_llseek,
};
static struct miscdevice megaraid_mm_dev = {
@@ -97,7 +100,6 @@ mraid_mm_open(struct inode *inode, struct file *filep)
*/
if (!capable(CAP_SYS_ADMIN)) return (-EACCES);
- cycle_kernel_lock();
return 0;
}
@@ -109,8 +111,7 @@ mraid_mm_open(struct inode *inode, struct file *filep)
* @arg : user ioctl packet
*/
static int
-mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
- unsigned long arg)
+mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
uioc_t *kioc;
char signature[EXT_IOCTL_SIGN_SZ] = {0};
@@ -217,6 +218,19 @@ mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
return rval;
}
+static long
+mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ int err;
+
+ /* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
+ mutex_lock(&mraid_mm_mutex);
+ err = mraid_mm_ioctl(filep, cmd, arg);
+ mutex_unlock(&mraid_mm_mutex);
+
+ return err;
+}
/**
* mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet
@@ -472,6 +486,8 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
pthru32->dataxferaddr = kioc->buf_paddr;
if (kioc->data_dir & UIOC_WR) {
+ if (pthru32->dataxferlen > kioc->xferlen)
+ return -EINVAL;
if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
pthru32->dataxferlen)) {
return (-EFAULT);
@@ -882,7 +898,7 @@ hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
/**
* mraid_mm_register_adp - Registration routine for low level drivers
- * @lld_adp : Adapter objejct
+ * @lld_adp : Adapter object
*/
int
mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
@@ -1224,7 +1240,7 @@ mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
{
int err;
- err = mraid_mm_ioctl(NULL, filep, cmd, arg);
+ err = mraid_mm_ioctl(filep, cmd, arg);
return err;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
deleted file mode 100644
index fc7ac158476..00000000000
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ /dev/null
@@ -1,3445 +0,0 @@
-/*
- *
- * Linux MegaRAID driver for SAS based RAID controllers
- *
- * Copyright (c) 2003-2005 LSI Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * FILE : megaraid_sas.c
- * Version : v00.00.03.20-rc1
- *
- * Authors:
- * (email-id : megaraidlinux@lsi.com)
- * Sreenivas Bagalkote
- * Sumant Patro
- * Bo Yang
- *
- * List of supported controllers
- *
- * OEM Product Name VID DID SSVID SSID
- * --- ------------ --- --- ---- ----
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/list.h>
-#include <linux/moduleparam.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/smp_lock.h>
-#include <linux/uio.h>
-#include <asm/uaccess.h>
-#include <linux/fs.h>
-#include <linux/compat.h>
-#include <linux/blkdev.h>
-#include <linux/mutex.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include "megaraid_sas.h"
-
-/*
- * poll_mode_io:1- schedule complete completion from q cmd
- */
-static unsigned int poll_mode_io;
-module_param_named(poll_mode_io, poll_mode_io, int, 0);
-MODULE_PARM_DESC(poll_mode_io,
- "Complete cmds from IO path, (default=0)");
-
-MODULE_LICENSE("GPL");
-MODULE_VERSION(MEGASAS_VERSION);
-MODULE_AUTHOR("megaraidlinux@lsi.com");
-MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
-
-/*
- * PCI ID table for all supported controllers
- */
-static struct pci_device_id megasas_pci_table[] = {
-
- {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
- /* xscale IOP */
- {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
- /* ppc IOP */
- {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
- /* ppc IOP */
- {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
- /* xscale IOP, vega */
- {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
- /* xscale IOP */
- {}
-};
-
-MODULE_DEVICE_TABLE(pci, megasas_pci_table);
-
-static int megasas_mgmt_majorno;
-static struct megasas_mgmt_info megasas_mgmt_info;
-static struct fasync_struct *megasas_async_queue;
-static DEFINE_MUTEX(megasas_async_queue_mutex);
-
-static u32 megasas_dbg_lvl;
-
-static void
-megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
- u8 alt_status);
-
-/**
- * megasas_get_cmd - Get a command from the free pool
- * @instance: Adapter soft state
- *
- * Returns a free command from the pool
- */
-static struct megasas_cmd *megasas_get_cmd(struct megasas_instance
- *instance)
-{
- unsigned long flags;
- struct megasas_cmd *cmd = NULL;
-
- spin_lock_irqsave(&instance->cmd_pool_lock, flags);
-
- if (!list_empty(&instance->cmd_pool)) {
- cmd = list_entry((&instance->cmd_pool)->next,
- struct megasas_cmd, list);
- list_del_init(&cmd->list);
- } else {
- printk(KERN_ERR "megasas: Command pool empty!\n");
- }
-
- spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
- return cmd;
-}
-
-/**
- * megasas_return_cmd - Return a cmd to free command pool
- * @instance: Adapter soft state
- * @cmd: Command packet to be returned to free command pool
- */
-static inline void
-megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&instance->cmd_pool_lock, flags);
-
- cmd->scmd = NULL;
- list_add_tail(&cmd->list, &instance->cmd_pool);
-
- spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
-}
-
-
-/**
-* The following functions are defined for xscale
-* (deviceid : 1064R, PERC5) controllers
-*/
-
-/**
- * megasas_enable_intr_xscale - Enables interrupts
- * @regs: MFI register set
- */
-static inline void
-megasas_enable_intr_xscale(struct megasas_register_set __iomem * regs)
-{
- writel(1, &(regs)->outbound_intr_mask);
-
- /* Dummy readl to force pci flush */
- readl(&regs->outbound_intr_mask);
-}
-
-/**
- * megasas_disable_intr_xscale -Disables interrupt
- * @regs: MFI register set
- */
-static inline void
-megasas_disable_intr_xscale(struct megasas_register_set __iomem * regs)
-{
- u32 mask = 0x1f;
- writel(mask, &regs->outbound_intr_mask);
- /* Dummy readl to force pci flush */
- readl(&regs->outbound_intr_mask);
-}
-
-/**
- * megasas_read_fw_status_reg_xscale - returns the current FW status value
- * @regs: MFI register set
- */
-static u32
-megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
-{
- return readl(&(regs)->outbound_msg_0);
-}
-/**
- * megasas_clear_interrupt_xscale - Check & clear interrupt
- * @regs: MFI register set
- */
-static int
-megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
-{
- u32 status;
- /*
- * Check if it is our interrupt
- */
- status = readl(&regs->outbound_intr_status);
-
- if (!(status & MFI_OB_INTR_STATUS_MASK)) {
- return 1;
- }
-
- /*
- * Clear the interrupt by writing back the same value
- */
- writel(status, &regs->outbound_intr_status);
-
- return 0;
-}
-
-/**
- * megasas_fire_cmd_xscale - Sends command to the FW
- * @frame_phys_addr : Physical address of cmd
- * @frame_count : Number of frames for the command
- * @regs : MFI register set
- */
-static inline void
-megasas_fire_cmd_xscale(dma_addr_t frame_phys_addr,u32 frame_count, struct megasas_register_set __iomem *regs)
-{
- writel((frame_phys_addr >> 3)|(frame_count),
- &(regs)->inbound_queue_port);
-}
-
-static struct megasas_instance_template megasas_instance_template_xscale = {
-
- .fire_cmd = megasas_fire_cmd_xscale,
- .enable_intr = megasas_enable_intr_xscale,
- .disable_intr = megasas_disable_intr_xscale,
- .clear_intr = megasas_clear_intr_xscale,
- .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
-};
-
-/**
-* This is the end of set of functions & definitions specific
-* to xscale (deviceid : 1064R, PERC5) controllers
-*/
-
-/**
-* The following functions are defined for ppc (deviceid : 0x60)
-* controllers
-*/
-
-/**
- * megasas_enable_intr_ppc - Enables interrupts
- * @regs: MFI register set
- */
-static inline void
-megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs)
-{
- writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
-
- writel(~0x80000004, &(regs)->outbound_intr_mask);
-
- /* Dummy readl to force pci flush */
- readl(&regs->outbound_intr_mask);
-}
-
-/**
- * megasas_disable_intr_ppc - Disable interrupt
- * @regs: MFI register set
- */
-static inline void
-megasas_disable_intr_ppc(struct megasas_register_set __iomem * regs)
-{
- u32 mask = 0xFFFFFFFF;
- writel(mask, &regs->outbound_intr_mask);
- /* Dummy readl to force pci flush */
- readl(&regs->outbound_intr_mask);
-}
-
-/**
- * megasas_read_fw_status_reg_ppc - returns the current FW status value
- * @regs: MFI register set
- */
-static u32
-megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
-{
- return readl(&(regs)->outbound_scratch_pad);
-}
-
-/**
- * megasas_clear_interrupt_ppc - Check & clear interrupt
- * @regs: MFI register set
- */
-static int
-megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
-{
- u32 status;
- /*
- * Check if it is our interrupt
- */
- status = readl(&regs->outbound_intr_status);
-
- if (!(status & MFI_REPLY_1078_MESSAGE_INTERRUPT)) {
- return 1;
- }
-
- /*
- * Clear the interrupt by writing back the same value
- */
- writel(status, &regs->outbound_doorbell_clear);
-
- return 0;
-}
-/**
- * megasas_fire_cmd_ppc - Sends command to the FW
- * @frame_phys_addr : Physical address of cmd
- * @frame_count : Number of frames for the command
- * @regs : MFI register set
- */
-static inline void
-megasas_fire_cmd_ppc(dma_addr_t frame_phys_addr, u32 frame_count, struct megasas_register_set __iomem *regs)
-{
- writel((frame_phys_addr | (frame_count<<1))|1,
- &(regs)->inbound_queue_port);
-}
-
-static struct megasas_instance_template megasas_instance_template_ppc = {
-
- .fire_cmd = megasas_fire_cmd_ppc,
- .enable_intr = megasas_enable_intr_ppc,
- .disable_intr = megasas_disable_intr_ppc,
- .clear_intr = megasas_clear_intr_ppc,
- .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
-};
-
-/**
-* This is the end of set of functions & definitions
-* specific to ppc (deviceid : 0x60) controllers
-*/
-
-/**
- * megasas_issue_polled - Issues a polling command
- * @instance: Adapter soft state
- * @cmd: Command packet to be issued
- *
- * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
- */
-static int
-megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
-{
- int i;
- u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;
-
- struct megasas_header *frame_hdr = &cmd->frame->hdr;
-
- frame_hdr->cmd_status = 0xFF;
- frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
-
- /*
- * Issue the frame using inbound queue port
- */
- instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
-
- /*
- * Wait for cmd_status to change
- */
- for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i++) {
- rmb();
- msleep(1);
- }
-
- if (frame_hdr->cmd_status == 0xff)
- return -ETIME;
-
- return 0;
-}
-
-/**
- * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
- * @instance: Adapter soft state
- * @cmd: Command to be issued
- *
- * This function waits on an event for the command to be returned from ISR.
- * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
- * Used to issue ioctl commands.
- */
-static int
-megasas_issue_blocked_cmd(struct megasas_instance *instance,
- struct megasas_cmd *cmd)
-{
- cmd->cmd_status = ENODATA;
-
- instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
-
- wait_event_timeout(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA),
- MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ);
-
- return 0;
-}
-
-/**
- * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
- * @instance: Adapter soft state
- * @cmd_to_abort: Previously issued cmd to be aborted
- *
- * MFI firmware can abort previously issued AEN comamnd (automatic event
- * notification). The megasas_issue_blocked_abort_cmd() issues such abort
- * cmd and waits for return status.
- * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
- */
-static int
-megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
- struct megasas_cmd *cmd_to_abort)
-{
- struct megasas_cmd *cmd;
- struct megasas_abort_frame *abort_fr;
-
- cmd = megasas_get_cmd(instance);
-
- if (!cmd)
- return -1;
-
- abort_fr = &cmd->frame->abort;
-
- /*
- * Prepare and issue the abort frame
- */
- abort_fr->cmd = MFI_CMD_ABORT;
- abort_fr->cmd_status = 0xFF;
- abort_fr->flags = 0;
- abort_fr->abort_context = cmd_to_abort->index;
- abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
- abort_fr->abort_mfi_phys_addr_hi = 0;
-
- cmd->sync_cmd = 1;
- cmd->cmd_status = 0xFF;
-
- instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
-
- /*
- * Wait for this cmd to complete
- */
- wait_event_timeout(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF),
- MEGASAS_INTERNAL_CMD_WAIT_TIME*HZ);
-
- megasas_return_cmd(instance, cmd);
- return 0;
-}
-
-/**
- * megasas_make_sgl32 - Prepares 32-bit SGL
- * @instance: Adapter soft state
- * @scp: SCSI command from the mid-layer
- * @mfi_sgl: SGL to be filled in
- *
- * If successful, this function returns the number of SG elements. Otherwise,
- * it returnes -1.
- */
-static int
-megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
- union megasas_sgl *mfi_sgl)
-{
- int i;
- int sge_count;
- struct scatterlist *os_sgl;
-
- sge_count = scsi_dma_map(scp);
- BUG_ON(sge_count < 0);
-
- if (sge_count) {
- scsi_for_each_sg(scp, os_sgl, sge_count, i) {
- mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
- mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
- }
- }
- return sge_count;
-}
-
-/**
- * megasas_make_sgl64 - Prepares 64-bit SGL
- * @instance: Adapter soft state
- * @scp: SCSI command from the mid-layer
- * @mfi_sgl: SGL to be filled in
- *
- * If successful, this function returns the number of SG elements. Otherwise,
- * it returnes -1.
- */
-static int
-megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
- union megasas_sgl *mfi_sgl)
-{
- int i;
- int sge_count;
- struct scatterlist *os_sgl;
-
- sge_count = scsi_dma_map(scp);
- BUG_ON(sge_count < 0);
-
- if (sge_count) {
- scsi_for_each_sg(scp, os_sgl, sge_count, i) {
- mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
- mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
- }
- }
- return sge_count;
-}
-
- /**
- * megasas_get_frame_count - Computes the number of frames
- * @frame_type : type of frame- io or pthru frame
- * @sge_count : number of sg elements
- *
- * Returns the number of frames required for numnber of sge's (sge_count)
- */
-
-static u32 megasas_get_frame_count(u8 sge_count, u8 frame_type)
-{
- int num_cnt;
- int sge_bytes;
- u32 sge_sz;
- u32 frame_count=0;
-
- sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
- sizeof(struct megasas_sge32);
-
- /*
- * Main frame can contain 2 SGEs for 64-bit SGLs and
- * 3 SGEs for 32-bit SGLs for ldio &
- * 1 SGEs for 64-bit SGLs and
- * 2 SGEs for 32-bit SGLs for pthru frame
- */
- if (unlikely(frame_type == PTHRU_FRAME)) {
- if (IS_DMA64)
- num_cnt = sge_count - 1;
- else
- num_cnt = sge_count - 2;
- } else {
- if (IS_DMA64)
- num_cnt = sge_count - 2;
- else
- num_cnt = sge_count - 3;
- }
-
- if(num_cnt>0){
- sge_bytes = sge_sz * num_cnt;
-
- frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
- ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
- }
- /* Main frame */
- frame_count +=1;
-
- if (frame_count > 7)
- frame_count = 8;
- return frame_count;
-}
-
-/**
- * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
- * @instance: Adapter soft state
- * @scp: SCSI command
- * @cmd: Command to be prepared in
- *
- * This function prepares CDB commands. These are typcially pass-through
- * commands to the devices.
- */
-static int
-megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
- struct megasas_cmd *cmd)
-{
- u32 is_logical;
- u32 device_id;
- u16 flags = 0;
- struct megasas_pthru_frame *pthru;
-
- is_logical = MEGASAS_IS_LOGICAL(scp);
- device_id = MEGASAS_DEV_INDEX(instance, scp);
- pthru = (struct megasas_pthru_frame *)cmd->frame;
-
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
- flags = MFI_FRAME_DIR_WRITE;
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
- flags = MFI_FRAME_DIR_READ;
- else if (scp->sc_data_direction == PCI_DMA_NONE)
- flags = MFI_FRAME_DIR_NONE;
-
- /*
- * Prepare the DCDB frame
- */
- pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
- pthru->cmd_status = 0x0;
- pthru->scsi_status = 0x0;
- pthru->target_id = device_id;
- pthru->lun = scp->device->lun;
- pthru->cdb_len = scp->cmd_len;
- pthru->timeout = 0;
- pthru->flags = flags;
- pthru->data_xfer_len = scsi_bufflen(scp);
-
- memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
-
- /*
- * Construct SGL
- */
- if (IS_DMA64) {
- pthru->flags |= MFI_FRAME_SGL64;
- pthru->sge_count = megasas_make_sgl64(instance, scp,
- &pthru->sgl);
- } else
- pthru->sge_count = megasas_make_sgl32(instance, scp,
- &pthru->sgl);
-
- /*
- * Sense info specific
- */
- pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
- pthru->sense_buf_phys_addr_hi = 0;
- pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
-
- /*
- * Compute the total number of frames this command consumes. FW uses
- * this number to pull sufficient number of frames from host memory.
- */
- cmd->frame_count = megasas_get_frame_count(pthru->sge_count,
- PTHRU_FRAME);
-
- return cmd->frame_count;
-}
-
-/**
- * megasas_build_ldio - Prepares IOs to logical devices
- * @instance: Adapter soft state
- * @scp: SCSI command
- * @cmd: Command to to be prepared
- *
- * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
- */
-static int
-megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
- struct megasas_cmd *cmd)
-{
- u32 device_id;
- u8 sc = scp->cmnd[0];
- u16 flags = 0;
- struct megasas_io_frame *ldio;
-
- device_id = MEGASAS_DEV_INDEX(instance, scp);
- ldio = (struct megasas_io_frame *)cmd->frame;
-
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
- flags = MFI_FRAME_DIR_WRITE;
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
- flags = MFI_FRAME_DIR_READ;
-
- /*
- * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
- */
- ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
- ldio->cmd_status = 0x0;
- ldio->scsi_status = 0x0;
- ldio->target_id = device_id;
- ldio->timeout = 0;
- ldio->reserved_0 = 0;
- ldio->pad_0 = 0;
- ldio->flags = flags;
- ldio->start_lba_hi = 0;
- ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
-
- /*
- * 6-byte READ(0x08) or WRITE(0x0A) cdb
- */
- if (scp->cmd_len == 6) {
- ldio->lba_count = (u32) scp->cmnd[4];
- ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
- ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
-
- ldio->start_lba_lo &= 0x1FFFFF;
- }
-
- /*
- * 10-byte READ(0x28) or WRITE(0x2A) cdb
- */
- else if (scp->cmd_len == 10) {
- ldio->lba_count = (u32) scp->cmnd[8] |
- ((u32) scp->cmnd[7] << 8);
- ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
- ((u32) scp->cmnd[3] << 16) |
- ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
- }
-
- /*
- * 12-byte READ(0xA8) or WRITE(0xAA) cdb
- */
- else if (scp->cmd_len == 12) {
- ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
- ((u32) scp->cmnd[7] << 16) |
- ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
-
- ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
- ((u32) scp->cmnd[3] << 16) |
- ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
- }
-
- /*
- * 16-byte READ(0x88) or WRITE(0x8A) cdb
- */
- else if (scp->cmd_len == 16) {
- ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
- ((u32) scp->cmnd[11] << 16) |
- ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
-
- ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) |
- ((u32) scp->cmnd[7] << 16) |
- ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
-
- ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) |
- ((u32) scp->cmnd[3] << 16) |
- ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
-
- }
-
- /*
- * Construct SGL
- */
- if (IS_DMA64) {
- ldio->flags |= MFI_FRAME_SGL64;
- ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
- } else
- ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
-
- /*
- * Sense info specific
- */
- ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
- ldio->sense_buf_phys_addr_hi = 0;
- ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
-
- /*
- * Compute the total number of frames this command consumes. FW uses
- * this number to pull sufficient number of frames from host memory.
- */
- cmd->frame_count = megasas_get_frame_count(ldio->sge_count, IO_FRAME);
-
- return cmd->frame_count;
-}
-
-/**
- * megasas_is_ldio - Checks if the cmd is for logical drive
- * @scmd: SCSI command
- *
- * Called by megasas_queue_command to find out if the command to be queued
- * is a logical drive command
- */
-static inline int megasas_is_ldio(struct scsi_cmnd *cmd)
-{
- if (!MEGASAS_IS_LOGICAL(cmd))
- return 0;
- switch (cmd->cmnd[0]) {
- case READ_10:
- case WRITE_10:
- case READ_12:
- case WRITE_12:
- case READ_6:
- case WRITE_6:
- case READ_16:
- case WRITE_16:
- return 1;
- default:
- return 0;
- }
-}
-
- /**
- * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
- * in FW
- * @instance: Adapter soft state
- */
-static inline void
-megasas_dump_pending_frames(struct megasas_instance *instance)
-{
- struct megasas_cmd *cmd;
- int i,n;
- union megasas_sgl *mfi_sgl;
- struct megasas_io_frame *ldio;
- struct megasas_pthru_frame *pthru;
- u32 sgcount;
- u32 max_cmd = instance->max_fw_cmds;
-
- printk(KERN_ERR "\nmegasas[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
- printk(KERN_ERR "megasas[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
- if (IS_DMA64)
- printk(KERN_ERR "\nmegasas[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
- else
- printk(KERN_ERR "\nmegasas[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
-
- printk(KERN_ERR "megasas[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
- for (i = 0; i < max_cmd; i++) {
- cmd = instance->cmd_list[i];
- if(!cmd->scmd)
- continue;
- printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
- if (megasas_is_ldio(cmd->scmd)){
- ldio = (struct megasas_io_frame *)cmd->frame;
- mfi_sgl = &ldio->sgl;
- sgcount = ldio->sge_count;
- printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount);
- }
- else {
- pthru = (struct megasas_pthru_frame *) cmd->frame;
- mfi_sgl = &pthru->sgl;
- sgcount = pthru->sge_count;
- printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount);
- }
- if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
- for (n = 0; n < sgcount; n++){
- if (IS_DMA64)
- printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ;
- else
- printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ;
- }
- }
- printk(KERN_ERR "\n");
- } /*for max_cmd*/
- printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
- for (i = 0; i < max_cmd; i++) {
-
- cmd = instance->cmd_list[i];
-
- if(cmd->sync_cmd == 1){
- printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
- }
- }
- printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no);
-}
-
-/**
- * megasas_queue_command - Queue entry point
- * @scmd: SCSI command to be queued
- * @done: Callback entry point
- */
-static int
-megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
-{
- u32 frame_count;
- struct megasas_cmd *cmd;
- struct megasas_instance *instance;
-
- instance = (struct megasas_instance *)
- scmd->device->host->hostdata;
-
- /* Don't process if we have already declared adapter dead */
- if (instance->hw_crit_error)
- return SCSI_MLQUEUE_HOST_BUSY;
-
- scmd->scsi_done = done;
- scmd->result = 0;
-
- if (MEGASAS_IS_LOGICAL(scmd) &&
- (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) {
- scmd->result = DID_BAD_TARGET << 16;
- goto out_done;
- }
-
- switch (scmd->cmnd[0]) {
- case SYNCHRONIZE_CACHE:
- /*
- * FW takes care of flush cache on its own
- * No need to send it down
- */
- scmd->result = DID_OK << 16;
- goto out_done;
- default:
- break;
- }
-
- cmd = megasas_get_cmd(instance);
- if (!cmd)
- return SCSI_MLQUEUE_HOST_BUSY;
-
- /*
- * Logical drive command
- */
- if (megasas_is_ldio(scmd))
- frame_count = megasas_build_ldio(instance, scmd, cmd);
- else
- frame_count = megasas_build_dcdb(instance, scmd, cmd);
-
- if (!frame_count)
- goto out_return_cmd;
-
- cmd->scmd = scmd;
- scmd->SCp.ptr = (char *)cmd;
-
- /*
- * Issue the command to the FW
- */
- atomic_inc(&instance->fw_outstanding);
-
- instance->instancet->fire_cmd(cmd->frame_phys_addr ,cmd->frame_count-1,instance->reg_set);
- /*
- * Check if we have pend cmds to be completed
- */
- if (poll_mode_io && atomic_read(&instance->fw_outstanding))
- tasklet_schedule(&instance->isr_tasklet);
-
-
- return 0;
-
- out_return_cmd:
- megasas_return_cmd(instance, cmd);
- out_done:
- done(scmd);
- return 0;
-}
-
-static int megasas_slave_configure(struct scsi_device *sdev)
-{
- /*
- * Don't export physical disk devices to the disk driver.
- *
- * FIXME: Currently we don't export them to the midlayer at all.
- * That will be fixed once LSI engineers have audited the
- * firmware for possible issues.
- */
- if (sdev->channel < MEGASAS_MAX_PD_CHANNELS && sdev->type == TYPE_DISK)
- return -ENXIO;
-
- /*
- * The RAID firmware may require extended timeouts.
- */
- if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS)
- sdev->timeout = MEGASAS_DEFAULT_CMD_TIMEOUT * HZ;
- return 0;
-}
-
-/**
- * megasas_complete_cmd_dpc - Returns FW's controller structure
- * @instance_addr: Address of adapter soft state
- *
- * Tasklet to complete cmds
- */
-static void megasas_complete_cmd_dpc(unsigned long instance_addr)
-{
- u32 producer;
- u32 consumer;
- u32 context;
- struct megasas_cmd *cmd;
- struct megasas_instance *instance =
- (struct megasas_instance *)instance_addr;
- unsigned long flags;
-
- /* If we have already declared adapter dead, donot complete cmds */
- if (instance->hw_crit_error)
- return;
-
- spin_lock_irqsave(&instance->completion_lock, flags);
-
- producer = *instance->producer;
- consumer = *instance->consumer;
-
- while (consumer != producer) {
- context = instance->reply_queue[consumer];
-
- cmd = instance->cmd_list[context];
-
- megasas_complete_cmd(instance, cmd, DID_OK);
-
- consumer++;
- if (consumer == (instance->max_fw_cmds + 1)) {
- consumer = 0;
- }
- }
-
- *instance->consumer = producer;
-
- spin_unlock_irqrestore(&instance->completion_lock, flags);
-
- /*
- * Check if we can restore can_queue
- */
- if (instance->flag & MEGASAS_FW_BUSY
- && time_after(jiffies, instance->last_time + 5 * HZ)
- && atomic_read(&instance->fw_outstanding) < 17) {
-
- spin_lock_irqsave(instance->host->host_lock, flags);
- instance->flag &= ~MEGASAS_FW_BUSY;
- instance->host->can_queue =
- instance->max_fw_cmds - MEGASAS_INT_CMDS;
-
- spin_unlock_irqrestore(instance->host->host_lock, flags);
- }
-}
-
-/**
- * megasas_wait_for_outstanding - Wait for all outstanding cmds
- * @instance: Adapter soft state
- *
- * This function waits for upto MEGASAS_RESET_WAIT_TIME seconds for FW to
- * complete all its outstanding commands. Returns error if one or more IOs
- * are pending after this time period. It also marks the controller dead.
- */
-static int megasas_wait_for_outstanding(struct megasas_instance *instance)
-{
- int i;
- u32 wait_time = MEGASAS_RESET_WAIT_TIME;
-
- for (i = 0; i < wait_time; i++) {
-
- int outstanding = atomic_read(&instance->fw_outstanding);
-
- if (!outstanding)
- break;
-
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
- "commands to complete\n",i,outstanding);
- /*
- * Call cmd completion routine. Cmd to be
- * be completed directly without depending on isr.
- */
- megasas_complete_cmd_dpc((unsigned long)instance);
- }
-
- msleep(1000);
- }
-
- if (atomic_read(&instance->fw_outstanding)) {
- /*
- * Send signal to FW to stop processing any pending cmds.
- * The controller will be taken offline by the OS now.
- */
- writel(MFI_STOP_ADP,
- &instance->reg_set->inbound_doorbell);
- megasas_dump_pending_frames(instance);
- instance->hw_crit_error = 1;
- return FAILED;
- }
-
- return SUCCESS;
-}
-
-/**
- * megasas_generic_reset - Generic reset routine
- * @scmd: Mid-layer SCSI command
- *
- * This routine implements a generic reset handler for device, bus and host
- * reset requests. Device, bus and host specific reset handlers can use this
- * function after they do their specific tasks.
- */
-static int megasas_generic_reset(struct scsi_cmnd *scmd)
-{
- int ret_val;
- struct megasas_instance *instance;
-
- instance = (struct megasas_instance *)scmd->device->host->hostdata;
-
- scmd_printk(KERN_NOTICE, scmd, "megasas: RESET -%ld cmd=%x retries=%x\n",
- scmd->serial_number, scmd->cmnd[0], scmd->retries);
-
- if (instance->hw_crit_error) {
- printk(KERN_ERR "megasas: cannot recover from previous reset "
- "failures\n");
- return FAILED;
- }
-
- ret_val = megasas_wait_for_outstanding(instance);
- if (ret_val == SUCCESS)
- printk(KERN_NOTICE "megasas: reset successful \n");
- else
- printk(KERN_ERR "megasas: failed to do reset\n");
-
- return ret_val;
-}
-
-/**
- * megasas_reset_timer - quiesce the adapter if required
- * @scmd: scsi cmnd
- *
- * Sets the FW busy flag and reduces the host->can_queue if the
- * cmd has not been completed within the timeout period.
- */
-static enum
-scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
-{
- struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
- struct megasas_instance *instance;
- unsigned long flags;
-
- if (time_after(jiffies, scmd->jiffies_at_alloc +
- (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
- return EH_NOT_HANDLED;
- }
-
- instance = cmd->instance;
- if (!(instance->flag & MEGASAS_FW_BUSY)) {
- /* FW is busy, throttle IO */
- spin_lock_irqsave(instance->host->host_lock, flags);
-
- instance->host->can_queue = 16;
- instance->last_time = jiffies;
- instance->flag |= MEGASAS_FW_BUSY;
-
- spin_unlock_irqrestore(instance->host->host_lock, flags);
- }
- return EH_RESET_TIMER;
-}
-
-/**
- * megasas_reset_device - Device reset handler entry point
- */
-static int megasas_reset_device(struct scsi_cmnd *scmd)
-{
- int ret;
-
- /*
- * First wait for all commands to complete
- */
- ret = megasas_generic_reset(scmd);
-
- return ret;
-}
-
-/**
- * megasas_reset_bus_host - Bus & host reset handler entry point
- */
-static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
-{
- int ret;
-
- /*
- * First wait for all commands to complete
- */
- ret = megasas_generic_reset(scmd);
-
- return ret;
-}
-
-/**
- * megasas_bios_param - Returns disk geometry for a disk
- * @sdev: device handle
- * @bdev: block device
- * @capacity: drive capacity
- * @geom: geometry parameters
- */
-static int
-megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
- sector_t capacity, int geom[])
-{
- int heads;
- int sectors;
- sector_t cylinders;
- unsigned long tmp;
- /* Default heads (64) & sectors (32) */
- heads = 64;
- sectors = 32;
-
- tmp = heads * sectors;
- cylinders = capacity;
-
- sector_div(cylinders, tmp);
-
- /*
- * Handle extended translation size for logical drives > 1Gb
- */
-
- if (capacity >= 0x200000) {
- heads = 255;
- sectors = 63;
- tmp = heads*sectors;
- cylinders = capacity;
- sector_div(cylinders, tmp);
- }
-
- geom[0] = heads;
- geom[1] = sectors;
- geom[2] = cylinders;
-
- return 0;
-}
-
-/**
- * megasas_service_aen - Processes an event notification
- * @instance: Adapter soft state
- * @cmd: AEN command completed by the ISR
- *
- * For AEN, driver sends a command down to FW that is held by the FW till an
- * event occurs. When an event of interest occurs, FW completes the command
- * that it was previously holding.
- *
- * This routines sends SIGIO signal to processes that have registered with the
- * driver for AEN.
- */
-static void
-megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
-{
- /*
- * Don't signal app if it is just an aborted previously registered aen
- */
- if (!cmd->abort_aen)
- kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
- else
- cmd->abort_aen = 0;
-
- instance->aen_cmd = NULL;
- megasas_return_cmd(instance, cmd);
-}
-
-/*
- * Scsi host template for megaraid_sas driver
- */
-static struct scsi_host_template megasas_template = {
-
- .module = THIS_MODULE,
- .name = "LSI SAS based MegaRAID driver",
- .proc_name = "megaraid_sas",
- .slave_configure = megasas_slave_configure,
- .queuecommand = megasas_queue_command,
- .eh_device_reset_handler = megasas_reset_device,
- .eh_bus_reset_handler = megasas_reset_bus_host,
- .eh_host_reset_handler = megasas_reset_bus_host,
- .eh_timed_out = megasas_reset_timer,
- .bios_param = megasas_bios_param,
- .use_clustering = ENABLE_CLUSTERING,
-};
-
-/**
- * megasas_complete_int_cmd - Completes an internal command
- * @instance: Adapter soft state
- * @cmd: Command to be completed
- *
- * The megasas_issue_blocked_cmd() function waits for a command to complete
- * after it issues a command. This function wakes up that waiting routine by
- * calling wake_up() on the wait queue.
- */
-static void
-megasas_complete_int_cmd(struct megasas_instance *instance,
- struct megasas_cmd *cmd)
-{
- cmd->cmd_status = cmd->frame->io.cmd_status;
-
- if (cmd->cmd_status == ENODATA) {
- cmd->cmd_status = 0;
- }
- wake_up(&instance->int_cmd_wait_q);
-}
-
-/**
- * megasas_complete_abort - Completes aborting a command
- * @instance: Adapter soft state
- * @cmd: Cmd that was issued to abort another cmd
- *
- * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
- * after it issues an abort on a previously issued command. This function
- * wakes up all functions waiting on the same wait queue.
- */
-static void
-megasas_complete_abort(struct megasas_instance *instance,
- struct megasas_cmd *cmd)
-{
- if (cmd->sync_cmd) {
- cmd->sync_cmd = 0;
- cmd->cmd_status = 0;
- wake_up(&instance->abort_cmd_wait_q);
- }
-
- return;
-}
-
-/**
- * megasas_complete_cmd - Completes a command
- * @instance: Adapter soft state
- * @cmd: Command to be completed
- * @alt_status: If non-zero, use this value as status to
- * SCSI mid-layer instead of the value returned
- * by the FW. This should be used if caller wants
- * an alternate status (as in the case of aborted
- * commands)
- */
-static void
-megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
- u8 alt_status)
-{
- int exception = 0;
- struct megasas_header *hdr = &cmd->frame->hdr;
-
- if (cmd->scmd)
- cmd->scmd->SCp.ptr = NULL;
-
- switch (hdr->cmd) {
-
- case MFI_CMD_PD_SCSI_IO:
- case MFI_CMD_LD_SCSI_IO:
-
- /*
- * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
- * issued either through an IO path or an IOCTL path. If it
- * was via IOCTL, we will send it to internal completion.
- */
- if (cmd->sync_cmd) {
- cmd->sync_cmd = 0;
- megasas_complete_int_cmd(instance, cmd);
- break;
- }
-
- case MFI_CMD_LD_READ:
- case MFI_CMD_LD_WRITE:
-
- if (alt_status) {
- cmd->scmd->result = alt_status << 16;
- exception = 1;
- }
-
- if (exception) {
-
- atomic_dec(&instance->fw_outstanding);
-
- scsi_dma_unmap(cmd->scmd);
- cmd->scmd->scsi_done(cmd->scmd);
- megasas_return_cmd(instance, cmd);
-
- break;
- }
-
- switch (hdr->cmd_status) {
-
- case MFI_STAT_OK:
- cmd->scmd->result = DID_OK << 16;
- break;
-
- case MFI_STAT_SCSI_IO_FAILED:
- case MFI_STAT_LD_INIT_IN_PROGRESS:
- cmd->scmd->result =
- (DID_ERROR << 16) | hdr->scsi_status;
- break;
-
- case MFI_STAT_SCSI_DONE_WITH_ERROR:
-
- cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
-
- if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
- memset(cmd->scmd->sense_buffer, 0,
- SCSI_SENSE_BUFFERSIZE);
- memcpy(cmd->scmd->sense_buffer, cmd->sense,
- hdr->sense_len);
-
- cmd->scmd->result |= DRIVER_SENSE << 24;
- }
-
- break;
-
- case MFI_STAT_LD_OFFLINE:
- case MFI_STAT_DEVICE_NOT_FOUND:
- cmd->scmd->result = DID_BAD_TARGET << 16;
- break;
-
- default:
- printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
- hdr->cmd_status);
- cmd->scmd->result = DID_ERROR << 16;
- break;
- }
-
- atomic_dec(&instance->fw_outstanding);
-
- scsi_dma_unmap(cmd->scmd);
- cmd->scmd->scsi_done(cmd->scmd);
- megasas_return_cmd(instance, cmd);
-
- break;
-
- case MFI_CMD_SMP:
- case MFI_CMD_STP:
- case MFI_CMD_DCMD:
-
- /*
- * See if got an event notification
- */
- if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
- megasas_service_aen(instance, cmd);
- else
- megasas_complete_int_cmd(instance, cmd);
-
- break;
-
- case MFI_CMD_ABORT:
- /*
- * Cmd issued to abort another cmd returned
- */
- megasas_complete_abort(instance, cmd);
- break;
-
- default:
- printk("megasas: Unknown command completed! [0x%X]\n",
- hdr->cmd);
- break;
- }
-}
-
-/**
- * megasas_deplete_reply_queue - Processes all completed commands
- * @instance: Adapter soft state
- * @alt_status: Alternate status to be returned to
- * SCSI mid-layer instead of the status
- * returned by the FW
- */
-static int
-megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status)
-{
- /*
- * Check if it is our interrupt
- * Clear the interrupt
- */
- if(instance->instancet->clear_intr(instance->reg_set))
- return IRQ_NONE;
-
- if (instance->hw_crit_error)
- goto out_done;
- /*
- * Schedule the tasklet for cmd completion
- */
- tasklet_schedule(&instance->isr_tasklet);
-out_done:
- return IRQ_HANDLED;
-}
-
-/**
- * megasas_isr - isr entry point
- */
-static irqreturn_t megasas_isr(int irq, void *devp)
-{
- return megasas_deplete_reply_queue((struct megasas_instance *)devp,
- DID_OK);
-}
-
-/**
- * megasas_transition_to_ready - Move the FW to READY state
- * @instance: Adapter soft state
- *
- * During the initialization, FW passes can potentially be in any one of
- * several possible states. If the FW in operational, waiting-for-handshake
- * states, driver must take steps to bring it to ready state. Otherwise, it
- * has to wait for the ready state.
- */
-static int
-megasas_transition_to_ready(struct megasas_instance* instance)
-{
- int i;
- u8 max_wait;
- u32 fw_state;
- u32 cur_state;
-
- fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
-
- if (fw_state != MFI_STATE_READY)
- printk(KERN_INFO "megasas: Waiting for FW to come to ready"
- " state\n");
-
- while (fw_state != MFI_STATE_READY) {
-
- switch (fw_state) {
-
- case MFI_STATE_FAULT:
-
- printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
- return -ENODEV;
-
- case MFI_STATE_WAIT_HANDSHAKE:
- /*
- * Set the CLR bit in inbound doorbell
- */
- writel(MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
- &instance->reg_set->inbound_doorbell);
-
- max_wait = 2;
- cur_state = MFI_STATE_WAIT_HANDSHAKE;
- break;
-
- case MFI_STATE_BOOT_MESSAGE_PENDING:
- writel(MFI_INIT_HOTPLUG,
- &instance->reg_set->inbound_doorbell);
-
- max_wait = 10;
- cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
- break;
-
- case MFI_STATE_OPERATIONAL:
- /*
- * Bring it to READY state; assuming max wait 10 secs
- */
- instance->instancet->disable_intr(instance->reg_set);
- writel(MFI_RESET_FLAGS, &instance->reg_set->inbound_doorbell);
-
- max_wait = 60;
- cur_state = MFI_STATE_OPERATIONAL;
- break;
-
- case MFI_STATE_UNDEFINED:
- /*
- * This state should not last for more than 2 seconds
- */
- max_wait = 2;
- cur_state = MFI_STATE_UNDEFINED;
- break;
-
- case MFI_STATE_BB_INIT:
- max_wait = 2;
- cur_state = MFI_STATE_BB_INIT;
- break;
-
- case MFI_STATE_FW_INIT:
- max_wait = 20;
- cur_state = MFI_STATE_FW_INIT;
- break;
-
- case MFI_STATE_FW_INIT_2:
- max_wait = 20;
- cur_state = MFI_STATE_FW_INIT_2;
- break;
-
- case MFI_STATE_DEVICE_SCAN:
- max_wait = 20;
- cur_state = MFI_STATE_DEVICE_SCAN;
- break;
-
- case MFI_STATE_FLUSH_CACHE:
- max_wait = 20;
- cur_state = MFI_STATE_FLUSH_CACHE;
- break;
-
- default:
- printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
- fw_state);
- return -ENODEV;
- }
-
- /*
- * The cur_state should not last for more than max_wait secs
- */
- for (i = 0; i < (max_wait * 1000); i++) {
- fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) &
- MFI_STATE_MASK ;
-
- if (fw_state == cur_state) {
- msleep(1);
- } else
- break;
- }
-
- /*
- * Return error if fw_state hasn't changed after max_wait
- */
- if (fw_state == cur_state) {
- printk(KERN_DEBUG "FW state [%d] hasn't changed "
- "in %d secs\n", fw_state, max_wait);
- return -ENODEV;
- }
- };
- printk(KERN_INFO "megasas: FW now in Ready state\n");
-
- return 0;
-}
-
-/**
- * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
- * @instance: Adapter soft state
- */
-static void megasas_teardown_frame_pool(struct megasas_instance *instance)
-{
- int i;
- u32 max_cmd = instance->max_fw_cmds;
- struct megasas_cmd *cmd;
-
- if (!instance->frame_dma_pool)
- return;
-
- /*
- * Return all frames to pool
- */
- for (i = 0; i < max_cmd; i++) {
-
- cmd = instance->cmd_list[i];
-
- if (cmd->frame)
- pci_pool_free(instance->frame_dma_pool, cmd->frame,
- cmd->frame_phys_addr);
-
- if (cmd->sense)
- pci_pool_free(instance->sense_dma_pool, cmd->sense,
- cmd->sense_phys_addr);
- }
-
- /*
- * Now destroy the pool itself
- */
- pci_pool_destroy(instance->frame_dma_pool);
- pci_pool_destroy(instance->sense_dma_pool);
-
- instance->frame_dma_pool = NULL;
- instance->sense_dma_pool = NULL;
-}
-
-/**
- * megasas_create_frame_pool - Creates DMA pool for cmd frames
- * @instance: Adapter soft state
- *
- * Each command packet has an embedded DMA memory buffer that is used for
- * filling the MFI frame and the SG list that immediately follows the frame.
- * This function creates those DMA memory buffers for each command packet by
- * using the PCI pool facility (a worked sizing example follows the function).
- */
-static int megasas_create_frame_pool(struct megasas_instance *instance)
-{
- int i;
- u32 max_cmd;
- u32 sge_sz;
- u32 sgl_sz;
- u32 total_sz;
- u32 frame_count;
- struct megasas_cmd *cmd;
-
- max_cmd = instance->max_fw_cmds;
-
- /*
- * Size of our frame is 64 bytes for MFI frame, followed by max SG
- * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
- */
- sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
- sizeof(struct megasas_sge32);
-
- /*
-	 * Calculate the number of 64-byte frames required for the SGL
- */
- sgl_sz = sge_sz * instance->max_num_sge;
- frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
-
- /*
- * We need one extra frame for the MFI command
- */
- frame_count++;
-
- total_sz = MEGAMFI_FRAME_SIZE * frame_count;
- /*
- * Use DMA pool facility provided by PCI layer
- */
- instance->frame_dma_pool = pci_pool_create("megasas frame pool",
- instance->pdev, total_sz, 64,
- 0);
-
- if (!instance->frame_dma_pool) {
- printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
- return -ENOMEM;
- }
-
- instance->sense_dma_pool = pci_pool_create("megasas sense pool",
- instance->pdev, 128, 4, 0);
-
- if (!instance->sense_dma_pool) {
- printk(KERN_DEBUG "megasas: failed to setup sense pool\n");
-
- pci_pool_destroy(instance->frame_dma_pool);
- instance->frame_dma_pool = NULL;
-
- return -ENOMEM;
- }
-
- /*
- * Allocate and attach a frame to each of the commands in cmd_list.
-	 * By using cmd->index as the context instead of &cmd, we can always
-	 * use a 32-bit context regardless of the architecture
- */
- for (i = 0; i < max_cmd; i++) {
-
- cmd = instance->cmd_list[i];
-
- cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
- GFP_KERNEL, &cmd->frame_phys_addr);
-
- cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
- GFP_KERNEL, &cmd->sense_phys_addr);
-
- /*
- * megasas_teardown_frame_pool() takes care of freeing
- * whatever has been allocated
- */
- if (!cmd->frame || !cmd->sense) {
- printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
- megasas_teardown_frame_pool(instance);
- return -ENOMEM;
- }
-
- cmd->frame->io.context = cmd->index;
- }
-
- return 0;
-}
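
A worked sketch of the per-command sizing arithmetic performed above. The SGE size and max_num_sge values below are hypothetical; in the driver they come from IS_DMA64 and the firmware status register. MEGAMFI_FRAME_SIZE is 64 bytes, matching the define in megaraid_sas.h.

#include <stdio.h>

#define MEGAMFI_FRAME_SIZE 64

int main(void)
{
	unsigned int sge_sz = 16;	/* assumed SGE size, illustration only */
	unsigned int max_num_sge = 60;	/* hypothetical value read from FW */
	unsigned int sgl_sz, frame_count;

	sgl_sz = sge_sz * max_num_sge;				/* 960 bytes */
	frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) /
		       MEGAMFI_FRAME_SIZE;			/* 15 frames */
	frame_count++;		/* one extra frame for the MFI command */

	printf("frame size per command: %u bytes\n",
	       frame_count * MEGAMFI_FRAME_SIZE);		/* 1024 bytes */
	return 0;
}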
-
-/**
- * megasas_free_cmds - Free all the cmds in the free cmd pool
- * @instance: Adapter soft state
- */
-static void megasas_free_cmds(struct megasas_instance *instance)
-{
- int i;
- /* First free the MFI frame pool */
- megasas_teardown_frame_pool(instance);
-
- /* Free all the commands in the cmd_list */
- for (i = 0; i < instance->max_fw_cmds; i++)
- kfree(instance->cmd_list[i]);
-
- /* Free the cmd_list buffer itself */
- kfree(instance->cmd_list);
- instance->cmd_list = NULL;
-
- INIT_LIST_HEAD(&instance->cmd_pool);
-}
-
-/**
- * megasas_alloc_cmds - Allocates the command packets
- * @instance: Adapter soft state
- *
- * Each command that is issued to the FW, whether an IO command from the OS
- * or an internal command like an IOCTL, is wrapped in a local data structure
- * called megasas_cmd. The frame embedded in this megasas_cmd is what is
- * actually issued to the FW.
- *
- * Each frame has a 32-bit field called context (tag). This context is used
- * to get back the megasas_cmd from the frame when a frame gets completed in
- * the ISR. Typically the address of the megasas_cmd itself would be used as
- * the context. But we wanted to keep the differences between 32 and 64 bit
- * systems to the minimum, so we always use 32-bit integers for the context.
- * In this driver, the 32-bit values are indices into the array cmd_list.
- * This array is used only to look up the megasas_cmd given the context; the
- * free commands themselves are maintained in a linked list called cmd_pool
- * (a lookup sketch follows the function below).
- */
-static int megasas_alloc_cmds(struct megasas_instance *instance)
-{
- int i;
- int j;
- u32 max_cmd;
- struct megasas_cmd *cmd;
-
- max_cmd = instance->max_fw_cmds;
-
- /*
- * instance->cmd_list is an array of struct megasas_cmd pointers.
- * Allocate the dynamic array first and then allocate individual
- * commands.
- */
- instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
-
- if (!instance->cmd_list) {
- printk(KERN_DEBUG "megasas: out of memory\n");
- return -ENOMEM;
- }
-
-
- for (i = 0; i < max_cmd; i++) {
- instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
- GFP_KERNEL);
-
- if (!instance->cmd_list[i]) {
-
- for (j = 0; j < i; j++)
- kfree(instance->cmd_list[j]);
-
- kfree(instance->cmd_list);
- instance->cmd_list = NULL;
-
- return -ENOMEM;
- }
- }
-
- /*
- * Add all the commands to command pool (instance->cmd_pool)
- */
- for (i = 0; i < max_cmd; i++) {
- cmd = instance->cmd_list[i];
- memset(cmd, 0, sizeof(struct megasas_cmd));
- cmd->index = i;
- cmd->instance = instance;
-
- list_add_tail(&cmd->list, &instance->cmd_pool);
- }
-
- /*
- * Create a frame pool and assign one frame to each cmd
- */
- if (megasas_create_frame_pool(instance)) {
- printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
- megasas_free_cmds(instance);
- }
-
- return 0;
-}
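
To make the context-as-index scheme described above concrete, here is a minimal lookup sketch (not the driver's actual completion path): given the 32-bit context that the FW echoes back, the command is recovered by a plain array index into cmd_list.

/*
 * Hypothetical helper illustrating the context scheme: the FW returns
 * cmd->index as the 32-bit context, and cmd_list is used purely as a
 * lookup table. Sketch only; the driver does this inline in its
 * completion paths.
 */
static struct megasas_cmd *
megasas_ctx_to_cmd_sketch(struct megasas_instance *instance, u32 context)
{
	if (context >= instance->max_fw_cmds)
		return NULL;	/* stale or corrupt context */

	return instance->cmd_list[context];
}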
-
-/**
- * megasas_get_ctrl_info - Returns FW's controller structure
- * @instance: Adapter soft state
- * @ctrl_info: Controller information structure
- *
- * Issues an internal command (DCMD) to get the FW's controller structure.
- * This information is mainly used to find out the maximum IO transfer per
- * command supported by the FW.
- */
-static int
-megasas_get_ctrl_info(struct megasas_instance *instance,
- struct megasas_ctrl_info *ctrl_info)
-{
- int ret = 0;
- struct megasas_cmd *cmd;
- struct megasas_dcmd_frame *dcmd;
- struct megasas_ctrl_info *ci;
- dma_addr_t ci_h = 0;
-
- cmd = megasas_get_cmd(instance);
-
- if (!cmd) {
- printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
- return -ENOMEM;
- }
-
- dcmd = &cmd->frame->dcmd;
-
- ci = pci_alloc_consistent(instance->pdev,
- sizeof(struct megasas_ctrl_info), &ci_h);
-
- if (!ci) {
- printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
- memset(ci, 0, sizeof(*ci));
- memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
- dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0xFF;
- dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
- dcmd->timeout = 0;
- dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
- dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
- dcmd->sgl.sge32[0].phys_addr = ci_h;
- dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);
-
- if (!megasas_issue_polled(instance, cmd)) {
- ret = 0;
- memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
- } else {
- ret = -1;
- }
-
- pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
- ci, ci_h);
-
- megasas_return_cmd(instance, cmd);
- return ret;
-}
-
-/**
- * megasas_issue_init_mfi - Initializes the FW
- * @instance: Adapter soft state
- *
- * Issues the INIT MFI cmd
- */
-static int
-megasas_issue_init_mfi(struct megasas_instance *instance)
-{
- u32 context;
-
- struct megasas_cmd *cmd;
-
- struct megasas_init_frame *init_frame;
- struct megasas_init_queue_info *initq_info;
- dma_addr_t init_frame_h;
- dma_addr_t initq_info_h;
-
- /*
-	 * Prepare an init frame. Note that the init frame points to the queue
-	 * info structure. Each frame has its SGL allocated after the first 64
-	 * bytes. For this frame - since we don't need any SGL - we use the
-	 * SGL's space for the queue info structure.
- *
- * We will not get a NULL command below. We just created the pool.
- */
- cmd = megasas_get_cmd(instance);
-
- init_frame = (struct megasas_init_frame *)cmd->frame;
- initq_info = (struct megasas_init_queue_info *)
- ((unsigned long)init_frame + 64);
-
- init_frame_h = cmd->frame_phys_addr;
- initq_info_h = init_frame_h + 64;
-
- context = init_frame->context;
- memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
- memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
- init_frame->context = context;
-
- initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
- initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
-
- initq_info->producer_index_phys_addr_lo = instance->producer_h;
- initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
-
- init_frame->cmd = MFI_CMD_INIT;
- init_frame->cmd_status = 0xFF;
- init_frame->queue_info_new_phys_addr_lo = initq_info_h;
-
- init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
-
- /*
- * disable the intr before firing the init frame to FW
- */
- instance->instancet->disable_intr(instance->reg_set);
-
- /*
- * Issue the init frame in polled mode
- */
-
- if (megasas_issue_polled(instance, cmd)) {
- printk(KERN_ERR "megasas: Failed to init firmware\n");
- megasas_return_cmd(instance, cmd);
- goto fail_fw_init;
- }
-
- megasas_return_cmd(instance, cmd);
-
- return 0;
-
-fail_fw_init:
- return -EINVAL;
-}
-
-/**
- * megasas_start_timer - Initializes a timer object
- * @instance: Adapter soft state
- * @timer: timer object to be initialized
- * @fn: timer function
- * @interval: time interval between timer function call
- */
-static inline void
-megasas_start_timer(struct megasas_instance *instance,
- struct timer_list *timer,
- void *fn, unsigned long interval)
-{
- init_timer(timer);
- timer->expires = jiffies + interval;
- timer->data = (unsigned long)instance;
- timer->function = fn;
- add_timer(timer);
-}
-
-/**
- * megasas_io_completion_timer - Timer fn
- * @instance_addr: Address of adapter soft state
- *
- * Schedules tasklet for cmd completion
- * if poll_mode_io is set
- */
-static void
-megasas_io_completion_timer(unsigned long instance_addr)
-{
- struct megasas_instance *instance =
- (struct megasas_instance *)instance_addr;
-
- if (atomic_read(&instance->fw_outstanding))
- tasklet_schedule(&instance->isr_tasklet);
-
- /* Restart timer */
- if (poll_mode_io)
- mod_timer(&instance->io_completion_timer,
- jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
-}
-
-/**
- * megasas_init_mfi - Initializes the FW
- * @instance: Adapter soft state
- *
- * This is the main function for initializing MFI firmware.
- */
-static int megasas_init_mfi(struct megasas_instance *instance)
-{
- u32 context_sz;
- u32 reply_q_sz;
- u32 max_sectors_1;
- u32 max_sectors_2;
- u32 tmp_sectors;
- struct megasas_register_set __iomem *reg_set;
- struct megasas_ctrl_info *ctrl_info;
- /*
- * Map the message registers
- */
- instance->base_addr = pci_resource_start(instance->pdev, 0);
-
- if (pci_request_regions(instance->pdev, "megasas: LSI")) {
- printk(KERN_DEBUG "megasas: IO memory region busy!\n");
- return -EBUSY;
- }
-
- instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
-
- if (!instance->reg_set) {
- printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
- goto fail_ioremap;
- }
-
- reg_set = instance->reg_set;
-
- switch(instance->pdev->device)
- {
- case PCI_DEVICE_ID_LSI_SAS1078R:
- case PCI_DEVICE_ID_LSI_SAS1078DE:
- instance->instancet = &megasas_instance_template_ppc;
- break;
- case PCI_DEVICE_ID_LSI_SAS1064R:
- case PCI_DEVICE_ID_DELL_PERC5:
- default:
- instance->instancet = &megasas_instance_template_xscale;
- break;
- }
-
- /*
- * We expect the FW state to be READY
- */
- if (megasas_transition_to_ready(instance))
- goto fail_ready_state;
-
- /*
- * Get various operational parameters from status register
- */
- instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
- /*
-	 * Reduce the max supported cmds by 1. This is to ensure that the
-	 * reply_q_sz (1 more than the max cmds that the driver may send)
-	 * does not exceed the max cmds that the FW can support
- */
- instance->max_fw_cmds = instance->max_fw_cmds-1;
- instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
- 0x10;
- /*
- * Create a pool of commands
- */
- if (megasas_alloc_cmds(instance))
- goto fail_alloc_cmds;
-
- /*
-	 * Allocate memory for the reply queue. The length of the reply queue
-	 * should be _one_ more than the maximum number of commands handled by
-	 * the firmware.
-	 *
-	 * Note: When the FW completes commands, it places the corresponding
-	 * context values in this circular reply queue. This circular queue is
-	 * a fairly typical producer-consumer queue: the FW is the producer (of
-	 * completed commands) and the driver is the consumer (a consumer
-	 * sketch follows this function).
- */
- context_sz = sizeof(u32);
- reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
-
- instance->reply_queue = pci_alloc_consistent(instance->pdev,
- reply_q_sz,
- &instance->reply_queue_h);
-
- if (!instance->reply_queue) {
- printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
- goto fail_reply_queue;
- }
-
- if (megasas_issue_init_mfi(instance))
- goto fail_fw_init;
-
- ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
-
- /*
-	 * Compute the max allowed sectors per IO: the controller info has two
-	 * limits on max sectors. The driver should use the minimum of the two.
-	 *
-	 * 1 << stripe_sz_ops.min = max sectors per strip
-	 *
-	 * Note that older firmware (< FW ver 30) didn't report the information
-	 * needed to calculate max_sectors_1, so that number always ended up as
-	 * zero.
- */
- tmp_sectors = 0;
- if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
-
- max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
- ctrl_info->max_strips_per_io;
- max_sectors_2 = ctrl_info->max_request_size;
-
- tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
- }
-
- instance->max_sectors_per_req = instance->max_num_sge *
- PAGE_SIZE / 512;
- if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
- instance->max_sectors_per_req = tmp_sectors;
-
- kfree(ctrl_info);
-
- /*
- * Setup tasklet for cmd completion
- */
-
- tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
- (unsigned long)instance);
-
- /* Initialize the cmd completion timer */
- if (poll_mode_io)
- megasas_start_timer(instance, &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
- return 0;
-
- fail_fw_init:
-
- pci_free_consistent(instance->pdev, reply_q_sz,
- instance->reply_queue, instance->reply_queue_h);
- fail_reply_queue:
- megasas_free_cmds(instance);
-
- fail_alloc_cmds:
- fail_ready_state:
- iounmap(instance->reg_set);
-
- fail_ioremap:
- pci_release_regions(instance->pdev);
-
- return -EINVAL;
-}
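
The reply queue set up above follows the producer-consumer protocol sketched below. This is an illustration of the mechanism only, not the driver's actual DPC; the field names (producer, consumer, reply_queue, cmd_list, max_fw_cmds) are the ones used in this file.

/* Sketch: drain completed contexts posted by the FW (the producer)
 * until the driver (the consumer) catches up. The queue length is
 * max_fw_cmds + 1, matching the allocation above. */
static void megasas_drain_reply_queue_sketch(struct megasas_instance *instance)
{
	u32 producer = *instance->producer;
	u32 consumer = *instance->consumer;

	while (consumer != producer) {
		u32 context = instance->reply_queue[consumer];
		struct megasas_cmd *cmd = instance->cmd_list[context];

		/* complete 'cmd' here: IO back to the SCSI midlayer,
		 * or wake up a blocked internal/ioctl command */
		(void)cmd;

		if (++consumer == instance->max_fw_cmds + 1)
			consumer = 0;
	}

	*instance->consumer = consumer;
}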
-
-/**
- * megasas_release_mfi - Reverses the FW initialization
- * @instance: Adapter soft state
- */
-static void megasas_release_mfi(struct megasas_instance *instance)
-{
- u32 reply_q_sz = sizeof(u32) * (instance->max_fw_cmds + 1);
-
- pci_free_consistent(instance->pdev, reply_q_sz,
- instance->reply_queue, instance->reply_queue_h);
-
- megasas_free_cmds(instance);
-
- iounmap(instance->reg_set);
-
- pci_release_regions(instance->pdev);
-}
-
-/**
- * megasas_get_seq_num - Gets latest event sequence numbers
- * @instance: Adapter soft state
- * @eli: FW event log sequence numbers information
- *
- * FW maintains a log of all events in a non-volatile area. Upper layers would
- * usually find out the latest sequence number of the events, the seq number
- * at boot, etc. They would "read" all the events below the latest seq number
- * by issuing a direct fw cmd (DCMD). For future events (beyond the latest seq
- * number), they would subscribe to AEN (asynchronous event notification) and
- * wait for the events to happen.
- */
-static int
-megasas_get_seq_num(struct megasas_instance *instance,
- struct megasas_evt_log_info *eli)
-{
- struct megasas_cmd *cmd;
- struct megasas_dcmd_frame *dcmd;
- struct megasas_evt_log_info *el_info;
- dma_addr_t el_info_h = 0;
-
- cmd = megasas_get_cmd(instance);
-
- if (!cmd) {
- return -ENOMEM;
- }
-
- dcmd = &cmd->frame->dcmd;
- el_info = pci_alloc_consistent(instance->pdev,
- sizeof(struct megasas_evt_log_info),
- &el_info_h);
-
- if (!el_info) {
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
- memset(el_info, 0, sizeof(*el_info));
- memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
- dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0x0;
- dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
- dcmd->timeout = 0;
- dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
- dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
- dcmd->sgl.sge32[0].phys_addr = el_info_h;
- dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info);
-
- megasas_issue_blocked_cmd(instance, cmd);
-
- /*
- * Copy the data back into callers buffer
- */
- memcpy(eli, el_info, sizeof(struct megasas_evt_log_info));
-
- pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
- el_info, el_info_h);
-
- megasas_return_cmd(instance, cmd);
-
- return 0;
-}
-
-/**
- * megasas_register_aen - Registers for asynchronous event notification
- * @instance: Adapter soft state
- * @seq_num: The starting sequence number
- * @class_locale_word: Class and locale of the event
- *
- * This function subscribes to AEN for events beyond @seq_num. It requests
- * to be notified if and only if the event is of type @class_locale_word
- */
-static int
-megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
- u32 class_locale_word)
-{
- int ret_val;
- struct megasas_cmd *cmd;
- struct megasas_dcmd_frame *dcmd;
- union megasas_evt_class_locale curr_aen;
- union megasas_evt_class_locale prev_aen;
-
- /*
-	 * If there is an AEN pending already (aen_cmd), check if the
-	 * class_locale of that pending AEN is inclusive of the new
-	 * AEN request we currently have. If it is, then we don't have
-	 * to do anything. In other words, whichever events the current
-	 * AEN request is subscribing to have already been subscribed
-	 * to.
-	 *
-	 * If the old cmd is _not_ inclusive, then we have to abort
-	 * that command, form a class_locale that is a superset of both
-	 * the old and the current one, and re-issue it to the FW
- */
-
- curr_aen.word = class_locale_word;
-
- if (instance->aen_cmd) {
-
- prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
-
- /*
- * A class whose enum value is smaller is inclusive of all
-		 * higher values. If a PROGRESS (= -1) was previously
-		 * registered, then new registration requests for higher
-		 * classes need not be sent to the FW; they are automatically
-		 * included.
-		 *
-		 * Locale numbers don't have such a hierarchy. They are bitmap
-		 * values (a merge sketch follows this function).
- */
- if ((prev_aen.members.class <= curr_aen.members.class) &&
- !((prev_aen.members.locale & curr_aen.members.locale) ^
- curr_aen.members.locale)) {
- /*
- * Previously issued event registration includes
- * current request. Nothing to do.
- */
- return 0;
- } else {
- curr_aen.members.locale |= prev_aen.members.locale;
-
- if (prev_aen.members.class < curr_aen.members.class)
- curr_aen.members.class = prev_aen.members.class;
-
- instance->aen_cmd->abort_aen = 1;
- ret_val = megasas_issue_blocked_abort_cmd(instance,
- instance->
- aen_cmd);
-
- if (ret_val) {
- printk(KERN_DEBUG "megasas: Failed to abort "
- "previous AEN command\n");
- return ret_val;
- }
- }
- }
-
- cmd = megasas_get_cmd(instance);
-
- if (!cmd)
- return -ENOMEM;
-
- dcmd = &cmd->frame->dcmd;
-
- memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
-
- /*
- * Prepare DCMD for aen registration
- */
- memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
- dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0x0;
- dcmd->sge_count = 1;
- dcmd->flags = MFI_FRAME_DIR_READ;
- dcmd->timeout = 0;
- dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
- dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
- dcmd->mbox.w[0] = seq_num;
- dcmd->mbox.w[1] = curr_aen.word;
- dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
- dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
-
- /*
-	 * Store a reference to the cmd used to register for AEN. When an
- * application wants us to register for AEN, we have to abort this
- * cmd and re-register with a new EVENT LOCALE supplied by that app
- */
- instance->aen_cmd = cmd;
-
- /*
- * Issue the aen registration frame
- */
- instance->instancet->fire_cmd(cmd->frame_phys_addr ,0,instance->reg_set);
-
- return 0;
-}
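
A small sketch of the class/locale merge rule described in the comments above, with its own illustrative types; the real driver works on union megasas_evt_class_locale and the MR_EVT_CLASS_*/MR_EVT_LOCALE_* values.

/* Classes form a hierarchy where a numerically smaller class includes
 * all higher ones; locales are a bitmap and are simply OR-ed together.
 * Types and values here are illustrative only. */
struct aen_subscription {
	int ev_class;		/* smaller value = more inclusive, e.g. -1 */
	unsigned int locale;	/* bitmap of event locales */
};

static struct aen_subscription
aen_merge_sketch(struct aen_subscription prev, struct aen_subscription curr)
{
	struct aen_subscription merged;

	merged.locale = prev.locale | curr.locale;
	merged.ev_class = (prev.ev_class < curr.ev_class) ?
			   prev.ev_class : curr.ev_class;

	return merged;
}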
-
-/**
- * megasas_start_aen - Subscribes to AEN during driver load time
- * @instance: Adapter soft state
- */
-static int megasas_start_aen(struct megasas_instance *instance)
-{
- struct megasas_evt_log_info eli;
- union megasas_evt_class_locale class_locale;
-
- /*
- * Get the latest sequence number from FW
- */
- memset(&eli, 0, sizeof(eli));
-
- if (megasas_get_seq_num(instance, &eli))
- return -1;
-
- /*
- * Register AEN with FW for latest sequence number plus 1
- */
- class_locale.members.reserved = 0;
- class_locale.members.locale = MR_EVT_LOCALE_ALL;
- class_locale.members.class = MR_EVT_CLASS_DEBUG;
-
- return megasas_register_aen(instance, eli.newest_seq_num + 1,
- class_locale.word);
-}
-
-/**
- * megasas_io_attach - Attaches this driver to SCSI mid-layer
- * @instance: Adapter soft state
- */
-static int megasas_io_attach(struct megasas_instance *instance)
-{
- struct Scsi_Host *host = instance->host;
-
- /*
- * Export parameters required by SCSI mid-layer
- */
- host->irq = instance->pdev->irq;
- host->unique_id = instance->unique_id;
- host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS;
- host->this_id = instance->init_id;
- host->sg_tablesize = instance->max_num_sge;
- host->max_sectors = instance->max_sectors_per_req;
- host->cmd_per_lun = 128;
- host->max_channel = MEGASAS_MAX_CHANNELS - 1;
- host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
- host->max_lun = MEGASAS_MAX_LUN;
- host->max_cmd_len = 16;
-
- /*
- * Notify the mid-layer about the new controller
- */
- if (scsi_add_host(host, &instance->pdev->dev)) {
- printk(KERN_DEBUG "megasas: scsi_add_host failed\n");
- return -ENODEV;
- }
-
- /*
- * Trigger SCSI to scan our drives
- */
- scsi_scan_host(host);
- return 0;
-}
-
-static int
-megasas_set_dma_mask(struct pci_dev *pdev)
-{
- /*
-	 * All our controllers are capable of performing 64-bit DMA
- */
- if (IS_DMA64) {
- if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
-
- if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
- goto fail_set_dma_mask;
- }
- } else {
- if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
- goto fail_set_dma_mask;
- }
- return 0;
-
-fail_set_dma_mask:
- return 1;
-}
-
-/**
- * megasas_probe_one - PCI hotplug entry point
- * @pdev: PCI device structure
- * @id: PCI ids of supported hotplugged adapter
- */
-static int __devinit
-megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- int rval;
- struct Scsi_Host *host;
- struct megasas_instance *instance;
-
- /*
- * Announce PCI information
- */
- printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
- pdev->vendor, pdev->device, pdev->subsystem_vendor,
- pdev->subsystem_device);
-
- printk("bus %d:slot %d:func %d\n",
- pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
-
- /*
-	 * PCI prepping: enable the device, set bus mastering and the DMA mask
- */
- rval = pci_enable_device(pdev);
-
- if (rval) {
- return rval;
- }
-
- pci_set_master(pdev);
-
- if (megasas_set_dma_mask(pdev))
- goto fail_set_dma_mask;
-
- host = scsi_host_alloc(&megasas_template,
- sizeof(struct megasas_instance));
-
- if (!host) {
- printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n");
- goto fail_alloc_instance;
- }
-
- instance = (struct megasas_instance *)host->hostdata;
- memset(instance, 0, sizeof(*instance));
-
- instance->producer = pci_alloc_consistent(pdev, sizeof(u32),
- &instance->producer_h);
- instance->consumer = pci_alloc_consistent(pdev, sizeof(u32),
- &instance->consumer_h);
-
- if (!instance->producer || !instance->consumer) {
- printk(KERN_DEBUG "megasas: Failed to allocate memory for "
- "producer, consumer\n");
- goto fail_alloc_dma_buf;
- }
-
- *instance->producer = 0;
- *instance->consumer = 0;
-
- instance->evt_detail = pci_alloc_consistent(pdev,
- sizeof(struct
- megasas_evt_detail),
- &instance->evt_detail_h);
-
- if (!instance->evt_detail) {
- printk(KERN_DEBUG "megasas: Failed to allocate memory for "
- "event detail structure\n");
- goto fail_alloc_dma_buf;
- }
-
- /*
- * Initialize locks and queues
- */
- INIT_LIST_HEAD(&instance->cmd_pool);
-
- atomic_set(&instance->fw_outstanding,0);
-
- init_waitqueue_head(&instance->int_cmd_wait_q);
- init_waitqueue_head(&instance->abort_cmd_wait_q);
-
- spin_lock_init(&instance->cmd_pool_lock);
- spin_lock_init(&instance->completion_lock);
-
- mutex_init(&instance->aen_mutex);
- sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
-
- /*
- * Initialize PCI related and misc parameters
- */
- instance->pdev = pdev;
- instance->host = host;
- instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
- instance->init_id = MEGASAS_DEFAULT_INIT_ID;
-
- megasas_dbg_lvl = 0;
- instance->flag = 0;
- instance->last_time = 0;
-
- /*
- * Initialize MFI Firmware
- */
- if (megasas_init_mfi(instance))
- goto fail_init_mfi;
-
- /*
- * Register IRQ
- */
- if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED, "megasas", instance)) {
- printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
- goto fail_irq;
- }
-
- instance->instancet->enable_intr(instance->reg_set);
-
- /*
- * Store instance in PCI softstate
- */
- pci_set_drvdata(pdev, instance);
-
- /*
- * Add this controller to megasas_mgmt_info structure so that it
- * can be exported to management applications
- */
- megasas_mgmt_info.count++;
- megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
- megasas_mgmt_info.max_index++;
-
- /*
- * Initiate AEN (Asynchronous Event Notification)
- */
- if (megasas_start_aen(instance)) {
- printk(KERN_DEBUG "megasas: start aen failed\n");
- goto fail_start_aen;
- }
-
- /*
- * Register with SCSI mid-layer
- */
- if (megasas_io_attach(instance))
- goto fail_io_attach;
-
- return 0;
-
- fail_start_aen:
- fail_io_attach:
- megasas_mgmt_info.count--;
- megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
- megasas_mgmt_info.max_index--;
-
- pci_set_drvdata(pdev, NULL);
- instance->instancet->disable_intr(instance->reg_set);
- free_irq(instance->pdev->irq, instance);
-
- megasas_release_mfi(instance);
-
- fail_irq:
- fail_init_mfi:
- fail_alloc_dma_buf:
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail,
- instance->evt_detail_h);
-
- if (instance->producer)
- pci_free_consistent(pdev, sizeof(u32), instance->producer,
- instance->producer_h);
- if (instance->consumer)
- pci_free_consistent(pdev, sizeof(u32), instance->consumer,
- instance->consumer_h);
- scsi_host_put(host);
-
- fail_alloc_instance:
- fail_set_dma_mask:
- pci_disable_device(pdev);
-
- return -ENODEV;
-}
-
-/**
- * megasas_flush_cache - Requests FW to flush all its caches
- * @instance: Adapter soft state
- */
-static void megasas_flush_cache(struct megasas_instance *instance)
-{
- struct megasas_cmd *cmd;
- struct megasas_dcmd_frame *dcmd;
-
- cmd = megasas_get_cmd(instance);
-
- if (!cmd)
- return;
-
- dcmd = &cmd->frame->dcmd;
-
- memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
- dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0x0;
- dcmd->sge_count = 0;
- dcmd->flags = MFI_FRAME_DIR_NONE;
- dcmd->timeout = 0;
- dcmd->data_xfer_len = 0;
- dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
- dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
-
- megasas_issue_blocked_cmd(instance, cmd);
-
- megasas_return_cmd(instance, cmd);
-
- return;
-}
-
-/**
- * megasas_shutdown_controller - Instructs FW to shutdown the controller
- * @instance: Adapter soft state
- * @opcode: Shutdown/Hibernate
- */
-static void megasas_shutdown_controller(struct megasas_instance *instance,
- u32 opcode)
-{
- struct megasas_cmd *cmd;
- struct megasas_dcmd_frame *dcmd;
-
- cmd = megasas_get_cmd(instance);
-
- if (!cmd)
- return;
-
- if (instance->aen_cmd)
- megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
-
- dcmd = &cmd->frame->dcmd;
-
- memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
- dcmd->cmd = MFI_CMD_DCMD;
- dcmd->cmd_status = 0x0;
- dcmd->sge_count = 0;
- dcmd->flags = MFI_FRAME_DIR_NONE;
- dcmd->timeout = 0;
- dcmd->data_xfer_len = 0;
- dcmd->opcode = opcode;
-
- megasas_issue_blocked_cmd(instance, cmd);
-
- megasas_return_cmd(instance, cmd);
-
- return;
-}
-
-#ifdef CONFIG_PM
-/**
- * megasas_suspend - driver suspend entry point
- * @pdev: PCI device structure
- * @state: PCI power state to suspend routine
- */
-static int
-megasas_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct Scsi_Host *host;
- struct megasas_instance *instance;
-
- instance = pci_get_drvdata(pdev);
- host = instance->host;
-
- if (poll_mode_io)
- del_timer_sync(&instance->io_completion_timer);
-
- megasas_flush_cache(instance);
- megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
- tasklet_kill(&instance->isr_tasklet);
-
- pci_set_drvdata(instance->pdev, instance);
- instance->instancet->disable_intr(instance->reg_set);
- free_irq(instance->pdev->irq, instance);
-
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-/**
- * megasas_resume- driver resume entry point
- * @pdev: PCI device structure
- */
-static int
-megasas_resume(struct pci_dev *pdev)
-{
- int rval;
- struct Scsi_Host *host;
- struct megasas_instance *instance;
-
- instance = pci_get_drvdata(pdev);
- host = instance->host;
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- /*
-	 * PCI prepping: enable the device, set bus mastering and the DMA mask
- */
- rval = pci_enable_device(pdev);
-
- if (rval) {
- printk(KERN_ERR "megasas: Enable device failed\n");
- return rval;
- }
-
- pci_set_master(pdev);
-
- if (megasas_set_dma_mask(pdev))
- goto fail_set_dma_mask;
-
- /*
- * Initialize MFI Firmware
- */
-
- *instance->producer = 0;
- *instance->consumer = 0;
-
- atomic_set(&instance->fw_outstanding, 0);
-
- /*
- * We expect the FW state to be READY
- */
- if (megasas_transition_to_ready(instance))
- goto fail_ready_state;
-
- if (megasas_issue_init_mfi(instance))
- goto fail_init_mfi;
-
- tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
- (unsigned long)instance);
-
- /*
- * Register IRQ
- */
- if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED,
- "megasas", instance)) {
- printk(KERN_ERR "megasas: Failed to register IRQ\n");
- goto fail_irq;
- }
-
- instance->instancet->enable_intr(instance->reg_set);
-
- /*
- * Initiate AEN (Asynchronous Event Notification)
- */
- if (megasas_start_aen(instance))
- printk(KERN_ERR "megasas: Start AEN failed\n");
-
- /* Initialize the cmd completion timer */
- if (poll_mode_io)
- megasas_start_timer(instance, &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
- return 0;
-
-fail_irq:
-fail_init_mfi:
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail,
- instance->evt_detail_h);
-
- if (instance->producer)
- pci_free_consistent(pdev, sizeof(u32), instance->producer,
- instance->producer_h);
- if (instance->consumer)
- pci_free_consistent(pdev, sizeof(u32), instance->consumer,
- instance->consumer_h);
- scsi_host_put(host);
-
-fail_set_dma_mask:
-fail_ready_state:
-
- pci_disable_device(pdev);
-
- return -ENODEV;
-}
-#else
-#define megasas_suspend NULL
-#define megasas_resume NULL
-#endif
-
-/**
- * megasas_detach_one - PCI hot"un"plug entry point
- * @pdev: PCI device structure
- */
-static void __devexit megasas_detach_one(struct pci_dev *pdev)
-{
- int i;
- struct Scsi_Host *host;
- struct megasas_instance *instance;
-
- instance = pci_get_drvdata(pdev);
- host = instance->host;
-
- if (poll_mode_io)
- del_timer_sync(&instance->io_completion_timer);
-
- scsi_remove_host(instance->host);
- megasas_flush_cache(instance);
- megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
- tasklet_kill(&instance->isr_tasklet);
-
- /*
-	 * Take the instance off the instance array. Note that we will not
-	 * decrement max_index. We let this array be a sparse array
- */
- for (i = 0; i < megasas_mgmt_info.max_index; i++) {
- if (megasas_mgmt_info.instance[i] == instance) {
- megasas_mgmt_info.count--;
- megasas_mgmt_info.instance[i] = NULL;
-
- break;
- }
- }
-
- pci_set_drvdata(instance->pdev, NULL);
-
- instance->instancet->disable_intr(instance->reg_set);
-
- free_irq(instance->pdev->irq, instance);
-
- megasas_release_mfi(instance);
-
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail, instance->evt_detail_h);
-
- pci_free_consistent(pdev, sizeof(u32), instance->producer,
- instance->producer_h);
-
- pci_free_consistent(pdev, sizeof(u32), instance->consumer,
- instance->consumer_h);
-
- scsi_host_put(host);
-
- pci_set_drvdata(pdev, NULL);
-
- pci_disable_device(pdev);
-
- return;
-}
-
-/**
- * megasas_shutdown - Shutdown entry point
- * @device: Generic device structure
- */
-static void megasas_shutdown(struct pci_dev *pdev)
-{
- struct megasas_instance *instance = pci_get_drvdata(pdev);
- megasas_flush_cache(instance);
-}
-
-/**
- * megasas_mgmt_open - char node "open" entry point
- */
-static int megasas_mgmt_open(struct inode *inode, struct file *filep)
-{
- cycle_kernel_lock();
- /*
- * Allow only those users with admin rights
- */
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- return 0;
-}
-
-/**
- * megasas_mgmt_release - char node "release" entry point
- */
-static int megasas_mgmt_release(struct inode *inode, struct file *filep)
-{
- filep->private_data = NULL;
- fasync_helper(-1, filep, 0, &megasas_async_queue);
-
- return 0;
-}
-
-/**
- * megasas_mgmt_fasync - Async notifier registration from applications
- *
- * This function adds the calling process to a driver global queue. When an
- * event occurs, SIGIO will be sent to all processes in this queue.
- */
-static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
-{
- int rc;
-
- mutex_lock(&megasas_async_queue_mutex);
-
- rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
-
- mutex_unlock(&megasas_async_queue_mutex);
-
- if (rc >= 0) {
- /* For sanity check when we get ioctl */
- filep->private_data = filep;
- return 0;
- }
-
- printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
-
- return rc;
-}
-
-/**
- * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
- * @instance: Adapter soft state
- * @argp: User's ioctl packet
- */
-static int
-megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
- struct megasas_iocpacket __user * user_ioc,
- struct megasas_iocpacket *ioc)
-{
- struct megasas_sge32 *kern_sge32;
- struct megasas_cmd *cmd;
- void *kbuff_arr[MAX_IOCTL_SGE];
- dma_addr_t buf_handle = 0;
- int error = 0, i;
- void *sense = NULL;
- dma_addr_t sense_handle;
- u32 *sense_ptr;
-
- memset(kbuff_arr, 0, sizeof(kbuff_arr));
-
- if (ioc->sge_count > MAX_IOCTL_SGE) {
- printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n",
- ioc->sge_count, MAX_IOCTL_SGE);
- return -EINVAL;
- }
-
- cmd = megasas_get_cmd(instance);
- if (!cmd) {
- printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
- return -ENOMEM;
- }
-
- /*
- * User's IOCTL packet has 2 frames (maximum). Copy those two
- * frames into our cmd's frames. cmd->frame's context will get
- * overwritten when we copy from user's frames. So set that value
- * alone separately
- */
- memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
- cmd->frame->hdr.context = cmd->index;
-
- /*
-	 * The management interface between applications and the fw uses
-	 * MFI frames. E.g., RAID configuration changes, LD property changes
-	 * etc. are accomplished through different kinds of MFI frames. The
-	 * driver needs to care only about substituting user buffers with
-	 * kernel buffers in SGLs. The location of the SGL is embedded in the
-	 * struct iocpacket itself (a userspace usage sketch follows this
-	 * function).
- */
- kern_sge32 = (struct megasas_sge32 *)
- ((unsigned long)cmd->frame + ioc->sgl_off);
-
- /*
- * For each user buffer, create a mirror buffer and copy in
- */
- for (i = 0; i < ioc->sge_count; i++) {
- kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
- ioc->sgl[i].iov_len,
- &buf_handle, GFP_KERNEL);
- if (!kbuff_arr[i]) {
- printk(KERN_DEBUG "megasas: Failed to alloc "
- "kernel SGL buffer for IOCTL \n");
- error = -ENOMEM;
- goto out;
- }
-
- /*
-		 * We don't change the dma_coherent_mask, so
-		 * dma_alloc_coherent only returns 32-bit addresses
- */
- kern_sge32[i].phys_addr = (u32) buf_handle;
- kern_sge32[i].length = ioc->sgl[i].iov_len;
-
- /*
- * We created a kernel buffer corresponding to the
- * user buffer. Now copy in from the user buffer
- */
- if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
- (u32) (ioc->sgl[i].iov_len))) {
- error = -EFAULT;
- goto out;
- }
- }
-
- if (ioc->sense_len) {
- sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
- &sense_handle, GFP_KERNEL);
- if (!sense) {
- error = -ENOMEM;
- goto out;
- }
-
- sense_ptr =
- (u32 *) ((unsigned long)cmd->frame + ioc->sense_off);
- *sense_ptr = sense_handle;
- }
-
- /*
- * Set the sync_cmd flag so that the ISR knows not to complete this
- * cmd to the SCSI mid-layer
- */
- cmd->sync_cmd = 1;
- megasas_issue_blocked_cmd(instance, cmd);
- cmd->sync_cmd = 0;
-
- /*
- * copy out the kernel buffers to user buffers
- */
- for (i = 0; i < ioc->sge_count; i++) {
- if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
- ioc->sgl[i].iov_len)) {
- error = -EFAULT;
- goto out;
- }
- }
-
- /*
- * copy out the sense
- */
- if (ioc->sense_len) {
- /*
- * sense_ptr points to the location that has the user
- * sense buffer address
- */
- sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw +
- ioc->sense_off);
-
- if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
- sense, ioc->sense_len)) {
- printk(KERN_ERR "megasas: Failed to copy out to user "
- "sense data\n");
- error = -EFAULT;
- goto out;
- }
- }
-
- /*
- * copy the status codes returned by the fw
- */
- if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
- &cmd->frame->hdr.cmd_status, sizeof(u8))) {
- printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
- error = -EFAULT;
- }
-
- out:
- if (sense) {
- dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
- sense, sense_handle);
- }
-
- for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
- dma_free_coherent(&instance->pdev->dev,
- kern_sge32[i].length,
- kbuff_arr[i], kern_sge32[i].phys_addr);
- }
-
- megasas_return_cmd(instance, cmd);
- return error;
-}
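
For orientation, a minimal userspace sketch of driving the MEGASAS_IOC_FIRMWARE path implemented above. The header name and the device node path are assumptions (the node for the "megaraid_sas_ioctl" char device is created by udev), and the frame is built as a read-direction DCMD purely for illustration.

#include <fcntl.h>
#include <stddef.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "megaraid_sas_ioctl.h"	/* assumed copy of the driver's ioctl structures */

/* Issue a read-direction DCMD to adapter 'host_no'; sketch only. */
static int megasas_issue_dcmd_sketch(int host_no, unsigned int opcode,
				     void *buf, unsigned int len)
{
	struct megasas_iocpacket ioc;
	struct megasas_dcmd_frame dcmd;
	int fd, ret;

	fd = open("/dev/megaraid_sas_ioctl_node", O_RDWR);	/* path is an assumption */
	if (fd < 0)
		return -1;

	memset(&ioc, 0, sizeof(ioc));
	memset(&dcmd, 0, sizeof(dcmd));

	dcmd.cmd = MFI_CMD_DCMD;
	dcmd.flags = MFI_FRAME_DIR_READ;
	dcmd.opcode = opcode;
	dcmd.sge_count = 1;
	dcmd.data_xfer_len = len;
	/* The driver overwrites the SGL entry with its own kernel buffer. */
	memcpy(ioc.frame.raw, &dcmd, sizeof(dcmd));

	ioc.host_no = host_no;
	ioc.sge_count = 1;
	ioc.sgl_off = offsetof(struct megasas_dcmd_frame, sgl);
	ioc.sgl[0].iov_base = buf;
	ioc.sgl[0].iov_len = len;

	ret = ioctl(fd, MEGASAS_IOC_FIRMWARE, &ioc);
	close(fd);
	return ret;
}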
-
-static struct megasas_instance *megasas_lookup_instance(u16 host_no)
-{
- int i;
-
- for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-
- if ((megasas_mgmt_info.instance[i]) &&
- (megasas_mgmt_info.instance[i]->host->host_no == host_no))
- return megasas_mgmt_info.instance[i];
- }
-
- return NULL;
-}
-
-static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
-{
- struct megasas_iocpacket __user *user_ioc =
- (struct megasas_iocpacket __user *)arg;
- struct megasas_iocpacket *ioc;
- struct megasas_instance *instance;
- int error;
-
- ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
- if (!ioc)
- return -ENOMEM;
-
- if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
- error = -EFAULT;
- goto out_kfree_ioc;
- }
-
- instance = megasas_lookup_instance(ioc->host_no);
- if (!instance) {
- error = -ENODEV;
- goto out_kfree_ioc;
- }
-
- /*
- * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
- */
- if (down_interruptible(&instance->ioctl_sem)) {
- error = -ERESTARTSYS;
- goto out_kfree_ioc;
- }
- error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
- up(&instance->ioctl_sem);
-
- out_kfree_ioc:
- kfree(ioc);
- return error;
-}
-
-static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
-{
- struct megasas_instance *instance;
- struct megasas_aen aen;
- int error;
-
- if (file->private_data != file) {
- printk(KERN_DEBUG "megasas: fasync_helper was not "
- "called first\n");
- return -EINVAL;
- }
-
- if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
- return -EFAULT;
-
- instance = megasas_lookup_instance(aen.host_no);
-
- if (!instance)
- return -ENODEV;
-
- mutex_lock(&instance->aen_mutex);
- error = megasas_register_aen(instance, aen.seq_num,
- aen.class_locale_word);
- mutex_unlock(&instance->aen_mutex);
- return error;
-}
-
-/**
- * megasas_mgmt_ioctl - char node ioctl entry point
- */
-static long
-megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case MEGASAS_IOC_FIRMWARE:
- return megasas_mgmt_ioctl_fw(file, arg);
-
- case MEGASAS_IOC_GET_AEN:
- return megasas_mgmt_ioctl_aen(file, arg);
- }
-
- return -ENOTTY;
-}
-
-#ifdef CONFIG_COMPAT
-static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
-{
- struct compat_megasas_iocpacket __user *cioc =
- (struct compat_megasas_iocpacket __user *)arg;
- struct megasas_iocpacket __user *ioc =
- compat_alloc_user_space(sizeof(struct megasas_iocpacket));
- int i;
- int error = 0;
-
- if (clear_user(ioc, sizeof(*ioc)))
- return -EFAULT;
-
- if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
- copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
- copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
- copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
- copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
- copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
- return -EFAULT;
-
- for (i = 0; i < MAX_IOCTL_SGE; i++) {
- compat_uptr_t ptr;
-
- if (get_user(ptr, &cioc->sgl[i].iov_base) ||
- put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
- copy_in_user(&ioc->sgl[i].iov_len,
- &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
- return -EFAULT;
- }
-
- error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
-
- if (copy_in_user(&cioc->frame.hdr.cmd_status,
- &ioc->frame.hdr.cmd_status, sizeof(u8))) {
- printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
- return -EFAULT;
- }
- return error;
-}
-
-static long
-megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- switch (cmd) {
- case MEGASAS_IOC_FIRMWARE32:
- return megasas_mgmt_compat_ioctl_fw(file, arg);
- case MEGASAS_IOC_GET_AEN:
- return megasas_mgmt_ioctl_aen(file, arg);
- }
-
- return -ENOTTY;
-}
-#endif
-
-/*
- * File operations structure for management interface
- */
-static const struct file_operations megasas_mgmt_fops = {
- .owner = THIS_MODULE,
- .open = megasas_mgmt_open,
- .release = megasas_mgmt_release,
- .fasync = megasas_mgmt_fasync,
- .unlocked_ioctl = megasas_mgmt_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = megasas_mgmt_compat_ioctl,
-#endif
-};
-
-/*
- * PCI hotplug support registration structure
- */
-static struct pci_driver megasas_pci_driver = {
-
- .name = "megaraid_sas",
- .id_table = megasas_pci_table,
- .probe = megasas_probe_one,
- .remove = __devexit_p(megasas_detach_one),
- .suspend = megasas_suspend,
- .resume = megasas_resume,
- .shutdown = megasas_shutdown,
-};
-
-/*
- * Sysfs driver attributes
- */
-static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
-{
- return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
- MEGASAS_VERSION);
-}
-
-static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
-
-static ssize_t
-megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
-{
- return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
- MEGASAS_RELDATE);
-}
-
-static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
- NULL);
-
-static ssize_t
-megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
-{
- return sprintf(buf, "%u\n", megasas_dbg_lvl);
-}
-
-static ssize_t
-megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
-{
- int retval = count;
- if(sscanf(buf,"%u",&megasas_dbg_lvl)<1){
- printk(KERN_ERR "megasas: could not set dbg_lvl\n");
- retval = -EINVAL;
- }
- return retval;
-}
-
-static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUGO, megasas_sysfs_show_dbg_lvl,
- megasas_sysfs_set_dbg_lvl);
-
-static ssize_t
-megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
-{
- return sprintf(buf, "%u\n", poll_mode_io);
-}
-
-static ssize_t
-megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
- const char *buf, size_t count)
-{
- int retval = count;
- int tmp = poll_mode_io;
- int i;
- struct megasas_instance *instance;
-
- if (sscanf(buf, "%u", &poll_mode_io) < 1) {
- printk(KERN_ERR "megasas: could not set poll_mode_io\n");
- retval = -EINVAL;
- }
-
- /*
- * Check if poll_mode_io is already set or is same as previous value
- */
- if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
- goto out;
-
- if (poll_mode_io) {
- /*
- * Start timers for all adapters
- */
- for (i = 0; i < megasas_mgmt_info.max_index; i++) {
- instance = megasas_mgmt_info.instance[i];
- if (instance) {
- megasas_start_timer(instance,
- &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
- }
- }
- } else {
- /*
- * Delete timers for all adapters
- */
- for (i = 0; i < megasas_mgmt_info.max_index; i++) {
- instance = megasas_mgmt_info.instance[i];
- if (instance)
- del_timer_sync(&instance->io_completion_timer);
- }
- }
-
-out:
- return retval;
-}
-
-static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO,
- megasas_sysfs_show_poll_mode_io,
- megasas_sysfs_set_poll_mode_io);
-
-/**
- * megasas_init - Driver load entry point
- */
-static int __init megasas_init(void)
-{
- int rval;
-
- /*
- * Announce driver version and other information
- */
- printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
- MEGASAS_EXT_VERSION);
-
- memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
-
- /*
- * Register character device node
- */
- rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
-
- if (rval < 0) {
- printk(KERN_DEBUG "megasas: failed to open device node\n");
- return rval;
- }
-
- megasas_mgmt_majorno = rval;
-
- /*
- * Register ourselves as PCI hotplug module
- */
- rval = pci_register_driver(&megasas_pci_driver);
-
- if (rval) {
-		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
- goto err_pcidrv;
- }
-
- rval = driver_create_file(&megasas_pci_driver.driver,
- &driver_attr_version);
- if (rval)
- goto err_dcf_attr_ver;
- rval = driver_create_file(&megasas_pci_driver.driver,
- &driver_attr_release_date);
- if (rval)
- goto err_dcf_rel_date;
- rval = driver_create_file(&megasas_pci_driver.driver,
- &driver_attr_dbg_lvl);
- if (rval)
- goto err_dcf_dbg_lvl;
- rval = driver_create_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
- if (rval)
- goto err_dcf_poll_mode_io;
-
- return rval;
-
-err_dcf_poll_mode_io:
- driver_remove_file(&megasas_pci_driver.driver,
- &driver_attr_dbg_lvl);
-err_dcf_dbg_lvl:
- driver_remove_file(&megasas_pci_driver.driver,
- &driver_attr_release_date);
-err_dcf_rel_date:
- driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
-err_dcf_attr_ver:
- pci_unregister_driver(&megasas_pci_driver);
-err_pcidrv:
- unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
- return rval;
-}
-
-/**
- * megasas_exit - Driver unload entry point
- */
-static void __exit megasas_exit(void)
-{
- driver_remove_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
- driver_remove_file(&megasas_pci_driver.driver,
- &driver_attr_dbg_lvl);
- driver_remove_file(&megasas_pci_driver.driver,
- &driver_attr_release_date);
- driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
-
- pci_unregister_driver(&megasas_pci_driver);
- unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
-}
-
-module_init(megasas_init);
-module_exit(megasas_exit);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index b0c41e67170..32166c2c785 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1,15 +1,30 @@
/*
+ * Linux MegaRAID driver for SAS based RAID controllers
*
- * Linux MegaRAID driver for SAS based RAID controllers
+ * Copyright (c) 2003-2012 LSI Corporation.
*
- * Copyright (c) 2003-2005 LSI Corporation.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*
- * FILE : megaraid_sas.h
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * FILE: megaraid_sas.h
+ *
+ * Authors: LSI Corporation
+ *
+ * Send feedback to: <megaraidlinux@lsi.com>
+ *
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: Linuxraid
*/
#ifndef LSI_MEGARAID_SAS_H
@@ -18,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.03.20-rc1"
-#define MEGASAS_RELDATE "March 10, 2008"
-#define MEGASAS_EXT_VERSION "Mon. March 10 11:02:31 PDT 2008"
+#define MEGASAS_VERSION "06.803.01.00-rc1"
+#define MEGASAS_RELDATE "Mar. 10, 2014"
+#define MEGASAS_EXT_VERSION "Mon. Mar. 10 17:00:00 PDT 2014"
/*
* Device IDs
@@ -28,6 +43,40 @@
#define PCI_DEVICE_ID_LSI_SAS1078R 0x0060
#define PCI_DEVICE_ID_LSI_SAS1078DE 0x007C
#define PCI_DEVICE_ID_LSI_VERDE_ZCR 0x0413
+#define PCI_DEVICE_ID_LSI_SAS1078GEN2 0x0078
+#define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079
+#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073
+#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071
+#define PCI_DEVICE_ID_LSI_FUSION 0x005b
+#define PCI_DEVICE_ID_LSI_PLASMA 0x002f
+#define PCI_DEVICE_ID_LSI_INVADER 0x005d
+#define PCI_DEVICE_ID_LSI_FURY 0x005f
+
+/*
+ * Intel HBA SSDIDs
+ */
+#define MEGARAID_INTEL_RS3DC080_SSDID 0x9360
+#define MEGARAID_INTEL_RS3DC040_SSDID 0x9362
+#define MEGARAID_INTEL_RS3SC008_SSDID 0x9380
+#define MEGARAID_INTEL_RS3MC044_SSDID 0x9381
+#define MEGARAID_INTEL_RS3WC080_SSDID 0x9341
+#define MEGARAID_INTEL_RS3WC040_SSDID 0x9343
+
+/*
+ * Intel HBA branding
+ */
+#define MEGARAID_INTEL_RS3DC080_BRANDING \
+ "Intel(R) RAID Controller RS3DC080"
+#define MEGARAID_INTEL_RS3DC040_BRANDING \
+ "Intel(R) RAID Controller RS3DC040"
+#define MEGARAID_INTEL_RS3SC008_BRANDING \
+ "Intel(R) RAID Controller RS3SC008"
+#define MEGARAID_INTEL_RS3MC044_BRANDING \
+ "Intel(R) RAID Controller RS3MC044"
+#define MEGARAID_INTEL_RS3WC080_BRANDING \
+ "Intel(R) RAID Controller RS3WC080"
+#define MEGARAID_INTEL_RS3WC040_BRANDING \
+ "Intel(R) RAID Controller RS3WC040"
/*
* =====================================
@@ -56,7 +105,8 @@
#define MFI_STATE_READY 0xB0000000
#define MFI_STATE_OPERATIONAL 0xC0000000
#define MFI_STATE_FAULT 0xF0000000
-
+#define MFI_RESET_REQUIRED 0x00000001
+#define MFI_RESET_ADAPTER 0x00000002
#define MEGAMFI_FRAME_SIZE 64
/*
@@ -69,6 +119,12 @@
* HOTPLUG : Resume from Hotplug
* MFI_STOP_ADP : Send signal to FW to stop processing
*/
+#define WRITE_SEQUENCE_OFFSET (0x0000000FC) /* I20 */
+#define HOST_DIAGNOSTIC_OFFSET (0x000000F8) /* I20 */
+#define DIAG_WRITE_ENABLE (0x00000080)
+#define DIAG_RESET_ADAPTER (0x00000004)
+
+#define MFI_ADP_RESET 0x00000040
#define MFI_INIT_ABORT 0x00000001
#define MFI_INIT_READY 0x00000002
#define MFI_INIT_MFIMODE 0x00000004
@@ -92,6 +148,7 @@
#define MFI_FRAME_DIR_WRITE 0x0008
#define MFI_FRAME_DIR_READ 0x0010
#define MFI_FRAME_DIR_BOTH 0x0018
+#define MFI_FRAME_IEEE 0x0020
/*
* Definition for cmd_status
@@ -110,8 +167,11 @@
#define MFI_CMD_ABORT 0x06
#define MFI_CMD_SMP 0x07
#define MFI_CMD_STP 0x08
+#define MFI_CMD_INVALID 0xff
#define MR_DCMD_CTRL_GET_INFO 0x01010000
+#define MR_DCMD_LD_GET_LIST 0x03010000
+#define MR_DCMD_LD_LIST_QUERY 0x03010100
#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
#define MR_FLUSH_CTRL_CACHE 0x01
@@ -129,6 +189,13 @@
#define MR_DCMD_CLUSTER 0x08000000
#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
+#define MR_DCMD_PD_LIST_QUERY 0x02010100
+
+/*
+ * Global functions
+ */
+extern u8 MR_ValidateMapInfo(struct megasas_instance *instance);
+
/*
* MFI command completion codes
@@ -191,6 +258,7 @@ enum MFI_STAT {
MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
+ MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67,
MFI_STAT_INVALID_STATUS = 0xFF
};
@@ -249,9 +317,143 @@ enum MR_EVT_ARGS {
MR_EVT_ARGS_STR,
MR_EVT_ARGS_TIME,
MR_EVT_ARGS_ECC,
+ MR_EVT_ARGS_LD_PROP,
+ MR_EVT_ARGS_PD_SPARE,
+ MR_EVT_ARGS_PD_INDEX,
+ MR_EVT_ARGS_DIAG_PASS,
+ MR_EVT_ARGS_DIAG_FAIL,
+ MR_EVT_ARGS_PD_LBA_LBA,
+ MR_EVT_ARGS_PORT_PHY,
+ MR_EVT_ARGS_PD_MISSING,
+ MR_EVT_ARGS_PD_ADDRESS,
+ MR_EVT_ARGS_BITMAP,
+ MR_EVT_ARGS_CONNECTOR,
+ MR_EVT_ARGS_PD_PD,
+ MR_EVT_ARGS_PD_FRU,
+ MR_EVT_ARGS_PD_PATHINFO,
+ MR_EVT_ARGS_PD_POWER_STATE,
+ MR_EVT_ARGS_GENERIC,
+};
+/*
+ * define constants for device list query options
+ */
+enum MR_PD_QUERY_TYPE {
+ MR_PD_QUERY_TYPE_ALL = 0,
+ MR_PD_QUERY_TYPE_STATE = 1,
+ MR_PD_QUERY_TYPE_POWER_STATE = 2,
+ MR_PD_QUERY_TYPE_MEDIA_TYPE = 3,
+ MR_PD_QUERY_TYPE_SPEED = 4,
+ MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5,
};
+enum MR_LD_QUERY_TYPE {
+ MR_LD_QUERY_TYPE_ALL = 0,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1,
+ MR_LD_QUERY_TYPE_USED_TGT_IDS = 2,
+ MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3,
+ MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4,
+};
+
+
+#define MR_EVT_CFG_CLEARED 0x0004
+#define MR_EVT_LD_STATE_CHANGE 0x0051
+#define MR_EVT_PD_INSERTED 0x005b
+#define MR_EVT_PD_REMOVED 0x0070
+#define MR_EVT_LD_CREATED 0x008a
+#define MR_EVT_LD_DELETED 0x008b
+#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
+#define MR_EVT_LD_OFFLINE 0x00fc
+#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
+#define MAX_LOGICAL_DRIVES 64
+
+enum MR_PD_STATE {
+ MR_PD_STATE_UNCONFIGURED_GOOD = 0x00,
+ MR_PD_STATE_UNCONFIGURED_BAD = 0x01,
+ MR_PD_STATE_HOT_SPARE = 0x02,
+ MR_PD_STATE_OFFLINE = 0x10,
+ MR_PD_STATE_FAILED = 0x11,
+ MR_PD_STATE_REBUILD = 0x14,
+ MR_PD_STATE_ONLINE = 0x18,
+ MR_PD_STATE_COPYBACK = 0x20,
+ MR_PD_STATE_SYSTEM = 0x40,
+ };
+
+
+ /*
+ * defines the physical drive address structure
+ */
+struct MR_PD_ADDRESS {
+ u16 deviceId;
+ u16 enclDeviceId;
+
+ union {
+ struct {
+ u8 enclIndex;
+ u8 slotNumber;
+ } mrPdAddress;
+ struct {
+ u8 enclPosition;
+ u8 enclConnectorIndex;
+ } mrEnclAddress;
+ };
+ u8 scsiDevType;
+ union {
+ u8 connectedPortBitmap;
+ u8 connectedPortNumbers;
+ };
+ u64 sasAddr[2];
+} __packed;
+
+/*
+ * defines the physical drive list structure
+ */
+struct MR_PD_LIST {
+ u32 size;
+ u32 count;
+ struct MR_PD_ADDRESS addr[1];
+} __packed;
+
+struct megasas_pd_list {
+ u16 tid;
+ u8 driveType;
+ u8 driveState;
+} __packed;
+
+ /*
+ * defines the logical drive reference structure
+ */
+union MR_LD_REF {
+ struct {
+ u8 targetId;
+ u8 reserved;
+ u16 seqNum;
+ };
+ u32 ref;
+} __packed;
+
+/*
+ * defines the logical drive list structure
+ */
+struct MR_LD_LIST {
+ u32 ldCount;
+ u32 reserved;
+ struct {
+ union MR_LD_REF ref;
+ u8 state;
+ u8 reserved[3];
+ u64 size;
+ } ldList[MAX_LOGICAL_DRIVES];
+} __packed;
+
+struct MR_LD_TARGETID_LIST {
+ u32 size;
+ u32 count;
+ u8 pad[3];
+ u8 targetId[MAX_LOGICAL_DRIVES];
+};
+
+
/*
* SAS controller properties
*/
@@ -278,9 +480,58 @@ struct megasas_ctrl_prop {
u16 ecc_bucket_leak_rate;
u8 restore_hotspare_on_insertion;
u8 expose_encl_devices;
-	u8 reserved[38];
-} __attribute__ ((packed));
+	u8 maintainPdFailHistory;
+	u8 disallowHostRequestReordering;
+	u8 abortCCOnError;
+	u8 loadBalanceMode;
+	u8 disableAutoDetectBackplane;
+ u8 snapVDSpace;
+
+ /*
+ * Add properties that can be controlled by
+ * a bit in the following structure.
+ */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:18;
+ u32 enableJBOD:1;
+ u32 disableSpinDownHS:1;
+ u32 allowBootWithPinnedCache:1;
+ u32 disableOnlineCtrlReset:1;
+ u32 enableSecretKeyControl:1;
+ u32 autoEnhancedImport:1;
+ u32 enableSpinDownUnconfigured:1;
+ u32 SSDPatrolReadEnabled:1;
+ u32 SSDSMARTerEnabled:1;
+ u32 disableNCQ:1;
+ u32 useFdeOnly:1;
+ u32 prCorrectUnconfiguredAreas:1;
+ u32 SMARTerEnabled:1;
+ u32 copyBackDisabled:1;
+#else
+ u32 copyBackDisabled:1;
+ u32 SMARTerEnabled:1;
+ u32 prCorrectUnconfiguredAreas:1;
+ u32 useFdeOnly:1;
+ u32 disableNCQ:1;
+ u32 SSDSMARTerEnabled:1;
+ u32 SSDPatrolReadEnabled:1;
+ u32 enableSpinDownUnconfigured:1;
+ u32 autoEnhancedImport:1;
+ u32 enableSecretKeyControl:1;
+ u32 disableOnlineCtrlReset:1;
+ u32 allowBootWithPinnedCache:1;
+ u32 disableSpinDownHS:1;
+ u32 enableJBOD:1;
+ u32 reserved:18;
+#endif
+ } OnOffProperties;
+ u8 autoSnapVDSpace;
+ u8 viewSpace;
+ u16 spinDownTime;
+ u8 reserved[24];
+} __packed;
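
Editor's note (not part of the patch): the OnOffProperties bitfield is declared twice because C leaves bitfield layout to the ABI; on the little-endian ABIs the kernel targets, the first declared member is expected to land in the least significant bit, so the declaration order is mirrored under __BIG_ENDIAN_BITFIELD to keep the underlying 32-bit word identical on the wire. A small standalone sketch (trimmed, hypothetical field subset) that simply prints where the compiler actually placed one flag:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

union onoff {                       /* trimmed stand-in for OnOffProperties */
	struct {
		uint32_t copyBackDisabled:1;
		uint32_t SMARTerEnabled:1;
		uint32_t reserved:30;
	} bits;
	uint32_t word;
};

int main(void)
{
	union onoff p;

	memset(&p, 0, sizeof(p));
	p.bits.SMARTerEnabled = 1;
	/* On common little-endian ABIs this prints 0x00000002 (bit 1). */
	printf("SMARTerEnabled maps to mask 0x%08x\n", (unsigned)p.word);
	return 0;
}
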
/*
* SAS controller information
@@ -309,7 +560,8 @@ struct megasas_ctrl_info {
u8 PCIE:1;
u8 iSCSI:1;
u8 SAS_3G:1;
- u8 reserved_0:4;
+ u8 SRIOV:1;
+ u8 reserved_0:3;
u8 reserved_1[6];
u8 port_count;
u64 port_addr[8];
@@ -521,9 +773,168 @@ struct megasas_ctrl_info {
*/
char package_version[0x60];
- u8 pad[0x800 - 0x6a0];
-} __attribute__ ((packed));
+ /*
+ * If adapterOperations.supportMoreThan8Phys is set,
+ * and deviceInterface.portCount is greater than 8,
+ * SAS Addrs for first 8 ports shall be populated in
+ * deviceInterface.portAddr, and the rest shall be
+ * populated in deviceInterfacePortAddr2.
+ */
+ u64 deviceInterfacePortAddr2[8]; /*6a0h */
+ u8 reserved3[128]; /*6e0h */
+
+ struct { /*760h */
+ u16 minPdRaidLevel_0:4;
+ u16 maxPdRaidLevel_0:12;
+
+ u16 minPdRaidLevel_1:4;
+ u16 maxPdRaidLevel_1:12;
+
+ u16 minPdRaidLevel_5:4;
+ u16 maxPdRaidLevel_5:12;
+
+ u16 minPdRaidLevel_1E:4;
+ u16 maxPdRaidLevel_1E:12;
+
+ u16 minPdRaidLevel_6:4;
+ u16 maxPdRaidLevel_6:12;
+
+ u16 minPdRaidLevel_10:4;
+ u16 maxPdRaidLevel_10:12;
+
+ u16 minPdRaidLevel_50:4;
+ u16 maxPdRaidLevel_50:12;
+
+ u16 minPdRaidLevel_60:4;
+ u16 maxPdRaidLevel_60:12;
+
+ u16 minPdRaidLevel_1E_RLQ0:4;
+ u16 maxPdRaidLevel_1E_RLQ0:12;
+
+ u16 minPdRaidLevel_1E0_RLQ0:4;
+ u16 maxPdRaidLevel_1E0_RLQ0:12;
+
+ u16 reserved[6];
+ } pdsForRaidLevels;
+
+ u16 maxPds; /*780h */
+ u16 maxDedHSPs; /*782h */
+ u16 maxGlobalHSPs; /*784h */
+ u16 ddfSize; /*786h */
+ u8 maxLdsPerArray; /*788h */
+ u8 partitionsInDDF; /*789h */
+ u8 lockKeyBinding; /*78ah */
+ u8 maxPITsPerLd; /*78bh */
+ u8 maxViewsPerLd; /*78ch */
+ u8 maxTargetId; /*78dh */
+ u16 maxBvlVdSize; /*78eh */
+
+ u16 maxConfigurableSSCSize; /*790h */
+ u16 currentSSCsize; /*792h */
+
+ char expanderFwVersion[12]; /*794h */
+
+ u16 PFKTrialTimeRemaining; /*7A0h */
+
+ u16 cacheMemorySize; /*7A2h */
+
+ struct { /*7A4h */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:5;
+ u32 activePassive:2;
+ u32 supportConfigAutoBalance:1;
+ u32 mpio:1;
+ u32 supportDataLDonSSCArray:1;
+ u32 supportPointInTimeProgress:1;
+ u32 supportUnevenSpans:1;
+ u32 dedicatedHotSparesLimited:1;
+ u32 headlessMode:1;
+ u32 supportEmulatedDrives:1;
+ u32 supportResetNow:1;
+ u32 realTimeScheduler:1;
+ u32 supportSSDPatrolRead:1;
+ u32 supportPerfTuning:1;
+ u32 disableOnlinePFKChange:1;
+ u32 supportJBOD:1;
+ u32 supportBootTimePFKChange:1;
+ u32 supportSetLinkSpeed:1;
+ u32 supportEmergencySpares:1;
+ u32 supportSuspendResumeBGops:1;
+ u32 blockSSDWriteCacheChange:1;
+ u32 supportShieldState:1;
+ u32 supportLdBBMInfo:1;
+ u32 supportLdPIType3:1;
+ u32 supportLdPIType2:1;
+ u32 supportLdPIType1:1;
+ u32 supportPIcontroller:1;
+#else
+ u32 supportPIcontroller:1;
+ u32 supportLdPIType1:1;
+ u32 supportLdPIType2:1;
+ u32 supportLdPIType3:1;
+ u32 supportLdBBMInfo:1;
+ u32 supportShieldState:1;
+ u32 blockSSDWriteCacheChange:1;
+ u32 supportSuspendResumeBGops:1;
+ u32 supportEmergencySpares:1;
+ u32 supportSetLinkSpeed:1;
+ u32 supportBootTimePFKChange:1;
+ u32 supportJBOD:1;
+ u32 disableOnlinePFKChange:1;
+ u32 supportPerfTuning:1;
+ u32 supportSSDPatrolRead:1;
+ u32 realTimeScheduler:1;
+
+ u32 supportResetNow:1;
+ u32 supportEmulatedDrives:1;
+ u32 headlessMode:1;
+ u32 dedicatedHotSparesLimited:1;
+
+
+ u32 supportUnevenSpans:1;
+ u32 supportPointInTimeProgress:1;
+ u32 supportDataLDonSSCArray:1;
+ u32 mpio:1;
+ u32 supportConfigAutoBalance:1;
+ u32 activePassive:2;
+ u32 reserved:5;
+#endif
+ } adapterOperations2;
+
+ u8 driverVersion[32]; /*7A8h */
+ u8 maxDAPdCountSpinup60; /*7C8h */
+ u8 temperatureROC; /*7C9h */
+ u8 temperatureCtrl; /*7CAh */
+ u8 reserved4; /*7CBh */
+ u16 maxConfigurablePds; /*7CCh */
+
+
+ u8 reserved5[2]; /*0x7CDh */
+
+ /*
+ * HA cluster information
+ */
+ struct {
+ u32 peerIsPresent:1;
+ u32 peerIsIncompatible:1;
+ u32 hwIncompatible:1;
+ u32 fwVersionMismatch:1;
+ u32 ctrlPropIncompatible:1;
+ u32 premiumFeatureMismatch:1;
+ u32 reserved:26;
+ } cluster;
+
+ char clusterId[16]; /*7D4h */
+ struct {
+ u8 maxVFsSupported; /*0x7E4*/
+ u8 numVFsEnabled; /*0x7E5*/
+ u8 requestorId; /*0x7E6 0:PF, 1:VF1, 2:VF2*/
+ u8 reserved; /*0x7E7*/
+ } iov;
+
+ u8 pad[0x800-0x7E8]; /*0x7E8 pad to 2k */
+} __packed;
/*
* ===============================
@@ -531,14 +942,21 @@ struct megasas_ctrl_info {
* ===============================
*/
#define MEGASAS_MAX_PD_CHANNELS 2
-#define MEGASAS_MAX_LD_CHANNELS 2
+#define MEGASAS_MAX_LD_CHANNELS 1
#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
MEGASAS_MAX_LD_CHANNELS)
#define MEGASAS_MAX_DEV_PER_CHANNEL 128
#define MEGASAS_DEFAULT_INIT_ID -1
#define MEGASAS_MAX_LUN 8
#define MEGASAS_MAX_LD 64
-
+#define MEGASAS_DEFAULT_CMD_PER_LUN 256
+#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \
+ MEGASAS_MAX_DEV_PER_CHANNEL)
+#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
+ MEGASAS_MAX_DEV_PER_CHANNEL)
+
+#define MEGASAS_MAX_SECTORS (2*1024)
+#define MEGASAS_MAX_SECTORS_IEEE (2*128)
#define MEGASAS_DBG_LVL 1
#define MEGASAS_FW_BUSY 1
@@ -559,6 +977,7 @@ struct megasas_ctrl_info {
#define MEGASAS_RESET_NOTICE_INTERVAL 5
#define MEGASAS_IOCTL_CMD 0
#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
+#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
/*
* FW reports the maximum of number of commands that it can accept (maximum
@@ -568,26 +987,50 @@ struct megasas_ctrl_info {
* is shown below
*/
#define MEGASAS_INT_CMDS 32
+#define MEGASAS_SKINNY_INT_CMDS 5
+#define MEGASAS_MAX_MSIX_QUEUES 128
/*
* FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
* SGLs based on the size of dma_addr_t
*/
#define IS_DMA64 (sizeof(dma_addr_t) == 8)
+#define MFI_XSCALE_OMR0_CHANGE_INTERRUPT 0x00000001
+
+#define MFI_INTR_FLAG_REPLY_MESSAGE 0x00000001
+#define MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE 0x00000002
+#define MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT 0x00000004
+
#define MFI_OB_INTR_STATUS_MASK 0x00000002
#define MFI_POLL_TIMEOUT_SECS 60
-#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10)
-
+#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF (5 * HZ)
+#define MEGASAS_OCR_SETTLE_TIME_VF (1000 * 30)
+#define MEGASAS_ROUTINE_WAIT_TIME_VF 300
#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
-
+#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
+#define MFI_GEN2_ENABLE_INTERRUPT_MASK (0x00000001 | 0x00000004)
+#define MFI_REPLY_SKINNY_MESSAGE_INTERRUPT 0x40000000
+#define MFI_SKINNY_ENABLE_INTERRUPT_MASK (0x00000001)
+
+#define MFI_1068_PCSR_OFFSET 0x84
+#define MFI_1068_FW_HANDSHAKE_OFFSET 0x64
+#define MFI_1068_FW_READY 0xDDDD0000
+
+#define MR_MAX_REPLY_QUEUES_OFFSET 0X0000001F
+#define MR_MAX_REPLY_QUEUES_EXT_OFFSET 0X003FC000
+#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14
+#define MR_MAX_MSIX_REG_ARRAY 16
/*
* register set for both 1068 and 1078 controllers
* structure extended for 1078 registers
*/
struct megasas_register_set {
- u32 reserved_0[4]; /*0000h*/
+ u32 doorbell; /*0000h*/
+ u32 fusion_seq_offset; /*0004h*/
+ u32 fusion_host_diag; /*0008h*/
+ u32 reserved_01; /*000Ch*/
u32 inbound_msg_0; /*0010h*/
u32 inbound_msg_1; /*0014h*/
@@ -607,23 +1050,28 @@ struct megasas_register_set {
u32 inbound_queue_port; /*0040h*/
u32 outbound_queue_port; /*0044h*/
- u32 reserved_2[22]; /*0048h*/
+ u32 reserved_2[9]; /*0048h*/
+ u32 reply_post_host_index; /*006Ch*/
+ u32 reserved_2_2[12]; /*0070h*/
u32 outbound_doorbell_clear; /*00A0h*/
u32 reserved_3[3]; /*00A4h*/
u32 outbound_scratch_pad ; /*00B0h*/
+ u32 outbound_scratch_pad_2; /*00B4h*/
- u32 reserved_4[3]; /*00B4h*/
+ u32 reserved_4[2]; /*00B8h*/
u32 inbound_low_queue_port ; /*00C0h*/
u32 inbound_high_queue_port ; /*00C4h*/
u32 reserved_5; /*00C8h*/
- u32 index_registers[820]; /*00CCh*/
-
+ u32 res_6[11]; /*CCh*/
+ u32 host_diag;
+ u32 seq_offset;
+ u32 index_registers[807]; /*00CCh*/
} __attribute__ ((packed));
struct megasas_sge32 {
@@ -640,10 +1088,17 @@ struct megasas_sge64 {
} __attribute__ ((packed));
+struct megasas_sge_skinny {
+ u64 phys_addr;
+ u32 length;
+ u32 flag;
+} __packed;
+
union megasas_sgl {
struct megasas_sge32 sge32[1];
struct megasas_sge64 sge64[1];
+ struct megasas_sge_skinny sge_skinny[1];
} __attribute__ ((packed));
@@ -675,6 +1130,21 @@ union megasas_sgl_frame {
} __attribute__ ((packed));
+typedef union _MFI_CAPABILITIES {
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:30;
+ u32 support_additional_msix:1;
+ u32 support_fp_remote_lun:1;
+#else
+ u32 support_fp_remote_lun:1;
+ u32 support_additional_msix:1;
+ u32 reserved:30;
+#endif
+ } mfi_capabilities;
+ u32 reg;
+} MFI_CAPABILITIES;
+
struct megasas_init_frame {
u8 cmd; /*00h */
@@ -682,7 +1152,7 @@ struct megasas_init_frame {
u8 cmd_status; /*02h */
u8 reserved_1; /*03h */
- u32 reserved_2; /*04h */
+ MFI_CAPABILITIES driver_operations; /*04h*/
u32 context; /*08h */
u32 pad_0; /*0Ch */
@@ -897,9 +1367,15 @@ struct megasas_cmd;
union megasas_evt_class_locale {
struct {
+#ifndef __BIG_ENDIAN_BITFIELD
u16 locale;
u8 reserved;
s8 class;
+#else
+ s8 class;
+ u8 reserved;
+ u16 locale;
+#endif
} __attribute__ ((packed)) members;
u32 word;
@@ -1057,16 +1533,15 @@ struct megasas_evt_detail {
} __attribute__ ((packed));
- struct megasas_instance_template {
- void (*fire_cmd)(dma_addr_t ,u32 ,struct megasas_register_set __iomem *);
-
- void (*enable_intr)(struct megasas_register_set __iomem *) ;
- void (*disable_intr)(struct megasas_register_set __iomem *);
-
- int (*clear_intr)(struct megasas_register_set __iomem *);
+struct megasas_aen_event {
+ struct delayed_work hotplug_work;
+ struct megasas_instance *instance;
+};
- u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
- };
+struct megasas_irq_context {
+ struct megasas_instance *instance;
+ u32 MSIxIndex;
+};
struct megasas_instance {
@@ -1074,22 +1549,37 @@ struct megasas_instance {
dma_addr_t producer_h;
u32 *consumer;
dma_addr_t consumer_h;
+ struct MR_LD_VF_AFFILIATION *vf_affiliation;
+ dma_addr_t vf_affiliation_h;
+ struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111;
+ dma_addr_t vf_affiliation_111_h;
+ struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
+ dma_addr_t hb_host_mem_h;
u32 *reply_queue;
dma_addr_t reply_queue_h;
- unsigned long base_addr;
struct megasas_register_set __iomem *reg_set;
-
+ u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
+ struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
+ struct megasas_pd_list local_pd_list[MEGASAS_MAX_PD];
+ u8 ld_ids[MEGASAS_MAX_LD_IDS];
s8 init_id;
u16 max_num_sge;
u16 max_fw_cmds;
+	/* For Fusion this is the number of IOCTL cmds; for other,
+	   MFI-based controllers it equals max_fw_cmds */
+ u16 max_mfi_cmds;
u32 max_sectors_per_req;
+ struct megasas_aen_event *ev;
struct megasas_cmd **cmd_list;
struct list_head cmd_pool;
+	/* used to synchronize access to the free cmd pool */
spinlock_t cmd_pool_lock;
+ /* used to sync fire the cmd to fw */
+ spinlock_t hba_lock;
/* used to synch producer, consumer ptrs in dpc */
spinlock_t completion_lock;
struct dma_pool *frame_dma_pool;
@@ -1108,17 +1598,137 @@ struct megasas_instance {
struct pci_dev *pdev;
u32 unique_id;
+ u32 fw_support_ieee;
atomic_t fw_outstanding;
- u32 hw_crit_error;
+ atomic_t fw_reset_no_pci_access;
struct megasas_instance_template *instancet;
struct tasklet_struct isr_tasklet;
+ struct work_struct work_init;
u8 flag;
+ u8 unload;
+ u8 flag_ieee;
+ u8 issuepend_done;
+ u8 disableOnlineCtrlReset;
+ u8 UnevenSpanSupport;
+ u8 adprecovery;
unsigned long last_time;
+ u32 mfiStatus;
+ u32 last_seq_num;
+
+ struct list_head internal_reset_pending_q;
+
+ /* Ptr to hba specific information */
+ void *ctrl_context;
+ unsigned int msix_vectors;
+ struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
+ struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
+ u64 map_id;
+ struct megasas_cmd *map_update_cmd;
+ unsigned long bar;
+ long reset_flags;
+ struct mutex reset_mutex;
+ struct timer_list sriov_heartbeat_timer;
+ char skip_heartbeat_timer_del;
+ u8 requestorId;
+ u64 initiator_sas_address;
+ u64 ld_sas_address[64];
+ char PlasmaFW111;
+ char mpio;
+ int throttlequeuedepth;
+ u8 mask_interrupts;
+ u8 is_imr;
+};
+struct MR_LD_VF_MAP {
+ u32 size;
+ union MR_LD_REF ref;
+ u8 ldVfCount;
+ u8 reserved[6];
+ u8 policy[1];
+};
+
+struct MR_LD_VF_AFFILIATION {
+ u32 size;
+ u8 ldCount;
+ u8 vfCount;
+ u8 thisVf;
+ u8 reserved[9];
+ struct MR_LD_VF_MAP map[1];
+};
+
+/* Plasma 1.11 FW backward compatibility structures */
+#define IOV_111_OFFSET 0x7CE
+#define MAX_VIRTUAL_FUNCTIONS 8
+
+struct IOV_111 {
+ u8 maxVFsSupported;
+ u8 numVFsEnabled;
+ u8 requestorId;
+ u8 reserved[5];
+};
+
+struct MR_LD_VF_MAP_111 {
+ u8 targetId;
+ u8 reserved[3];
+ u8 policy[MAX_VIRTUAL_FUNCTIONS];
+};
+
+struct MR_LD_VF_AFFILIATION_111 {
+ u8 vdCount;
+ u8 vfCount;
+ u8 thisVf;
+ u8 reserved[5];
+ struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES];
+};
+
+struct MR_CTRL_HB_HOST_MEM {
+ struct {
+ u32 fwCounter; /* Firmware heart beat counter */
+ struct {
+ u32 debugmode:1; /* 1=Firmware is in debug mode.
+ Heart beat will not be updated. */
+ u32 reserved:31;
+ } debug;
+ u32 reserved_fw[6];
+ u32 driverCounter; /* Driver heart beat counter. 0x20 */
+ u32 reserved_driver[7];
+ } HB;
+ u8 pad[0x400-0x40];
+};
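
Editor's note (not part of the patch): MR_CTRL_HB_HOST_MEM backs the SR-IOV heartbeat — firmware periodically increments fwCounter and the driver increments driverCounter, each side watching whether the other's counter keeps moving. A simplified sketch of the detection idea follows; the driver's actual handler may differ, and the helper name is made up for illustration.

#include <stdbool.h>
#include <stdint.h>

struct hb_view {                 /* trimmed stand-in for the HB block above */
	uint32_t fwCounter;      /* incremented by firmware */
	uint32_t debugmode;      /* 1 = firmware in debug mode, skip the check */
};

/* Returns true if the firmware counter has not moved since the last call. */
static bool fw_heartbeat_stalled(const struct hb_view *hb, uint32_t *last_seen)
{
	if (hb->debugmode)
		return false;
	if (hb->fwCounter != *last_seen) {
		*last_seen = hb->fwCounter;   /* counter moved: firmware alive */
		return false;
	}
	return true;                          /* unchanged across the interval */
}
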
+
+enum {
+ MEGASAS_HBA_OPERATIONAL = 0,
+ MEGASAS_ADPRESET_SM_INFAULT = 1,
+ MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS = 2,
+ MEGASAS_ADPRESET_SM_OPERATIONAL = 3,
+ MEGASAS_HW_CRITICAL_ERROR = 4,
+ MEGASAS_ADPRESET_SM_POLLING = 5,
+ MEGASAS_ADPRESET_INPROG_SIGN = 0xDEADDEAD,
+};
+
+struct megasas_instance_template {
+ void (*fire_cmd)(struct megasas_instance *, dma_addr_t, \
+ u32, struct megasas_register_set __iomem *);
+
+ void (*enable_intr)(struct megasas_instance *);
+ void (*disable_intr)(struct megasas_instance *);
- struct timer_list io_completion_timer;
+ int (*clear_intr)(struct megasas_register_set __iomem *);
+
+ u32 (*read_fw_status_reg)(struct megasas_register_set __iomem *);
+ int (*adp_reset)(struct megasas_instance *, \
+ struct megasas_register_set __iomem *);
+ int (*check_reset)(struct megasas_instance *, \
+ struct megasas_register_set __iomem *);
+ irqreturn_t (*service_isr)(int irq, void *devp);
+ void (*tasklet)(unsigned long);
+ u32 (*init_adapter)(struct megasas_instance *);
+ u32 (*build_and_issue_cmd) (struct megasas_instance *,
+ struct scsi_cmnd *);
+ void (*issue_dcmd) (struct megasas_instance *instance,
+ struct megasas_cmd *cmd);
};
#define MEGASAS_IS_LOGICAL(scp) \
@@ -1138,12 +1748,20 @@ struct megasas_cmd {
u32 index;
u8 sync_cmd;
u8 cmd_status;
- u16 abort_aen;
+ u8 abort_aen;
+ u8 retry_for_fw_reset;
+
struct list_head list;
struct scsi_cmnd *scmd;
struct megasas_instance *instance;
- u32 frame_count;
+ union {
+ struct {
+ u16 smid;
+ u16 resvd;
+ } context;
+ u32 frame_count;
+ };
};
#define MAX_MGMT_ADAPTERS 1024
@@ -1201,4 +1819,16 @@ struct megasas_mgmt_info {
int max_index;
};
+u8
+MR_BuildRaidContext(struct megasas_instance *instance,
+ struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map);
+u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
new file mode 100644
index 00000000000..112799b131a
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -0,0 +1,6325 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2003-2012 LSI Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * FILE: megaraid_sas_base.c
+ * Version : 06.803.01.00-rc1
+ *
+ * Authors: LSI Corporation
+ * Sreenivas Bagalkote
+ * Sumant Patro
+ * Bo Yang
+ * Adam Radford <linuxraid@lsi.com>
+ *
+ * Send feedback to: <megaraidlinux@lsi.com>
+ *
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: Linuxraid
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+
+/*
+ * Number of sectors per IO command
+ * Will be set in megasas_init_mfi if user does not provide
+ */
+static unsigned int max_sectors;
+module_param_named(max_sectors, max_sectors, int, 0);
+MODULE_PARM_DESC(max_sectors,
+ "Maximum number of sectors per IO command");
+
+static int msix_disable;
+module_param(msix_disable, int, S_IRUGO);
+MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
+
+static unsigned int msix_vectors;
+module_param(msix_vectors, int, S_IRUGO);
+MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
+
+static int allow_vf_ioctls;
+module_param(allow_vf_ioctls, int, S_IRUGO);
+MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
+
+static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
+module_param(throttlequeuedepth, int, S_IRUGO);
+MODULE_PARM_DESC(throttlequeuedepth,
+ "Adapter queue depth when throttled due to I/O timeout. Default: 16");
+
+int resetwaittime = MEGASAS_RESET_WAIT_TIME;
+module_param(resetwaittime, int, S_IRUGO);
+MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
+ "before resetting adapter. Default: 180");
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MEGASAS_VERSION);
+MODULE_AUTHOR("megaraidlinux@lsi.com");
+MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
+
+int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
+static int megasas_get_pd_list(struct megasas_instance *instance);
+static int megasas_ld_list_query(struct megasas_instance *instance,
+ u8 query_type);
+static int megasas_issue_init_mfi(struct megasas_instance *instance);
+static int megasas_register_aen(struct megasas_instance *instance,
+ u32 seq_num, u32 class_locale_word);
+/*
+ * PCI ID table for all supported controllers
+ */
+static struct pci_device_id megasas_pci_table[] = {
+
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
+ /* xscale IOP */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
+ /* ppc IOP */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
+ /* ppc IOP */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
+ /* gen2*/
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
+ /* gen2*/
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
+ /* skinny*/
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
+ /* skinny*/
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
+ /* xscale IOP, vega */
+ {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
+ /* xscale IOP */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
+ /* Fusion */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
+ /* Plasma */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
+ /* Invader */
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
+ /* Fury */
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, megasas_pci_table);
+
+static int megasas_mgmt_majorno;
+struct megasas_mgmt_info megasas_mgmt_info;
+static struct fasync_struct *megasas_async_queue;
+static DEFINE_MUTEX(megasas_async_queue_mutex);
+
+static int megasas_poll_wait_aen;
+static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
+static u32 support_poll_for_event;
+u32 megasas_dbg_lvl;
+static u32 support_device_change;
+
+/* define lock for aen poll */
+spinlock_t poll_aen_lock;
+
+void
+megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ u8 alt_status);
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
+static int
+megasas_adp_reset_gen2(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *reg_set);
+static irqreturn_t megasas_isr(int irq, void *devp);
+static u32
+megasas_init_adapter_mfi(struct megasas_instance *instance);
+u32
+megasas_build_and_issue_cmd(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd);
+static void megasas_complete_cmd_dpc(unsigned long instance_addr);
+void
+megasas_release_fusion(struct megasas_instance *instance);
+int
+megasas_ioc_init_fusion(struct megasas_instance *instance);
+void
+megasas_free_cmds_fusion(struct megasas_instance *instance);
+u8
+megasas_get_map_info(struct megasas_instance *instance);
+int
+megasas_sync_map_info(struct megasas_instance *instance);
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds);
+void megasas_reset_reply_desc(struct megasas_instance *instance);
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout);
+void megasas_fusion_ocr_wq(struct work_struct *work);
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+ int initial);
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd);
+
+void
+megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ instance->instancet->fire_cmd(instance,
+ cmd->frame_phys_addr, 0, instance->reg_set);
+}
+
+/**
+ * megasas_get_cmd - Get a command from the free pool
+ * @instance: Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+struct megasas_cmd *megasas_get_cmd(struct megasas_instance
+ *instance)
+{
+ unsigned long flags;
+ struct megasas_cmd *cmd = NULL;
+
+ spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+
+ if (!list_empty(&instance->cmd_pool)) {
+ cmd = list_entry((&instance->cmd_pool)->next,
+ struct megasas_cmd, list);
+ list_del_init(&cmd->list);
+ } else {
+ printk(KERN_ERR "megasas: Command pool empty!\n");
+ }
+
+ spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+ return cmd;
+}
+
+/**
+ * megasas_return_cmd - Return a cmd to free command pool
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+inline void
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+
+ cmd->scmd = NULL;
+ cmd->frame_count = 0;
+ if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
+ (reset_devices))
+ cmd->frame->hdr.cmd = MFI_CMD_INVALID;
+ list_add_tail(&cmd->list, &instance->cmd_pool);
+
+ spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+}
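
Editor's note (not part of the patch): megasas_get_cmd()/megasas_return_cmd() implement a classic preallocated free pool protected by a spinlock. A compact userspace sketch of the same pattern, with a pthread mutex standing in for cmd_pool_lock (all names here are illustrative, not driver APIs):

#include <pthread.h>
#include <stdio.h>

#define POOL_SIZE 4

struct cmd {
	int index;
	struct cmd *next;
};

static struct cmd pool_storage[POOL_SIZE];
static struct cmd *free_list;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static void pool_init(void)
{
	for (int i = 0; i < POOL_SIZE; i++) {
		pool_storage[i].index = i;
		pool_storage[i].next = free_list;
		free_list = &pool_storage[i];
	}
}

static struct cmd *get_cmd(void)
{
	pthread_mutex_lock(&pool_lock);
	struct cmd *c = free_list;

	if (c)
		free_list = c->next;
	else
		fprintf(stderr, "command pool empty\n");
	pthread_mutex_unlock(&pool_lock);
	return c;
}

static void return_cmd(struct cmd *c)
{
	pthread_mutex_lock(&pool_lock);
	c->next = free_list;
	free_list = c;
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	pool_init();
	struct cmd *c = get_cmd();

	if (c) {
		printf("got command slot %d\n", c->index);
		return_cmd(c);
	}
	return 0;
}
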
+
+
+/**
+* The following functions are defined for xscale
+* (deviceid : 1064R, PERC5) controllers
+*/
+
+/**
+ * megasas_enable_intr_xscale - Enables interrupts
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_enable_intr_xscale(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ writel(0, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_xscale - Disables interrupt
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_disable_intr_xscale(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ u32 mask = 0x1f;
+ regs = instance->reg_set;
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_xscale - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
+{
+ return readl(&(regs)->outbound_msg_0);
+}
+/**
+ * megasas_clear_intr_xscale - Check & clear interrupt
+ * @regs: MFI register set
+ */
+static int
+megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
+{
+ u32 status;
+ u32 mfiStatus = 0;
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (status & MFI_OB_INTR_STATUS_MASK)
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+ if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
+ mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ if (mfiStatus)
+ writel(status, &regs->outbound_intr_status);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_status);
+
+ return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_xscale - Sends command to the FW
+ * @frame_phys_addr : Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+static inline void
+megasas_fire_cmd_xscale(struct megasas_instance *instance,
+ dma_addr_t frame_phys_addr,
+ u32 frame_count,
+ struct megasas_register_set __iomem *regs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ writel((frame_phys_addr >> 3)|(frame_count),
+ &(regs)->inbound_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
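
Editor's note (not part of the patch): the xscale doorbell write packs the frame address and the extra-frame count into one 32-bit value. Assuming MFI frames are at least 64-byte aligned (an assumption of this sketch, not shown in the hunk), (addr >> 3) has its low three bits clear, which is where the caller's frame_count - 1 (capped at 7, since megasas_get_frame_count() caps frame_count at 8) lands. A standalone illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t xscale_doorbell(uint64_t frame_phys_addr, uint32_t extra_frames)
{
	assert((frame_phys_addr & 0x3f) == 0);  /* 64-byte-aligned frame */
	assert(extra_frames <= 7);              /* fits in the freed low bits */
	return (uint32_t)(frame_phys_addr >> 3) | extra_frames;
}

int main(void)
{
	/* e.g. a frame at 0x1f000 followed by 2 extra frames */
	printf("doorbell word = 0x%08x\n",
	       (unsigned)xscale_doorbell(0x1f000, 2));
	return 0;
}
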
+
+/**
+ * megasas_adp_reset_xscale - For controller reset
+ * @regs: MFI register set
+ */
+static int
+megasas_adp_reset_xscale(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ u32 i;
+ u32 pcidata;
+ writel(MFI_ADP_RESET, &regs->inbound_doorbell);
+
+ for (i = 0; i < 3; i++)
+ msleep(1000); /* sleep for 3 secs */
+ pcidata = 0;
+ pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
+ printk(KERN_NOTICE "pcidata = %x\n", pcidata);
+ if (pcidata & 0x2) {
+ printk(KERN_NOTICE "mfi 1068 offset read=%x\n", pcidata);
+ pcidata &= ~0x2;
+ pci_write_config_dword(instance->pdev,
+ MFI_1068_PCSR_OFFSET, pcidata);
+
+ for (i = 0; i < 2; i++)
+ msleep(1000); /* need to wait 2 secs again */
+
+ pcidata = 0;
+ pci_read_config_dword(instance->pdev,
+ MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
+ printk(KERN_NOTICE "1068 offset handshake read=%x\n", pcidata);
+ if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
+ printk(KERN_NOTICE "1068 offset pcidt=%x\n", pcidata);
+ pcidata = 0;
+ pci_write_config_dword(instance->pdev,
+ MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
+ }
+ }
+ return 0;
+}
+
+/**
+ * megasas_check_reset_xscale - For controller reset check
+ * @regs: MFI register set
+ */
+static int
+megasas_check_reset_xscale(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+
+ if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+ (le32_to_cpu(*instance->consumer) ==
+ MEGASAS_ADPRESET_INPROG_SIGN))
+ return 1;
+ return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_xscale = {
+
+ .fire_cmd = megasas_fire_cmd_xscale,
+ .enable_intr = megasas_enable_intr_xscale,
+ .disable_intr = megasas_disable_intr_xscale,
+ .clear_intr = megasas_clear_intr_xscale,
+ .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
+ .adp_reset = megasas_adp_reset_xscale,
+ .check_reset = megasas_check_reset_xscale,
+ .service_isr = megasas_isr,
+ .tasklet = megasas_complete_cmd_dpc,
+ .init_adapter = megasas_init_adapter_mfi,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd,
+ .issue_dcmd = megasas_issue_dcmd,
+};
+
+/**
+* This is the end of set of functions & definitions specific
+* to xscale (deviceid : 1064R, PERC5) controllers
+*/
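
Editor's note (not part of the patch): megasas_instance_template is an ops table; each controller family fills one in, the probe path selects it once, and common code then calls hardware-specific behaviour through instance->instancet instead of branching on device IDs everywhere. A minimal sketch of the pattern (all names invented for illustration):

#include <stdio.h>

struct ops {                                /* cut-down instance template */
	void (*enable_intr)(void);
	void (*disable_intr)(void);
};

static void xscale_enable(void)  { puts("xscale: unmask interrupts"); }
static void xscale_disable(void) { puts("xscale: mask interrupts"); }

static const struct ops xscale_ops = {
	.enable_intr  = xscale_enable,
	.disable_intr = xscale_disable,
};

int main(void)
{
	const struct ops *instancet = &xscale_ops;   /* chosen once at probe */

	instancet->enable_intr();
	instancet->disable_intr();
	return 0;
}
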
+
+/**
+* The following functions are defined for ppc (deviceid : 0x60)
+* controllers
+*/
+
+/**
+ * megasas_enable_intr_ppc - Enables interrupts
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_enable_intr_ppc(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
+
+ writel(~0x80000000, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_ppc - Disable interrupt
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_disable_intr_ppc(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ u32 mask = 0xFFFFFFFF;
+ regs = instance->reg_set;
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_ppc - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
+{
+ return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_intr_ppc - Check & clear interrupt
+ * @regs: MFI register set
+ */
+static int
+megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
+{
+ u32 status, mfiStatus = 0;
+
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
+ mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ writel(status, &regs->outbound_doorbell_clear);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_doorbell_clear);
+
+ return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_ppc - Sends command to the FW
+ * @frame_phys_addr : Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+static inline void
+megasas_fire_cmd_ppc(struct megasas_instance *instance,
+ dma_addr_t frame_phys_addr,
+ u32 frame_count,
+ struct megasas_register_set __iomem *regs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ writel((frame_phys_addr | (frame_count<<1))|1,
+ &(regs)->inbound_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_check_reset_ppc - For controller reset check
+ * @regs: MFI register set
+ */
+static int
+megasas_check_reset_ppc(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
+ return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_ppc = {
+
+ .fire_cmd = megasas_fire_cmd_ppc,
+ .enable_intr = megasas_enable_intr_ppc,
+ .disable_intr = megasas_disable_intr_ppc,
+ .clear_intr = megasas_clear_intr_ppc,
+ .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
+ .adp_reset = megasas_adp_reset_xscale,
+ .check_reset = megasas_check_reset_ppc,
+ .service_isr = megasas_isr,
+ .tasklet = megasas_complete_cmd_dpc,
+ .init_adapter = megasas_init_adapter_mfi,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd,
+ .issue_dcmd = megasas_issue_dcmd,
+};
+
+/**
+ * megasas_enable_intr_skinny - Enables interrupts
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_enable_intr_skinny(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
+
+ writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_skinny - Disables interrupt
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_disable_intr_skinny(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ u32 mask = 0xFFFFFFFF;
+ regs = instance->reg_set;
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_skinny - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
+{
+ return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_intr_skinny - Check & clear interrupt
+ * @regs: MFI register set
+ */
+static int
+megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
+{
+ u32 status;
+ u32 mfiStatus = 0;
+
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
+ return 0;
+ }
+
+ /*
+	 * Check if the firmware has entered the FAULT state
+ */
+ if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
+ MFI_STATE_FAULT) {
+ mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+ } else
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ writel(status, &regs->outbound_intr_status);
+
+ /*
+ * dummy read to flush PCI
+ */
+ readl(&regs->outbound_intr_status);
+
+ return mfiStatus;
+}
+
+/**
+ * megasas_fire_cmd_skinny - Sends command to the FW
+ * @frame_phys_addr : Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+static inline void
+megasas_fire_cmd_skinny(struct megasas_instance *instance,
+ dma_addr_t frame_phys_addr,
+ u32 frame_count,
+ struct megasas_register_set __iomem *regs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ writel(upper_32_bits(frame_phys_addr),
+ &(regs)->inbound_high_queue_port);
+ writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
+ &(regs)->inbound_low_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_check_reset_skinny - For controller reset check
+ * @regs: MFI register set
+ */
+static int
+megasas_check_reset_skinny(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ return 1;
+
+ return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_skinny = {
+
+ .fire_cmd = megasas_fire_cmd_skinny,
+ .enable_intr = megasas_enable_intr_skinny,
+ .disable_intr = megasas_disable_intr_skinny,
+ .clear_intr = megasas_clear_intr_skinny,
+ .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
+ .adp_reset = megasas_adp_reset_gen2,
+ .check_reset = megasas_check_reset_skinny,
+ .service_isr = megasas_isr,
+ .tasklet = megasas_complete_cmd_dpc,
+ .init_adapter = megasas_init_adapter_mfi,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd,
+ .issue_dcmd = megasas_issue_dcmd,
+};
+
+
+/**
+* The following functions are defined for gen2 (deviceid : 0x78 0x79)
+* controllers
+*/
+
+/**
+ * megasas_enable_intr_gen2 - Enables interrupts
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_enable_intr_gen2(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
+
+	/* write ~0x00000005 (bits 2 and 0) to the intr mask */
+ writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_disable_intr_gen2 - Disables interrupt
+ * @instance:	Adapter soft state
+ */
+static inline void
+megasas_disable_intr_gen2(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ u32 mask = 0xFFFFFFFF;
+ regs = instance->reg_set;
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+}
+
+/**
+ * megasas_read_fw_status_reg_gen2 - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
+{
+ return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_clear_intr_gen2 - Check & clear interrupt
+ * @regs: MFI register set
+ */
+static int
+megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
+{
+ u32 status;
+ u32 mfiStatus = 0;
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+ }
+ if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
+ mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+ }
+
+ /*
+ * Clear the interrupt by writing back the same value
+ */
+ if (mfiStatus)
+ writel(status, &regs->outbound_doorbell_clear);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_status);
+
+ return mfiStatus;
+}
+/**
+ * megasas_fire_cmd_gen2 - Sends command to the FW
+ * @frame_phys_addr : Physical address of cmd
+ * @frame_count : Number of frames for the command
+ * @regs : MFI register set
+ */
+static inline void
+megasas_fire_cmd_gen2(struct megasas_instance *instance,
+ dma_addr_t frame_phys_addr,
+ u32 frame_count,
+ struct megasas_register_set __iomem *regs)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ writel((frame_phys_addr | (frame_count<<1))|1,
+ &(regs)->inbound_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * megasas_adp_reset_gen2 - For controller reset
+ * @regs: MFI register set
+ */
+static int
+megasas_adp_reset_gen2(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *reg_set)
+{
+ u32 retry = 0 ;
+ u32 HostDiag;
+ u32 *seq_offset = &reg_set->seq_offset;
+ u32 *hostdiag_offset = &reg_set->host_diag;
+
+ if (instance->instancet == &megasas_instance_template_skinny) {
+ seq_offset = &reg_set->fusion_seq_offset;
+ hostdiag_offset = &reg_set->fusion_host_diag;
+ }
+
+ writel(0, seq_offset);
+ writel(4, seq_offset);
+ writel(0xb, seq_offset);
+ writel(2, seq_offset);
+ writel(7, seq_offset);
+ writel(0xd, seq_offset);
+
+ msleep(1000);
+
+ HostDiag = (u32)readl(hostdiag_offset);
+
+ while ( !( HostDiag & DIAG_WRITE_ENABLE) ) {
+ msleep(100);
+ HostDiag = (u32)readl(hostdiag_offset);
+ printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n",
+ retry, HostDiag);
+
+ if (retry++ >= 100)
+ return 1;
+
+ }
+
+ printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
+
+ writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
+
+ ssleep(10);
+
+ HostDiag = (u32)readl(hostdiag_offset);
+ while ( ( HostDiag & DIAG_RESET_ADAPTER) ) {
+ msleep(100);
+ HostDiag = (u32)readl(hostdiag_offset);
+ printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
+ retry, HostDiag);
+
+ if (retry++ >= 1000)
+ return 1;
+
+ }
+ return 0;
+}
+
+/**
+ * megasas_check_reset_gen2 - For controller reset check
+ * @regs: MFI register set
+ */
+static int
+megasas_check_reset_gen2(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static struct megasas_instance_template megasas_instance_template_gen2 = {
+
+ .fire_cmd = megasas_fire_cmd_gen2,
+ .enable_intr = megasas_enable_intr_gen2,
+ .disable_intr = megasas_disable_intr_gen2,
+ .clear_intr = megasas_clear_intr_gen2,
+ .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
+ .adp_reset = megasas_adp_reset_gen2,
+ .check_reset = megasas_check_reset_gen2,
+ .service_isr = megasas_isr,
+ .tasklet = megasas_complete_cmd_dpc,
+ .init_adapter = megasas_init_adapter_mfi,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd,
+ .issue_dcmd = megasas_issue_dcmd,
+};
+
+/**
+* This is the end of set of functions & definitions
+* specific to gen2 (deviceid : 0x78, 0x79) controllers
+*/
+
+/*
+ * Template added for TB (Fusion)
+ */
+extern struct megasas_instance_template megasas_instance_template_fusion;
+
+/**
+ * megasas_issue_polled - Issues a polling command
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be issued
+ *
+ * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
+ */
+int
+megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ int seconds;
+
+ struct megasas_header *frame_hdr = &cmd->frame->hdr;
+
+ frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
+ frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
+
+ /*
+ * Issue the frame using inbound queue port
+ */
+ instance->instancet->issue_dcmd(instance, cmd);
+
+ /*
+ * Wait for cmd_status to change
+ */
+ if (instance->requestorId)
+ seconds = MEGASAS_ROUTINE_WAIT_TIME_VF;
+ else
+ seconds = MFI_POLL_TIMEOUT_SECS;
+ return wait_and_poll(instance, cmd, seconds);
+}
+
+/**
+ * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
+ * @instance: Adapter soft state
+ * @cmd: Command to be issued
+ * @timeout: Timeout in seconds
+ *
+ * This function waits on an event for the command to be returned from ISR.
+ * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
+ * Used to issue ioctl commands.
+ */
+static int
+megasas_issue_blocked_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd, int timeout)
+{
+ int ret = 0;
+ cmd->cmd_status = ENODATA;
+
+ instance->instancet->issue_dcmd(instance, cmd);
+ if (timeout) {
+ ret = wait_event_timeout(instance->int_cmd_wait_q,
+ cmd->cmd_status != ENODATA, timeout * HZ);
+ if (!ret)
+ return 1;
+ } else
+ wait_event(instance->int_cmd_wait_q,
+ cmd->cmd_status != ENODATA);
+
+ return 0;
+}
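
Editor's note (not part of the patch): megasas_issue_blocked_cmd() follows the usual synchronous-wrapper shape — mark the command with a sentinel status, fire it, then sleep on a wait queue until the completion path rewrites the status or the timeout expires. A userspace sketch of the same shape using a pthread condition variable (the completion side would be driven by the ISR; names are illustrative):

#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int cmd_status = -1;              /* -1 plays the role of ENODATA */

void complete_cmd(int status)            /* called from the completion path */
{
	pthread_mutex_lock(&lock);
	cmd_status = status;
	pthread_cond_broadcast(&done);
	pthread_mutex_unlock(&lock);
}

int issue_blocked_cmd(int timeout_secs)  /* 0 on completion, 1 on timeout */
{
	struct timespec ts;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_secs;

	pthread_mutex_lock(&lock);
	cmd_status = -1;
	/* ...the real code fires the command to the firmware here... */
	while (cmd_status == -1 && rc == 0)
		rc = pthread_cond_timedwait(&done, &lock, &ts);
	pthread_mutex_unlock(&lock);

	return cmd_status == -1 ? 1 : 0;
}
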
+
+/**
+ * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
+ * @instance: Adapter soft state
+ * @cmd_to_abort: Previously issued cmd to be aborted
+ * @timeout: Timeout in seconds
+ *
+ * MFI firmware can abort a previously issued AEN command (automatic event
+ * notification). The megasas_issue_blocked_abort_cmd() issues such abort
+ * cmd and waits for return status.
+ * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
+ */
+static int
+megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd_to_abort, int timeout)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_abort_frame *abort_fr;
+ int ret = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return -1;
+
+ abort_fr = &cmd->frame->abort;
+
+ /*
+ * Prepare and issue the abort frame
+ */
+ abort_fr->cmd = MFI_CMD_ABORT;
+ abort_fr->cmd_status = 0xFF;
+ abort_fr->flags = cpu_to_le16(0);
+ abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
+ abort_fr->abort_mfi_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
+ abort_fr->abort_mfi_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
+
+ cmd->sync_cmd = 1;
+ cmd->cmd_status = 0xFF;
+
+ instance->instancet->issue_dcmd(instance, cmd);
+
+ if (timeout) {
+ ret = wait_event_timeout(instance->abort_cmd_wait_q,
+ cmd->cmd_status != ENODATA, timeout * HZ);
+ if (!ret) {
+			dev_err(&instance->pdev->dev, "Command timed out "
+				"from %s\n", __func__);
+ return 1;
+ }
+ } else
+ wait_event(instance->abort_cmd_wait_q,
+ cmd->cmd_status != ENODATA);
+
+ cmd->sync_cmd = 0;
+
+ megasas_return_cmd(instance, cmd);
+ return 0;
+}
+
+/**
+ * megasas_make_sgl32 - Prepares 32-bit SGL
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @mfi_sgl: SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static int
+megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ union megasas_sgl *mfi_sgl)
+{
+ int i;
+ int sge_count;
+ struct scatterlist *os_sgl;
+
+ sge_count = scsi_dma_map(scp);
+ BUG_ON(sge_count < 0);
+
+ if (sge_count) {
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
+ }
+ }
+ return sge_count;
+}
+
+/**
+ * megasas_make_sgl64 - Prepares 64-bit SGL
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @mfi_sgl: SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static int
+megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ union megasas_sgl *mfi_sgl)
+{
+ int i;
+ int sge_count;
+ struct scatterlist *os_sgl;
+
+ sge_count = scsi_dma_map(scp);
+ BUG_ON(sge_count < 0);
+
+ if (sge_count) {
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
+ }
+ }
+ return sge_count;
+}
+
+/**
+ * megasas_make_sgl_skinny - Prepares IEEE SGL
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @mfi_sgl: SGL to be filled in
+ *
+ * If successful, this function returns the number of SG elements. Otherwise,
+ * it returns -1.
+ */
+static int
+megasas_make_sgl_skinny(struct megasas_instance *instance,
+ struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
+{
+ int i;
+ int sge_count;
+ struct scatterlist *os_sgl;
+
+ sge_count = scsi_dma_map(scp);
+
+ if (sge_count) {
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ mfi_sgl->sge_skinny[i].length =
+ cpu_to_le32(sg_dma_len(os_sgl));
+ mfi_sgl->sge_skinny[i].phys_addr =
+ cpu_to_le64(sg_dma_address(os_sgl));
+ mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
+ }
+ }
+ return sge_count;
+}
+
+ /**
+ * megasas_get_frame_count - Computes the number of frames
+ * @instance	: Adapter soft state
+ * @sge_count	: number of sg elements
+ * @frame_type	: type of frame - io or pthru frame
+ *
+ * Returns the number of frames required for the given number of SGEs (sge_count)
+ */
+
+static u32 megasas_get_frame_count(struct megasas_instance *instance,
+ u8 sge_count, u8 frame_type)
+{
+ int num_cnt;
+ int sge_bytes;
+ u32 sge_sz;
+ u32 frame_count=0;
+
+ sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+ sizeof(struct megasas_sge32);
+
+ if (instance->flag_ieee) {
+ sge_sz = sizeof(struct megasas_sge_skinny);
+ }
+
+ /*
+	 * The main frame can hold 2 SGEs for 64-bit SGLs and
+	 * 3 SGEs for 32-bit SGLs for an LDIO frame, and
+	 * 1 SGE for 64-bit SGLs and
+	 * 2 SGEs for 32-bit SGLs for a pthru frame
+ */
+ if (unlikely(frame_type == PTHRU_FRAME)) {
+ if (instance->flag_ieee == 1) {
+ num_cnt = sge_count - 1;
+ } else if (IS_DMA64)
+ num_cnt = sge_count - 1;
+ else
+ num_cnt = sge_count - 2;
+ } else {
+ if (instance->flag_ieee == 1) {
+ num_cnt = sge_count - 1;
+ } else if (IS_DMA64)
+ num_cnt = sge_count - 2;
+ else
+ num_cnt = sge_count - 3;
+ }
+
+ if(num_cnt>0){
+ sge_bytes = sge_sz * num_cnt;
+
+ frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
+ ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
+ }
+ /* Main frame */
+ frame_count +=1;
+
+ if (frame_count > 7)
+ frame_count = 8;
+ return frame_count;
+}
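
Editor's note (not part of the patch): the arithmetic above boils down to "one main frame, plus however many extra frames are needed for the SGEs that do not fit in the main frame, capped at 8". A standalone restatement, parameterised so the exact SGE and frame sizes stay out of the sketch (the 12-byte/64-byte figures in main() are illustrative assumptions, not quoted from this header):

#include <stdint.h>
#include <stdio.h>

static uint32_t mfi_frame_count(int sge_count, int sges_in_main_frame,
				unsigned int sge_sz, unsigned int frame_sz)
{
	int extra = sge_count - sges_in_main_frame;
	uint32_t frames = 1;                     /* the main frame itself */

	if (extra > 0) {
		unsigned int bytes = (unsigned int)extra * sge_sz;

		frames += bytes / frame_sz + (bytes % frame_sz ? 1 : 0);
	}
	return frames > 8 ? 8 : frames;          /* capped at 8 frames */
}

int main(void)
{
	/* 64-bit SGL LDIO case: 2 SGEs fit in the main frame */
	for (int n = 1; n <= 12; n++)
		printf("sge_count=%2d -> %u frame(s)\n", n,
		       (unsigned)mfi_frame_count(n, 2, 12, 64));
	return 0;
}
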
+
+/**
+ * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared in
+ *
+ * This function prepares CDB commands. These are typically pass-through
+ * commands to the devices.
+ */
+static int
+megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ struct megasas_cmd *cmd)
+{
+ u32 is_logical;
+ u32 device_id;
+ u16 flags = 0;
+ struct megasas_pthru_frame *pthru;
+
+ is_logical = MEGASAS_IS_LOGICAL(scp);
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+ pthru = (struct megasas_pthru_frame *)cmd->frame;
+
+ if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ flags = MFI_FRAME_DIR_WRITE;
+ else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ flags = MFI_FRAME_DIR_READ;
+ else if (scp->sc_data_direction == PCI_DMA_NONE)
+ flags = MFI_FRAME_DIR_NONE;
+
+ if (instance->flag_ieee == 1) {
+ flags |= MFI_FRAME_IEEE;
+ }
+
+ /*
+ * Prepare the DCDB frame
+ */
+ pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
+ pthru->cmd_status = 0x0;
+ pthru->scsi_status = 0x0;
+ pthru->target_id = device_id;
+ pthru->lun = scp->device->lun;
+ pthru->cdb_len = scp->cmd_len;
+ pthru->timeout = 0;
+ pthru->pad_0 = 0;
+ pthru->flags = cpu_to_le16(flags);
+ pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
+
+ memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
+
+ /*
+ * If the command is for the tape device, set the
+ * pthru timeout to the os layer timeout value.
+ */
+ if (scp->device->type == TYPE_TAPE) {
+ if ((scp->request->timeout / HZ) > 0xFFFF)
+ pthru->timeout = 0xFFFF;
+ else
+ pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
+ }
+
+ /*
+ * Construct SGL
+ */
+ if (instance->flag_ieee == 1) {
+ pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+ pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
+ &pthru->sgl);
+ } else if (IS_DMA64) {
+ pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+ pthru->sge_count = megasas_make_sgl64(instance, scp,
+ &pthru->sgl);
+ } else
+ pthru->sge_count = megasas_make_sgl32(instance, scp,
+ &pthru->sgl);
+
+ if (pthru->sge_count > instance->max_num_sge) {
+		printk(KERN_ERR "megasas: DCDB too many SGEs NUM=%x\n",
+ pthru->sge_count);
+ return 0;
+ }
+
+ /*
+ * Sense info specific
+ */
+ pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
+ pthru->sense_buf_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
+ pthru->sense_buf_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
+
+ /*
+ * Compute the total number of frames this command consumes. FW uses
+ * this number to pull sufficient number of frames from host memory.
+ */
+ cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
+ PTHRU_FRAME);
+
+ return cmd->frame_count;
+}
+
+/**
+ * megasas_build_ldio - Prepares IOs to logical devices
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared
+ *
+ * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
+ */
+static int
+megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
+ struct megasas_cmd *cmd)
+{
+ u32 device_id;
+ u8 sc = scp->cmnd[0];
+ u16 flags = 0;
+ struct megasas_io_frame *ldio;
+
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+ ldio = (struct megasas_io_frame *)cmd->frame;
+
+ if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ flags = MFI_FRAME_DIR_WRITE;
+ else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ flags = MFI_FRAME_DIR_READ;
+
+ if (instance->flag_ieee == 1) {
+ flags |= MFI_FRAME_IEEE;
+ }
+
+ /*
+ * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
+ */
+ ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
+ ldio->cmd_status = 0x0;
+ ldio->scsi_status = 0x0;
+ ldio->target_id = device_id;
+ ldio->timeout = 0;
+ ldio->reserved_0 = 0;
+ ldio->pad_0 = 0;
+ ldio->flags = cpu_to_le16(flags);
+ ldio->start_lba_hi = 0;
+ ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
+
+ /*
+ * 6-byte READ(0x08) or WRITE(0x0A) cdb
+ */
+ if (scp->cmd_len == 6) {
+ ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
+ ((u32) scp->cmnd[2] << 8) |
+ (u32) scp->cmnd[3]);
+
+ ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
+ }
+
+ /*
+ * 10-byte READ(0x28) or WRITE(0x2A) cdb
+ */
+ else if (scp->cmd_len == 10) {
+ ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
+ ((u32) scp->cmnd[7] << 8));
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
+ }
+
+ /*
+ * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+ */
+ else if (scp->cmd_len == 12) {
+ ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) |
+ (u32) scp->cmnd[9]);
+
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
+ }
+
+ /*
+ * 16-byte READ(0x88) or WRITE(0x8A) cdb
+ */
+ else if (scp->cmd_len == 16) {
+ ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
+ ((u32) scp->cmnd[11] << 16) |
+ ((u32) scp->cmnd[12] << 8) |
+ (u32) scp->cmnd[13]);
+
+ ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) |
+ (u32) scp->cmnd[9]);
+
+ ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) |
+ (u32) scp->cmnd[5]);
+
+ }
+
+ /*
+ * Construct SGL
+ */
+ if (instance->flag_ieee) {
+ ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+ ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
+ &ldio->sgl);
+ } else if (IS_DMA64) {
+ ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
+ ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
+ } else
+ ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
+
+ if (ldio->sge_count > instance->max_num_sge) {
+ printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
+ ldio->sge_count);
+ return 0;
+ }
+
+ /*
+ * Sense info specific
+ */
+ ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
+ ldio->sense_buf_phys_addr_hi = 0;
+ ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
+
+ /*
+ * Compute the total number of frames this command consumes. FW uses
+ * this number to pull sufficient number of frames from host memory.
+ */
+ cmd->frame_count = megasas_get_frame_count(instance,
+ ldio->sge_count, IO_FRAME);
+
+ return cmd->frame_count;
+}
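
Editor's note (not part of the patch): the CDB parsing above is plain big-endian byte assembly. For reference, a standalone decode of a 10-byte READ/WRITE CDB mirroring the shifts used in megasas_build_ldio():

#include <stdint.h>
#include <stdio.h>

static void decode_rw10(const uint8_t cdb[10], uint32_t *lba, uint32_t *blocks)
{
	*lba    = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
		  ((uint32_t)cdb[4] << 8)  |  (uint32_t)cdb[5];
	*blocks = ((uint32_t)cdb[7] << 8)  |  (uint32_t)cdb[8];
}

int main(void)
{
	/* READ(10) of 8 blocks starting at LBA 0x12345 */
	const uint8_t cdb[10] = { 0x28, 0, 0x00, 0x01, 0x23, 0x45, 0, 0x00, 0x08, 0 };
	uint32_t lba, blocks;

	decode_rw10(cdb, &lba, &blocks);
	printf("lba=0x%x blocks=%u\n", (unsigned)lba, (unsigned)blocks);
	return 0;
}
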
+
+/**
+ * megasas_is_ldio - Checks if the cmd is for logical drive
+ * @scmd: SCSI command
+ *
+ * Called by megasas_queue_command to find out if the command to be queued
+ * is a logical drive command
+ */
+inline int megasas_is_ldio(struct scsi_cmnd *cmd)
+{
+ if (!MEGASAS_IS_LOGICAL(cmd))
+ return 0;
+ switch (cmd->cmnd[0]) {
+ case READ_10:
+ case WRITE_10:
+ case READ_12:
+ case WRITE_12:
+ case READ_6:
+ case WRITE_6:
+ case READ_16:
+ case WRITE_16:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+ /**
+ * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
+ * in FW
+ * @instance: Adapter soft state
+ */
+static inline void
+megasas_dump_pending_frames(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+ int i,n;
+ union megasas_sgl *mfi_sgl;
+ struct megasas_io_frame *ldio;
+ struct megasas_pthru_frame *pthru;
+ u32 sgcount;
+ u32 max_cmd = instance->max_fw_cmds;
+
+ printk(KERN_ERR "\nmegasas[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
+ printk(KERN_ERR "megasas[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
+ if (IS_DMA64)
+ printk(KERN_ERR "\nmegasas[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
+ else
+ printk(KERN_ERR "\nmegasas[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
+
+ printk(KERN_ERR "megasas[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
+ for (i = 0; i < max_cmd; i++) {
+ cmd = instance->cmd_list[i];
+ if(!cmd->scmd)
+ continue;
+ printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
+ if (megasas_is_ldio(cmd->scmd)){
+ ldio = (struct megasas_io_frame *)cmd->frame;
+ mfi_sgl = &ldio->sgl;
+ sgcount = ldio->sge_count;
+ printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
+ " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+ instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
+ le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
+ le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
+ }
+ else {
+ pthru = (struct megasas_pthru_frame *) cmd->frame;
+ mfi_sgl = &pthru->sgl;
+ sgcount = pthru->sge_count;
+ printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
+ "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
+ instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
+ pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
+ le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
+ }
+ if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
+ for (n = 0; n < sgcount; n++){
+ if (IS_DMA64)
+ printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ",
+ le32_to_cpu(mfi_sgl->sge64[n].length),
+ le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
+ else
+ printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
+ le32_to_cpu(mfi_sgl->sge32[n].length),
+ le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
+ }
+ }
+ printk(KERN_ERR "\n");
+ } /*for max_cmd*/
+ printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = instance->cmd_list[i];
+
+ if(cmd->sync_cmd == 1){
+ printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
+ }
+ }
+ printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no);
+}
+
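+/**
+ * megasas_build_and_issue_cmd -	Build an MFI frame for a SCSI cmd and fire it
+ * @instance:				Adapter soft state
+ * @scmd:				SCSI command from the mid-layer
+ *
+ * Wrapper around megasas_build_ldio()/megasas_build_dcdb(); returns 0 on
+ * success and a non-zero value if the command could not be issued.
+ */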
+u32
+megasas_build_and_issue_cmd(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ struct megasas_cmd *cmd;
+ u32 frame_count;
+
+ cmd = megasas_get_cmd(instance);
+ if (!cmd)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /*
+ * Logical drive command
+ */
+ if (megasas_is_ldio(scmd))
+ frame_count = megasas_build_ldio(instance, scmd, cmd);
+ else
+ frame_count = megasas_build_dcdb(instance, scmd, cmd);
+
+ if (!frame_count)
+ goto out_return_cmd;
+
+ cmd->scmd = scmd;
+ scmd->SCp.ptr = (char *)cmd;
+
+ /*
+ * Issue the command to the FW
+ */
+ atomic_inc(&instance->fw_outstanding);
+
+ instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
+ cmd->frame_count-1, instance->reg_set);
+
+ return 0;
+out_return_cmd:
+ megasas_return_cmd(instance, cmd);
+ return 1;
+}
+
+
+/**
+ * megasas_queue_command - Queue entry point
+ * @scmd: SCSI command to be queued
+ * @done: Callback entry point
+ */
+static int
+megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
+{
+ struct megasas_instance *instance;
+ unsigned long flags;
+
+ instance = (struct megasas_instance *)
+ scmd->device->host->hostdata;
+
+ if (instance->issuepend_done == 0)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+
+ /* Check for an mpio path and adjust behavior */
+ if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+ if (megasas_check_mpio_paths(instance, scmd) ==
+ (DID_RESET << 16)) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ } else {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ scmd->result = DID_NO_CONNECT << 16;
+ done(scmd);
+ return 0;
+ }
+ }
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ scmd->result = DID_NO_CONNECT << 16;
+ done(scmd);
+ return 0;
+ }
+
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ scmd->scsi_done = done;
+ scmd->result = 0;
+
+ if (MEGASAS_IS_LOGICAL(scmd) &&
+ (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) {
+ scmd->result = DID_BAD_TARGET << 16;
+ goto out_done;
+ }
+
+ switch (scmd->cmnd[0]) {
+ case SYNCHRONIZE_CACHE:
+ /*
+ * FW takes care of flush cache on its own
+ * No need to send it down
+ */
+ scmd->result = DID_OK << 16;
+ goto out_done;
+ default:
+ break;
+ }
+
+ if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
+ printk(KERN_ERR "megasas: Err returned from build_and_issue_cmd\n");
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ return 0;
+
+ out_done:
+ done(scmd);
+ return 0;
+}
+
+static DEF_SCSI_QCMD(megasas_queue_command)
+
+static struct megasas_instance *megasas_lookup_instance(u16 host_no)
+{
+ int i;
+
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+
+ if ((megasas_mgmt_info.instance[i]) &&
+ (megasas_mgmt_info.instance[i]->host->host_no == host_no))
+ return megasas_mgmt_info.instance[i];
+ }
+
+ return NULL;
+}
+
+static int megasas_slave_configure(struct scsi_device *sdev)
+{
+ u16 pd_index = 0;
+	struct megasas_instance *instance;
+
+ instance = megasas_lookup_instance(sdev->host->host_no);
+
+ /*
+ * Don't export physical disk devices to the disk driver.
+ *
+ * FIXME: Currently we don't export them to the midlayer at all.
+ * That will be fixed once LSI engineers have audited the
+ * firmware for possible issues.
+ */
+ if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
+ sdev->type == TYPE_DISK) {
+ pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
+ blk_queue_rq_timeout(sdev->request_queue,
+ MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+ return 0;
+ }
+ return -ENXIO;
+ }
+
+ /*
+ * The RAID firmware may require extended timeouts.
+ */
+ blk_queue_rq_timeout(sdev->request_queue,
+ MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+ return 0;
+}
+
+static int megasas_slave_alloc(struct scsi_device *sdev)
+{
+ u16 pd_index = 0;
+	struct megasas_instance *instance;
+ instance = megasas_lookup_instance(sdev->host->host_no);
+ if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) &&
+ (sdev->type == TYPE_DISK)) {
+ /*
+ * Open the OS scan to the SYSTEM PD
+ */
+ pd_index =
+ (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;
+ if ((instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) &&
+ (instance->pd_list[pd_index].driveType ==
+ TYPE_DISK)) {
+ return 0;
+ }
+ return -ENXIO;
+ }
+ return 0;
+}
+
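+/**
+ * megaraid_sas_kill_hba -	Stop the controller
+ * @instance:			Adapter soft state
+ *
+ * Writes MFI_STOP_ADP to the appropriate doorbell register so that the FW
+ * stops processing any further commands.
+ */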
+void megaraid_sas_kill_hba(struct megasas_instance *instance)
+{
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
+ /* Flush */
+ readl(&instance->reg_set->doorbell);
+ if (instance->mpio && instance->requestorId)
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ } else {
+ writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
+ }
+}
+
+/**
+ * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
+ * restored to max value
+ * @instance: Adapter soft state
+ *
+ */
+void
+megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
+{
+ unsigned long flags;
+ if (instance->flag & MEGASAS_FW_BUSY
+ && time_after(jiffies, instance->last_time + 5 * HZ)
+ && atomic_read(&instance->fw_outstanding) <
+ instance->throttlequeuedepth + 1) {
+
+ spin_lock_irqsave(instance->host->host_lock, flags);
+ instance->flag &= ~MEGASAS_FW_BUSY;
+ if (instance->is_imr) {
+ instance->host->can_queue =
+ instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
+ } else
+ instance->host->can_queue =
+ instance->max_fw_cmds - MEGASAS_INT_CMDS;
+
+ spin_unlock_irqrestore(instance->host->host_lock, flags);
+ }
+}
+
+/**
+ * megasas_complete_cmd_dpc -	Completes outstanding commands
+ * @instance_addr: Address of adapter soft state
+ *
+ * Tasklet to complete cmds
+ */
+static void megasas_complete_cmd_dpc(unsigned long instance_addr)
+{
+ u32 producer;
+ u32 consumer;
+ u32 context;
+ struct megasas_cmd *cmd;
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+ unsigned long flags;
+
+	/* If we have already declared the adapter dead, do not complete cmds */
+	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ return;
+
+ spin_lock_irqsave(&instance->completion_lock, flags);
+
+ producer = le32_to_cpu(*instance->producer);
+ consumer = le32_to_cpu(*instance->consumer);
+
+ while (consumer != producer) {
+ context = le32_to_cpu(instance->reply_queue[consumer]);
+ if (context >= instance->max_fw_cmds) {
+ printk(KERN_ERR "Unexpected context value %x\n",
+ context);
+ BUG();
+ }
+
+ cmd = instance->cmd_list[context];
+
+ megasas_complete_cmd(instance, cmd, DID_OK);
+
+ consumer++;
+ if (consumer == (instance->max_fw_cmds + 1)) {
+ consumer = 0;
+ }
+ }
+
+ *instance->consumer = cpu_to_le32(producer);
+
+ spin_unlock_irqrestore(&instance->completion_lock, flags);
+
+ /*
+ * Check if we can restore can_queue
+ */
+ megasas_check_and_restore_queue_depth(instance);
+}
+
+/**
+ * megasas_start_timer - Initializes a timer object
+ * @instance: Adapter soft state
+ * @timer: timer object to be initialized
+ * @fn: timer function
+ * @interval: time interval between timer function call
+ *
+ */
+void megasas_start_timer(struct megasas_instance *instance,
+ struct timer_list *timer,
+ void *fn, unsigned long interval)
+{
+ init_timer(timer);
+ timer->expires = jiffies + interval;
+ timer->data = (unsigned long)instance;
+ timer->function = fn;
+ add_timer(timer);
+}
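+/*
+ * Typical usage (illustrative sketch; the actual call site is outside this
+ * hunk): arming the SR-IOV heartbeat timer with the handler defined below.
+ *
+ *	megasas_start_timer(instance, &instance->sriov_heartbeat_timer,
+ *			    megasas_sriov_heartbeat_handler,
+ *			    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ */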
+
+static void
+megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
+
+static void
+process_fw_state_change_wq(struct work_struct *work);
+
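+/**
+ * megasas_do_ocr -	Kick off online controller reset (OCR) handling
+ * @instance:		Adapter soft state
+ *
+ * Disables interrupts, marks the adapter as being in the internal reset
+ * state machine, defers the currently pending commands and runs the FW
+ * state change work to restart the controller.
+ */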
+void megasas_do_ocr(struct megasas_instance *instance)
+{
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
+ (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
+ *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
+ }
+ instance->instancet->disable_intr(instance);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ instance->issuepend_done = 0;
+
+ atomic_set(&instance->fw_outstanding, 0);
+ megasas_internal_reset_defer_cmds(instance);
+ process_fw_state_change_wq(&instance->work_init);
+}
+
+/* This function will get the current SR-IOV LD/VF affiliation */
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
+ struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
+ struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
+ dma_addr_t new_affiliation_h;
+ dma_addr_t new_affiliation_111_h;
+ int ld, retval = 0;
+ u8 thisVf;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_get_ld_vf_"
+ "affiliation: Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (!instance->vf_affiliation && !instance->vf_affiliation_111) {
+ printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
+ "affiliation for scsi%d.\n", instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ if (initial)
+ if (instance->PlasmaFW111)
+ memset(instance->vf_affiliation_111, 0,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ else
+ memset(instance->vf_affiliation, 0,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ else {
+ if (instance->PlasmaFW111)
+ new_affiliation_111 =
+ pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &new_affiliation_111_h);
+ else
+ new_affiliation =
+ pci_alloc_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &new_affiliation_h);
+ if (!new_affiliation && !new_affiliation_111) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
+ "memory for new affiliation for scsi%d.\n",
+ instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+ if (instance->PlasmaFW111)
+ memset(new_affiliation_111, 0,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ else
+ memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ if (instance->PlasmaFW111) {
+ dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
+ } else {
+ dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
+ }
+
+ if (initial) {
+ if (instance->PlasmaFW111)
+ dcmd->sgl.sge32[0].phys_addr =
+ instance->vf_affiliation_111_h;
+ else
+ dcmd->sgl.sge32[0].phys_addr =
+ instance->vf_affiliation_h;
+ } else {
+ if (instance->PlasmaFW111)
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
+ else
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
+ }
+ if (instance->PlasmaFW111)
+ dcmd->sgl.sge32[0].length =
+ sizeof(struct MR_LD_VF_AFFILIATION_111);
+ else
+ dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+ "scsi%d\n", instance->host->host_no);
+
+ megasas_issue_blocked_cmd(instance, cmd, 0);
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
+ " failed with status 0x%x for scsi%d.\n",
+ dcmd->cmd_status, instance->host->host_no);
+ retval = 1; /* Do a scan if we couldn't get affiliation */
+ goto out;
+ }
+
+ if (!initial) {
+ if (instance->PlasmaFW111) {
+ if (!new_affiliation_111->vdCount) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new "
+ "LD/VF affiliation for passive path "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ thisVf = new_affiliation_111->thisVf;
+ for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
+ if (instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "Got new LD/VF affiliation "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ memcpy(instance->vf_affiliation_111,
+ new_affiliation_111,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
+ retval = 1;
+ goto out;
+ }
+ } else {
+ if (!new_affiliation->ldCount) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new "
+ "LD/VF affiliation for passive "
+ "path for scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ newmap = new_affiliation->map;
+ savedmap = instance->vf_affiliation->map;
+ thisVf = new_affiliation->thisVf;
+ for (ld = 0 ; ld < new_affiliation->ldCount; ld++) {
+ if (savedmap->policy[thisVf] !=
+ newmap->policy[thisVf]) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "Got new LD/VF affiliation "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ memcpy(instance->vf_affiliation,
+ new_affiliation,
+ new_affiliation->size);
+ retval = 1;
+ goto out;
+ }
+ savedmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)savedmap +
+ savedmap->size);
+ newmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)newmap +
+ newmap->size);
+ }
+ }
+ }
+out:
+ if (new_affiliation) {
+ if (instance->PlasmaFW111)
+ pci_free_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ new_affiliation_111,
+ new_affiliation_111_h);
+ else
+ pci_free_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ new_affiliation, new_affiliation_h);
+ }
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
+/* This function will tell FW to start the SR-IOV heartbeat */
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ int retval = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_sriov_start_heartbeat: "
+ "Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (initial) {
+ instance->hb_host_mem =
+ pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_CTRL_HB_HOST_MEM),
+ &instance->hb_host_mem_h);
+ if (!instance->hb_host_mem) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate"
+ " memory for heartbeat host memory for "
+ "scsi%d.\n", instance->host->host_no);
+ retval = -ENOMEM;
+ goto out;
+ }
+ memset(instance->hb_host_mem, 0,
+ sizeof(struct MR_CTRL_HB_HOST_MEM));
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.s[0] = sizeof(struct MR_CTRL_HB_HOST_MEM);
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = sizeof(struct MR_CTRL_HB_HOST_MEM);
+ dcmd->opcode = MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC;
+ dcmd->sgl.sge32[0].phys_addr = instance->hb_host_mem_h;
+ dcmd->sgl.sge32[0].length = sizeof(struct MR_CTRL_HB_HOST_MEM);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n",
+ instance->host->host_no);
+
+ if (!megasas_issue_polled(instance, cmd)) {
+ retval = 0;
+ } else {
+ printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+ "_MEM_ALLOC DCMD timed out for scsi%d\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
+ "_MEM_ALLOC DCMD failed with status 0x%x for scsi%d\n",
+ dcmd->cmd_status,
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+
+out:
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
+/* Handler for SR-IOV heartbeat */
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
+{
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+
+ if (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ mod_timer(&instance->sriov_heartbeat_timer,
+ jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ } else {
+ printk(KERN_WARNING "megasas: SR-IOV: Heartbeat never "
+ "completed for scsi%d\n", instance->host->host_no);
+ schedule_work(&instance->work_init);
+ }
+}
+
+/**
+ * megasas_wait_for_outstanding - Wait for all outstanding cmds
+ * @instance: Adapter soft state
+ *
+ * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
+ * complete all its outstanding commands. Returns error if one or more IOs
+ * are pending after this time period. It also marks the controller dead.
+ */
+static int megasas_wait_for_outstanding(struct megasas_instance *instance)
+{
+ int i;
+ u32 reset_index;
+ u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+ u8 adprecovery;
+ unsigned long flags;
+ struct list_head clist_local;
+ struct megasas_cmd *reset_cmd;
+ u32 fw_state;
+ u8 kill_adapter_flag;
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ adprecovery = instance->adprecovery;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+
+ INIT_LIST_HEAD(&clist_local);
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ list_splice_init(&instance->internal_reset_pending_q,
+ &clist_local);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ printk(KERN_NOTICE "megasas: HBA reset wait ...\n");
+ for (i = 0; i < wait_time; i++) {
+ msleep(1000);
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ adprecovery = instance->adprecovery;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ if (adprecovery == MEGASAS_HBA_OPERATIONAL)
+ break;
+ }
+
+ if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ printk(KERN_NOTICE "megasas: reset: Stopping HBA.\n");
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return FAILED;
+ }
+
+ reset_index = 0;
+ while (!list_empty(&clist_local)) {
+ reset_cmd = list_entry((&clist_local)->next,
+ struct megasas_cmd, list);
+ list_del_init(&reset_cmd->list);
+ if (reset_cmd->scmd) {
+ reset_cmd->scmd->result = DID_RESET << 16;
+ printk(KERN_NOTICE "%d:%p reset [%02x]\n",
+ reset_index, reset_cmd,
+ reset_cmd->scmd->cmnd[0]);
+
+ reset_cmd->scmd->scsi_done(reset_cmd->scmd);
+ megasas_return_cmd(instance, reset_cmd);
+ } else if (reset_cmd->sync_cmd) {
+				printk(KERN_NOTICE "megasas: %p sync cmd on the"
+						" reset queue\n",
+ reset_cmd);
+
+ reset_cmd->cmd_status = ENODATA;
+ instance->instancet->fire_cmd(instance,
+ reset_cmd->frame_phys_addr,
+ 0, instance->reg_set);
+ } else {
+ printk(KERN_NOTICE "megasas: %p unexpected"
+					" cmd on the reset list\n",
+ reset_cmd);
+ }
+ reset_index++;
+ }
+
+ return SUCCESS;
+ }
+
+ for (i = 0; i < resetwaittime; i++) {
+
+ int outstanding = atomic_read(&instance->fw_outstanding);
+
+ if (!outstanding)
+ break;
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			printk(KERN_NOTICE "megasas: [%2d] waiting for %d "
+			       "commands to complete\n", i, outstanding);
+ /*
+			 * Call the cmd completion routine. Cmds are
+			 * completed directly without depending on the ISR.
+ */
+ megasas_complete_cmd_dpc((unsigned long)instance);
+ }
+
+ msleep(1000);
+ }
+
+ i = 0;
+ kill_adapter_flag = 0;
+ do {
+ fw_state = instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ if ((fw_state == MFI_STATE_FAULT) &&
+ (instance->disableOnlineCtrlReset == 0)) {
+ if (i == 3) {
+ kill_adapter_flag = 2;
+ break;
+ }
+ megasas_do_ocr(instance);
+ kill_adapter_flag = 1;
+
+ /* wait for 1 secs to let FW finish the pending cmds */
+ msleep(1000);
+ }
+ i++;
+ } while (i <= 3);
+
+ if (atomic_read(&instance->fw_outstanding) &&
+ !kill_adapter_flag) {
+ if (instance->disableOnlineCtrlReset == 0) {
+
+ megasas_do_ocr(instance);
+
+ /* wait for 5 secs to let FW finish the pending cmds */
+ for (i = 0; i < wait_time; i++) {
+ int outstanding =
+ atomic_read(&instance->fw_outstanding);
+ if (!outstanding)
+ return SUCCESS;
+ msleep(1000);
+ }
+ }
+ }
+
+ if (atomic_read(&instance->fw_outstanding) ||
+ (kill_adapter_flag == 2)) {
+ printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
+ /*
+ * Send signal to FW to stop processing any pending cmds.
+ * The controller will be taken offline by the OS now.
+ */
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+ writel(MFI_STOP_ADP,
+ &instance->reg_set->doorbell);
+ } else {
+ writel(MFI_STOP_ADP,
+ &instance->reg_set->inbound_doorbell);
+ }
+ megasas_dump_pending_frames(instance);
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return FAILED;
+ }
+
+ printk(KERN_NOTICE "megaraid_sas: no pending cmds after reset\n");
+
+ return SUCCESS;
+}
+
+/**
+ * megasas_generic_reset - Generic reset routine
+ * @scmd: Mid-layer SCSI command
+ *
+ * This routine implements a generic reset handler for device, bus and host
+ * reset requests. Device, bus and host specific reset handlers can use this
+ * function after they do their specific tasks.
+ */
+static int megasas_generic_reset(struct scsi_cmnd *scmd)
+{
+ int ret_val;
+ struct megasas_instance *instance;
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
+ scmd->cmnd[0], scmd->retries);
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ printk(KERN_ERR "megasas: cannot recover from previous reset "
+ "failures\n");
+ return FAILED;
+ }
+
+ ret_val = megasas_wait_for_outstanding(instance);
+ if (ret_val == SUCCESS)
+		printk(KERN_NOTICE "megasas: reset successful\n");
+ else
+ printk(KERN_ERR "megasas: failed to do reset\n");
+
+ return ret_val;
+}
+
+/**
+ * megasas_reset_timer - quiesce the adapter if required
+ * @scmd: scsi cmnd
+ *
+ * Sets the FW busy flag and reduces the host->can_queue if the
+ * cmd has not been completed within the timeout period.
+ */
+static enum blk_eh_timer_return
+megasas_reset_timer(struct scsi_cmnd *scmd)
+{
+ struct megasas_instance *instance;
+ unsigned long flags;
+
+ if (time_after(jiffies, scmd->jiffies_at_alloc +
+ (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
+ return BLK_EH_NOT_HANDLED;
+ }
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+ if (!(instance->flag & MEGASAS_FW_BUSY)) {
+		/* FW appears busy: throttle IO by reducing can_queue */
+ spin_lock_irqsave(instance->host->host_lock, flags);
+
+ instance->host->can_queue = instance->throttlequeuedepth;
+ instance->last_time = jiffies;
+ instance->flag |= MEGASAS_FW_BUSY;
+
+ spin_unlock_irqrestore(instance->host->host_lock, flags);
+ }
+ return BLK_EH_RESET_TIMER;
+}
+
+/**
+ * megasas_reset_device - Device reset handler entry point
+ */
+static int megasas_reset_device(struct scsi_cmnd *scmd)
+{
+ int ret;
+
+ /*
+ * First wait for all commands to complete
+ */
+ ret = megasas_generic_reset(scmd);
+
+ return ret;
+}
+
+/**
+ * megasas_reset_bus_host - Bus & host reset handler entry point
+ */
+static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
+{
+ int ret;
+ struct megasas_instance *instance;
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ /*
+ * First wait for all commands to complete
+ */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ ret = megasas_reset_fusion(scmd->device->host, 1);
+ else
+ ret = megasas_generic_reset(scmd);
+
+ return ret;
+}
+
+/**
+ * megasas_bios_param - Returns disk geometry for a disk
+ * @sdev: device handle
+ * @bdev: block device
+ * @capacity: drive capacity
+ * @geom: geometry parameters
+ */
+static int
+megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ int heads;
+ int sectors;
+ sector_t cylinders;
+ unsigned long tmp;
+ /* Default heads (64) & sectors (32) */
+ heads = 64;
+ sectors = 32;
+
+ tmp = heads * sectors;
+ cylinders = capacity;
+
+ sector_div(cylinders, tmp);
+
+ /*
+ * Handle extended translation size for logical drives > 1Gb
+ */
+
+ if (capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ tmp = heads*sectors;
+ cylinders = capacity;
+ sector_div(cylinders, tmp);
+ }
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return 0;
+}
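+/*
+ * Worked example for the extended translation above (illustrative): a 64 GiB
+ * logical drive has capacity = 134217728 sectors (>= 0x200000), so
+ * heads = 255, sectors = 63 and cylinders = 134217728 / (255 * 63) = 8354.
+ */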
+
+static void megasas_aen_polling(struct work_struct *work);
+
+/**
+ * megasas_service_aen - Processes an event notification
+ * @instance: Adapter soft state
+ * @cmd: AEN command completed by the ISR
+ *
+ * For AEN, driver sends a command down to FW that is held by the FW till an
+ * event occurs. When an event of interest occurs, FW completes the command
+ * that it was previously holding.
+ *
+ * This routine sends a SIGIO signal to processes that have registered with the
+ * driver for AEN.
+ */
+static void
+megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ unsigned long flags;
+ /*
+ * Don't signal app if it is just an aborted previously registered aen
+ */
+ if ((!cmd->abort_aen) && (instance->unload == 0)) {
+ spin_lock_irqsave(&poll_aen_lock, flags);
+ megasas_poll_wait_aen = 1;
+ spin_unlock_irqrestore(&poll_aen_lock, flags);
+ wake_up(&megasas_poll_wait);
+ kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
+ }
+ else
+ cmd->abort_aen = 0;
+
+ instance->aen_cmd = NULL;
+ megasas_return_cmd(instance, cmd);
+
+ if ((instance->unload == 0) &&
+ ((instance->issuepend_done == 1))) {
+ struct megasas_aen_event *ev;
+ ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+ if (!ev) {
+ printk(KERN_ERR "megasas_service_aen: out of memory\n");
+ } else {
+ ev->instance = instance;
+ instance->ev = ev;
+ INIT_DELAYED_WORK(&ev->hotplug_work,
+ megasas_aen_polling);
+ schedule_delayed_work(&ev->hotplug_work, 0);
+ }
+ }
+}
+
+static int megasas_change_queue_depth(struct scsi_device *sdev,
+ int queue_depth, int reason)
+{
+ if (reason != SCSI_QDEPTH_DEFAULT)
+ return -EOPNOTSUPP;
+
+ if (queue_depth > sdev->host->can_queue)
+ queue_depth = sdev->host->can_queue;
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
+ queue_depth);
+
+ return queue_depth;
+}
+
+/*
+ * Scsi host template for megaraid_sas driver
+ */
+static struct scsi_host_template megasas_template = {
+
+ .module = THIS_MODULE,
+ .name = "LSI SAS based MegaRAID driver",
+ .proc_name = "megaraid_sas",
+ .slave_configure = megasas_slave_configure,
+ .slave_alloc = megasas_slave_alloc,
+ .queuecommand = megasas_queue_command,
+ .eh_device_reset_handler = megasas_reset_device,
+ .eh_bus_reset_handler = megasas_reset_bus_host,
+ .eh_host_reset_handler = megasas_reset_bus_host,
+ .eh_timed_out = megasas_reset_timer,
+ .bios_param = megasas_bios_param,
+ .use_clustering = ENABLE_CLUSTERING,
+ .change_queue_depth = megasas_change_queue_depth,
+ .no_write_same = 1,
+};
+
+/**
+ * megasas_complete_int_cmd - Completes an internal command
+ * @instance: Adapter soft state
+ * @cmd: Command to be completed
+ *
+ * The megasas_issue_blocked_cmd() function waits for a command to complete
+ * after it issues a command. This function wakes up that waiting routine by
+ * calling wake_up() on the wait queue.
+ */
+static void
+megasas_complete_int_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd)
+{
+ cmd->cmd_status = cmd->frame->io.cmd_status;
+
+ if (cmd->cmd_status == ENODATA) {
+ cmd->cmd_status = 0;
+ }
+ wake_up(&instance->int_cmd_wait_q);
+}
+
+/**
+ * megasas_complete_abort - Completes aborting a command
+ * @instance: Adapter soft state
+ * @cmd: Cmd that was issued to abort another cmd
+ *
+ * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
+ * after it issues an abort on a previously issued command. This function
+ * wakes up all functions waiting on the same wait queue.
+ */
+static void
+megasas_complete_abort(struct megasas_instance *instance,
+ struct megasas_cmd *cmd)
+{
+ if (cmd->sync_cmd) {
+ cmd->sync_cmd = 0;
+ cmd->cmd_status = 0;
+ wake_up(&instance->abort_cmd_wait_q);
+ }
+
+ return;
+}
+
+/**
+ * megasas_complete_cmd - Completes a command
+ * @instance: Adapter soft state
+ * @cmd: Command to be completed
+ * @alt_status: If non-zero, use this value as status to
+ * SCSI mid-layer instead of the value returned
+ * by the FW. This should be used if caller wants
+ * an alternate status (as in the case of aborted
+ * commands)
+ */
+void
+megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ u8 alt_status)
+{
+ int exception = 0;
+ struct megasas_header *hdr = &cmd->frame->hdr;
+ unsigned long flags;
+ struct fusion_context *fusion = instance->ctrl_context;
+ u32 opcode;
+
+ /* flag for the retry reset */
+ cmd->retry_for_fw_reset = 0;
+
+ if (cmd->scmd)
+ cmd->scmd->SCp.ptr = NULL;
+
+ switch (hdr->cmd) {
+ case MFI_CMD_INVALID:
+		/* Some older 1068 controller FW may keep a pending
+ MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
+ when booting the kdump kernel. Ignore this command to
+ prevent a kernel panic on shutdown of the kdump kernel. */
+ printk(KERN_WARNING "megaraid_sas: MFI_CMD_INVALID command "
+ "completed.\n");
+ printk(KERN_WARNING "megaraid_sas: If you have a controller "
+ "other than PERC5, please upgrade your firmware.\n");
+ break;
+ case MFI_CMD_PD_SCSI_IO:
+ case MFI_CMD_LD_SCSI_IO:
+
+ /*
+ * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
+ * issued either through an IO path or an IOCTL path. If it
+ * was via IOCTL, we will send it to internal completion.
+ */
+ if (cmd->sync_cmd) {
+ cmd->sync_cmd = 0;
+ megasas_complete_int_cmd(instance, cmd);
+ break;
+ }
+
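+		/* fall through - regular SCSI I/O completion is handled below */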
+ case MFI_CMD_LD_READ:
+ case MFI_CMD_LD_WRITE:
+
+ if (alt_status) {
+ cmd->scmd->result = alt_status << 16;
+ exception = 1;
+ }
+
+ if (exception) {
+
+ atomic_dec(&instance->fw_outstanding);
+
+ scsi_dma_unmap(cmd->scmd);
+ cmd->scmd->scsi_done(cmd->scmd);
+ megasas_return_cmd(instance, cmd);
+
+ break;
+ }
+
+ switch (hdr->cmd_status) {
+
+ case MFI_STAT_OK:
+ cmd->scmd->result = DID_OK << 16;
+ break;
+
+ case MFI_STAT_SCSI_IO_FAILED:
+ case MFI_STAT_LD_INIT_IN_PROGRESS:
+ cmd->scmd->result =
+ (DID_ERROR << 16) | hdr->scsi_status;
+ break;
+
+ case MFI_STAT_SCSI_DONE_WITH_ERROR:
+
+ cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
+
+ if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
+ memset(cmd->scmd->sense_buffer, 0,
+ SCSI_SENSE_BUFFERSIZE);
+ memcpy(cmd->scmd->sense_buffer, cmd->sense,
+ hdr->sense_len);
+
+ cmd->scmd->result |= DRIVER_SENSE << 24;
+ }
+
+ break;
+
+ case MFI_STAT_LD_OFFLINE:
+ case MFI_STAT_DEVICE_NOT_FOUND:
+ cmd->scmd->result = DID_BAD_TARGET << 16;
+ break;
+
+ default:
+ printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
+ hdr->cmd_status);
+ cmd->scmd->result = DID_ERROR << 16;
+ break;
+ }
+
+ atomic_dec(&instance->fw_outstanding);
+
+ scsi_dma_unmap(cmd->scmd);
+ cmd->scmd->scsi_done(cmd->scmd);
+ megasas_return_cmd(instance, cmd);
+
+ break;
+
+ case MFI_CMD_SMP:
+ case MFI_CMD_STP:
+ case MFI_CMD_DCMD:
+ opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
+ /* Check for LD map update */
+ if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
+ && (cmd->frame->dcmd.mbox.b[1] == 1)) {
+ fusion->fast_path_io = 0;
+ spin_lock_irqsave(instance->host->host_lock, flags);
+ if (cmd->frame->hdr.cmd_status != 0) {
+ if (cmd->frame->hdr.cmd_status !=
+ MFI_STAT_NOT_FOUND)
+ printk(KERN_WARNING "megasas: map sync"
+					       " failed, status = 0x%x.\n",
+ cmd->frame->hdr.cmd_status);
+ else {
+ megasas_return_cmd(instance, cmd);
+ spin_unlock_irqrestore(
+ instance->host->host_lock,
+ flags);
+ break;
+ }
+ } else
+ instance->map_id++;
+ megasas_return_cmd(instance, cmd);
+
+ /*
+			 * Fast path IO was set to zero above;
+			 * MR_ValidateMapInfo() decides whether it can be
+			 * re-enabled. Until then all IOs go out as LD IO.
+ */
+ if (MR_ValidateMapInfo(instance))
+ fusion->fast_path_io = 1;
+ else
+ fusion->fast_path_io = 0;
+ megasas_sync_map_info(instance);
+ spin_unlock_irqrestore(instance->host->host_lock,
+ flags);
+ break;
+ }
+ if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
+ opcode == MR_DCMD_CTRL_EVENT_GET) {
+ spin_lock_irqsave(&poll_aen_lock, flags);
+ megasas_poll_wait_aen = 0;
+ spin_unlock_irqrestore(&poll_aen_lock, flags);
+ }
+
+ /*
+		 * See if we got an event notification
+ */
+ if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
+ megasas_service_aen(instance, cmd);
+ else
+ megasas_complete_int_cmd(instance, cmd);
+
+ break;
+
+ case MFI_CMD_ABORT:
+ /*
+ * Cmd issued to abort another cmd returned
+ */
+ megasas_complete_abort(instance, cmd);
+ break;
+
+ default:
+		printk(KERN_WARNING "megasas: Unknown command completed! [0x%X]\n",
+ hdr->cmd);
+ break;
+ }
+}
+
+/**
+ * megasas_issue_pending_cmds_again - issue all pending cmds
+ * in FW again because of the fw reset
+ * @instance: Adapter soft state
+ */
+static inline void
+megasas_issue_pending_cmds_again(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+ struct list_head clist_local;
+ union megasas_evt_class_locale class_locale;
+ unsigned long flags;
+ u32 seq_num;
+
+ INIT_LIST_HEAD(&clist_local);
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ list_splice_init(&instance->internal_reset_pending_q, &clist_local);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ while (!list_empty(&clist_local)) {
+ cmd = list_entry((&clist_local)->next,
+ struct megasas_cmd, list);
+ list_del_init(&cmd->list);
+
+ if (cmd->sync_cmd || cmd->scmd) {
+ printk(KERN_NOTICE "megaraid_sas: command %p, %p:%d"
+				" detected to be pending during HBA reset.\n",
+ cmd, cmd->scmd, cmd->sync_cmd);
+
+ cmd->retry_for_fw_reset++;
+
+ if (cmd->retry_for_fw_reset == 3) {
+ printk(KERN_NOTICE "megaraid_sas: cmd %p, %p:%d"
+					" was tried multiple times during reset."
+					" Shutting down the HBA\n",
+ cmd, cmd->scmd, cmd->sync_cmd);
+ megaraid_sas_kill_hba(instance);
+
+ instance->adprecovery =
+ MEGASAS_HW_CRITICAL_ERROR;
+ return;
+ }
+ }
+
+ if (cmd->sync_cmd == 1) {
+ if (cmd->scmd) {
+ printk(KERN_NOTICE "megaraid_sas: unexpected"
+					" cmd attached to internal command!\n");
+ }
+ printk(KERN_NOTICE "megasas: %p synchronous cmd"
+						" on the internal reset queue,"
+						" issue it again.\n", cmd);
+ cmd->cmd_status = ENODATA;
+ instance->instancet->fire_cmd(instance,
+ cmd->frame_phys_addr ,
+ 0, instance->reg_set);
+ } else if (cmd->scmd) {
+ printk(KERN_NOTICE "megasas: %p scsi cmd [%02x]"
+			" detected on the internal queue, issue again.\n",
+ cmd, cmd->scmd->cmnd[0]);
+
+ atomic_inc(&instance->fw_outstanding);
+ instance->instancet->fire_cmd(instance,
+ cmd->frame_phys_addr,
+ cmd->frame_count-1, instance->reg_set);
+ } else {
+ printk(KERN_NOTICE "megasas: %p unexpected cmd on the"
+				" internal reset defer list during re-issue!\n",
+ cmd);
+ }
+ }
+
+ if (instance->aen_cmd) {
+ printk(KERN_NOTICE "megaraid_sas: aen_cmd in def process\n");
+ megasas_return_cmd(instance, instance->aen_cmd);
+
+ instance->aen_cmd = NULL;
+ }
+
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ seq_num = instance->last_seq_num;
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+ megasas_register_aen(instance, seq_num, class_locale.word);
+}
+
+/**
+ * megasas_internal_reset_defer_cmds -	Move pending commands to a deferred queue
+ * @instance:				Adapter soft state
+ *
+ * We move the commands pending at internal reset time to a
+ * pending queue. This queue is flushed after successful
+ * completion of the internal reset sequence. If the internal reset
+ * does not complete in time, the kernel reset handler flushes
+ * these commands.
+ */
+static void
+megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+ int i;
+ u32 max_cmd = instance->max_fw_cmds;
+ u32 defer_index;
+ unsigned long flags;
+
+ defer_index = 0;
+ spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+ for (i = 0; i < max_cmd; i++) {
+ cmd = instance->cmd_list[i];
+ if (cmd->sync_cmd == 1 || cmd->scmd) {
+ printk(KERN_NOTICE "megasas: moving cmd[%d]:%p:%d:%p"
+				" on the defer queue as internal\n",
+ defer_index, cmd, cmd->sync_cmd, cmd->scmd);
+
+ if (!list_empty(&cmd->list)) {
+ printk(KERN_NOTICE "megaraid_sas: ERROR while"
+ " moving this cmd:%p, %d %p, it was"
+					" discovered on some list?\n",
+ cmd, cmd->sync_cmd, cmd->scmd);
+
+ list_del_init(&cmd->list);
+ }
+ defer_index++;
+ list_add_tail(&cmd->list,
+ &instance->internal_reset_pending_q);
+ }
+ }
+ spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+}
+
+
+static void
+process_fw_state_change_wq(struct work_struct *work)
+{
+ struct megasas_instance *instance =
+ container_of(work, struct megasas_instance, work_init);
+ u32 wait;
+ unsigned long flags;
+
+ if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
+		printk(KERN_NOTICE "megaraid_sas: error, unexpected recovery state %x\n",
+ instance->adprecovery);
+ return ;
+ }
+
+ if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+ printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault"
+					" state, restarting it...\n");
+
+ instance->instancet->disable_intr(instance);
+ atomic_set(&instance->fw_outstanding, 0);
+
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset(instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0 );
+
+ printk(KERN_NOTICE "megaraid_sas: FW restarted successfully,"
+					" initiating next stage...\n");
+
+ printk(KERN_NOTICE "megaraid_sas: HBA recovery state machine,"
+					" state 2 starting...\n");
+
+		/* wait for about 30 seconds before starting the second init */
+ for (wait = 0; wait < 30; wait++) {
+ msleep(1000);
+ }
+
+ if (megasas_transition_to_ready(instance, 1)) {
+			printk(KERN_NOTICE "megaraid_sas: adapter not ready\n");
+
+ megaraid_sas_kill_hba(instance);
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ return ;
+ }
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
+ (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
+ ) {
+ *instance->consumer = *instance->producer;
+ } else {
+ *instance->consumer = 0;
+ *instance->producer = 0;
+ }
+
+ megasas_issue_init_mfi(instance);
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ instance->instancet->enable_intr(instance);
+
+ megasas_issue_pending_cmds_again(instance);
+ instance->issuepend_done = 1;
+ }
+ return ;
+}
+
+/**
+ * megasas_deplete_reply_queue - Processes all completed commands
+ * @instance: Adapter soft state
+ * @alt_status: Alternate status to be returned to
+ * SCSI mid-layer instead of the status
+ * returned by the FW
+ * Note: this must be called with hba lock held
+ */
+static int
+megasas_deplete_reply_queue(struct megasas_instance *instance,
+ u8 alt_status)
+{
+ u32 mfiStatus;
+ u32 fw_state;
+
+ if ((mfiStatus = instance->instancet->check_reset(instance,
+ instance->reg_set)) == 1) {
+ return IRQ_HANDLED;
+ }
+
+ if ((mfiStatus = instance->instancet->clear_intr(
+ instance->reg_set)
+ ) == 0) {
+ /* Hardware may not set outbound_intr_status in MSI-X mode */
+ if (!instance->msix_vectors)
+ return IRQ_NONE;
+ }
+
+ instance->mfiStatus = mfiStatus;
+
+ if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
+ fw_state = instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+
+ if (fw_state != MFI_STATE_FAULT) {
+ printk(KERN_NOTICE "megaraid_sas: fw state:%x\n",
+ fw_state);
+ }
+
+ if ((fw_state == MFI_STATE_FAULT) &&
+ (instance->disableOnlineCtrlReset == 0)) {
+ printk(KERN_NOTICE "megaraid_sas: wait adp restart\n");
+
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS1064R) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_DELL_PERC5) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
+
+ *instance->consumer =
+ cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
+ }
+
+
+ instance->instancet->disable_intr(instance);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ instance->issuepend_done = 0;
+
+ atomic_set(&instance->fw_outstanding, 0);
+ megasas_internal_reset_defer_cmds(instance);
+
+ printk(KERN_NOTICE "megasas: fwState=%x, stage:%d\n",
+ fw_state, instance->adprecovery);
+
+ schedule_work(&instance->work_init);
+ return IRQ_HANDLED;
+
+ } else {
+ printk(KERN_NOTICE "megasas: fwstate:%x, dis_OCR=%x\n",
+ fw_state, instance->disableOnlineCtrlReset);
+ }
+ }
+
+ tasklet_schedule(&instance->isr_tasklet);
+ return IRQ_HANDLED;
+}
+/**
+ * megasas_isr - isr entry point
+ */
+static irqreturn_t megasas_isr(int irq, void *devp)
+{
+ struct megasas_irq_context *irq_context = devp;
+ struct megasas_instance *instance = irq_context->instance;
+ unsigned long flags;
+ irqreturn_t rc;
+
+ if (atomic_read(&instance->fw_reset_no_pci_access))
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ rc = megasas_deplete_reply_queue(instance, DID_OK);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ return rc;
+}
+
+/**
+ * megasas_transition_to_ready - Move the FW to READY state
+ * @instance: Adapter soft state
+ *
+ * During initialization, the FW can potentially be in any one of several
+ * possible states. If the FW is in an operational or waiting-for-handshake
+ * state, the driver must take steps to bring it to the ready state.
+ * Otherwise, it has to wait for the FW to reach the ready state.
+ */
+int
+megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
+{
+ int i;
+ u8 max_wait;
+ u32 fw_state;
+ u32 cur_state;
+ u32 abs_state, curr_abs_state;
+
+ abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
+ fw_state = abs_state & MFI_STATE_MASK;
+
+ if (fw_state != MFI_STATE_READY)
+ printk(KERN_INFO "megasas: Waiting for FW to come to ready"
+ " state\n");
+
+ while (fw_state != MFI_STATE_READY) {
+
+ switch (fw_state) {
+
+ case MFI_STATE_FAULT:
+ printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
+ if (ocr) {
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FAULT;
+ break;
+ } else
+ return -ENODEV;
+
+ case MFI_STATE_WAIT_HANDSHAKE:
+ /*
+ * Set the CLR bit in inbound doorbell
+ */
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY)) {
+ writel(
+ MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
+ &instance->reg_set->doorbell);
+ } else {
+ writel(
+ MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
+ &instance->reg_set->inbound_doorbell);
+ }
+
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_WAIT_HANDSHAKE;
+ break;
+
+ case MFI_STATE_BOOT_MESSAGE_PENDING:
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY)) {
+ writel(MFI_INIT_HOTPLUG,
+ &instance->reg_set->doorbell);
+ } else
+ writel(MFI_INIT_HOTPLUG,
+ &instance->reg_set->inbound_doorbell);
+
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
+ break;
+
+ case MFI_STATE_OPERATIONAL:
+ /*
+ * Bring it to READY state; assuming max wait 10 secs
+ */
+ instance->instancet->disable_intr(instance);
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
+ (instance->pdev->device
+ == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device
+ == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device
+ == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device
+ == PCI_DEVICE_ID_LSI_FURY)) {
+ writel(MFI_RESET_FLAGS,
+ &instance->reg_set->doorbell);
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY)) {
+ for (i = 0; i < (10 * 1000); i += 20) {
+ if (readl(
+ &instance->
+ reg_set->
+ doorbell) & 1)
+ msleep(20);
+ else
+ break;
+ }
+ }
+ } else
+ writel(MFI_RESET_FLAGS,
+ &instance->reg_set->inbound_doorbell);
+
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_OPERATIONAL;
+ break;
+
+ case MFI_STATE_UNDEFINED:
+ /*
+ * This state should not last for more than 2 seconds
+ */
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_UNDEFINED;
+ break;
+
+ case MFI_STATE_BB_INIT:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_BB_INIT;
+ break;
+
+ case MFI_STATE_FW_INIT:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FW_INIT;
+ break;
+
+ case MFI_STATE_FW_INIT_2:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FW_INIT_2;
+ break;
+
+ case MFI_STATE_DEVICE_SCAN:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_DEVICE_SCAN;
+ break;
+
+ case MFI_STATE_FLUSH_CACHE:
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FLUSH_CACHE;
+ break;
+
+ default:
+ printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
+ fw_state);
+ return -ENODEV;
+ }
+
+ /*
+ * The cur_state should not last for more than max_wait secs
+ */
+ for (i = 0; i < (max_wait * 1000); i++) {
+ curr_abs_state = instance->instancet->
+ read_fw_status_reg(instance->reg_set);
+
+ if (abs_state == curr_abs_state) {
+ msleep(1);
+ } else
+ break;
+ }
+
+ /*
+ * Return error if fw_state hasn't changed after max_wait
+ */
+ if (curr_abs_state == abs_state) {
+ printk(KERN_DEBUG "FW state [%d] hasn't changed "
+ "in %d secs\n", fw_state, max_wait);
+ return -ENODEV;
+ }
+
+ abs_state = curr_abs_state;
+ fw_state = curr_abs_state & MFI_STATE_MASK;
+ }
+ printk(KERN_INFO "megasas: FW now in Ready state\n");
+
+ return 0;
+}
+
+/**
+ * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
+ * @instance: Adapter soft state
+ */
+static void megasas_teardown_frame_pool(struct megasas_instance *instance)
+{
+ int i;
+ u32 max_cmd = instance->max_mfi_cmds;
+ struct megasas_cmd *cmd;
+
+ if (!instance->frame_dma_pool)
+ return;
+
+ /*
+ * Return all frames to pool
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = instance->cmd_list[i];
+
+ if (cmd->frame)
+ pci_pool_free(instance->frame_dma_pool, cmd->frame,
+ cmd->frame_phys_addr);
+
+ if (cmd->sense)
+ pci_pool_free(instance->sense_dma_pool, cmd->sense,
+ cmd->sense_phys_addr);
+ }
+
+ /*
+ * Now destroy the pool itself
+ */
+ pci_pool_destroy(instance->frame_dma_pool);
+ pci_pool_destroy(instance->sense_dma_pool);
+
+ instance->frame_dma_pool = NULL;
+ instance->sense_dma_pool = NULL;
+}
+
+/**
+ * megasas_create_frame_pool - Creates DMA pool for cmd frames
+ * @instance: Adapter soft state
+ *
+ * Each command packet has an embedded DMA memory buffer that is used for
+ * filling MFI frame and the SG list that immediately follows the frame. This
+ * function creates those DMA memory buffers for each command packet by using
+ * PCI pool facility.
+ */
+static int megasas_create_frame_pool(struct megasas_instance *instance)
+{
+ int i;
+ u32 max_cmd;
+ u32 sge_sz;
+ u32 sgl_sz;
+ u32 total_sz;
+ u32 frame_count;
+ struct megasas_cmd *cmd;
+
+ max_cmd = instance->max_mfi_cmds;
+
+ /*
+	 * Size of our frame is 64 bytes for the MFI frame, followed by max SG
+	 * elements. Sense buffers are allocated from a separate DMA pool.
+ */
+ sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
+ sizeof(struct megasas_sge32);
+
+ if (instance->flag_ieee) {
+ sge_sz = sizeof(struct megasas_sge_skinny);
+ }
+
+ /*
+	 * Calculate the number of 64-byte frames required for the SGL
+ */
+ sgl_sz = sge_sz * instance->max_num_sge;
+ frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
+ frame_count = 15;
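+	/*
+	 * Note: the value computed above is overridden; every command gets
+	 * the same fixed 15-frame SGL area.
+	 */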
+
+ /*
+ * We need one extra frame for the MFI command
+ */
+ frame_count++;
+
+ total_sz = MEGAMFI_FRAME_SIZE * frame_count;
+ /*
+ * Use DMA pool facility provided by PCI layer
+ */
+ instance->frame_dma_pool = pci_pool_create("megasas frame pool",
+ instance->pdev, total_sz, 64,
+ 0);
+
+ if (!instance->frame_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
+ return -ENOMEM;
+ }
+
+ instance->sense_dma_pool = pci_pool_create("megasas sense pool",
+ instance->pdev, 128, 4, 0);
+
+ if (!instance->sense_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup sense pool\n");
+
+ pci_pool_destroy(instance->frame_dma_pool);
+ instance->frame_dma_pool = NULL;
+
+ return -ENOMEM;
+ }
+
+ /*
+ * Allocate and attach a frame to each of the commands in cmd_list.
+	 * By using cmd->index as the context instead of &cmd, we can
+	 * always use a 32-bit context regardless of the architecture.
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = instance->cmd_list[i];
+
+ cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
+ GFP_KERNEL, &cmd->frame_phys_addr);
+
+ cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
+ GFP_KERNEL, &cmd->sense_phys_addr);
+
+ /*
+ * megasas_teardown_frame_pool() takes care of freeing
+ * whatever has been allocated
+ */
+ if (!cmd->frame || !cmd->sense) {
+			printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n");
+ megasas_teardown_frame_pool(instance);
+ return -ENOMEM;
+ }
+
+ memset(cmd->frame, 0, total_sz);
+ cmd->frame->io.context = cpu_to_le32(cmd->index);
+ cmd->frame->io.pad_0 = 0;
+ if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
+ (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
+ (reset_devices))
+ cmd->frame->hdr.cmd = MFI_CMD_INVALID;
+ }
+
+ return 0;
+}
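+/*
+ * Sizing note (illustrative): with the fixed 15 SGL frames plus one extra
+ * frame for the MFI header, each command draws total_sz =
+ * 16 * MEGAMFI_FRAME_SIZE = 16 * 64 = 1024 bytes from the frame DMA pool,
+ * plus a 128-byte sense buffer from the sense pool.
+ */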
+
+/**
+ * megasas_free_cmds - Free all the cmds in the free cmd pool
+ * @instance: Adapter soft state
+ */
+void megasas_free_cmds(struct megasas_instance *instance)
+{
+ int i;
+ /* First free the MFI frame pool */
+ megasas_teardown_frame_pool(instance);
+
+ /* Free all the commands in the cmd_list */
+ for (i = 0; i < instance->max_mfi_cmds; i++)
+
+ kfree(instance->cmd_list[i]);
+
+ /* Free the cmd_list buffer itself */
+ kfree(instance->cmd_list);
+ instance->cmd_list = NULL;
+
+ INIT_LIST_HEAD(&instance->cmd_pool);
+}
+
+/**
+ * megasas_alloc_cmds - Allocates the command packets
+ * @instance: Adapter soft state
+ *
+ * Each command that is issued to the FW, whether IO commands from the OS or
+ * internal commands like IOCTLs, is wrapped in a local data structure called
+ * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
+ * the FW.
+ *
+ * Each frame has a 32-bit field called context (tag). This context is used
+ * to get back the megasas_cmd from the frame when a frame gets completed in
+ * the ISR. Typically the address of the megasas_cmd itself would be used as
+ * the context. But we wanted to keep the differences between 32 and 64 bit
+ * systems to the minimum. We always use 32-bit integers for the context. In
+ * this driver, the 32 bit values are the indices into an array cmd_list.
+ * This array is used only to look up the megasas_cmd given the context. The
+ * free commands themselves are maintained in a linked list called cmd_pool.
+ */
+int megasas_alloc_cmds(struct megasas_instance *instance)
+{
+ int i;
+ int j;
+ u32 max_cmd;
+ struct megasas_cmd *cmd;
+
+ max_cmd = instance->max_mfi_cmds;
+
+ /*
+ * instance->cmd_list is an array of struct megasas_cmd pointers.
+ * Allocate the dynamic array first and then allocate individual
+ * commands.
+ */
+ instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
+
+ if (!instance->cmd_list) {
+ printk(KERN_DEBUG "megasas: out of memory\n");
+ return -ENOMEM;
+ }
+
+ memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
+
+ for (i = 0; i < max_cmd; i++) {
+ instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
+ GFP_KERNEL);
+
+ if (!instance->cmd_list[i]) {
+
+ for (j = 0; j < i; j++)
+ kfree(instance->cmd_list[j]);
+
+ kfree(instance->cmd_list);
+ instance->cmd_list = NULL;
+
+ return -ENOMEM;
+ }
+ }
+
+ /*
+ * Add all the commands to command pool (instance->cmd_pool)
+ */
+ for (i = 0; i < max_cmd; i++) {
+ cmd = instance->cmd_list[i];
+ memset(cmd, 0, sizeof(struct megasas_cmd));
+ cmd->index = i;
+ cmd->scmd = NULL;
+ cmd->instance = instance;
+
+ list_add_tail(&cmd->list, &instance->cmd_pool);
+ }
+
+ /*
+ * Create a frame pool and assign one frame to each cmd
+ */
+ if (megasas_create_frame_pool(instance)) {
+ printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+ megasas_free_cmds(instance);
+ }
+
+ return 0;
+}
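+/*
+ * Illustrative sketch of the context (tag) scheme described above: on
+ * completion the 32-bit context carried in the frame is used as an index
+ * back into cmd_list, as in megasas_complete_cmd_dpc():
+ *
+ *	context = le32_to_cpu(instance->reply_queue[consumer]);
+ *	cmd = instance->cmd_list[context];
+ *	megasas_complete_cmd(instance, cmd, DID_OK);
+ */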
+
+/*
+ * megasas_get_pd_list -	Returns FW's pd_list structure
+ * @instance:			Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller PD
+ * list structure. This information is mainly used to find out the
+ * physical drives that are exposed to the host as SYSTEM PDs.
+ */
+static int
+megasas_get_pd_list(struct megasas_instance *instance)
+{
+ int ret = 0, pd_index = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_PD_LIST *ci;
+ struct MR_PD_ADDRESS *pd_addr;
+ dma_addr_t ci_h = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
+
+ if (!ci) {
+ printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
+ dcmd->mbox.b[1] = 0;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
+
+ if (!megasas_issue_polled(instance, cmd)) {
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+ /*
+	 * Populate the driver's local PD list from the data returned by the FW.
+ */
+
+ pd_addr = ci->addr;
+
+ if ( ret == 0 &&
+ (le32_to_cpu(ci->count) <
+ (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
+
+ memset(instance->local_pd_list, 0,
+ MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
+
+ for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
+
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
+ le16_to_cpu(pd_addr->deviceId);
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
+ pd_addr->scsiDevType;
+ instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
+ MR_PD_STATE_SYSTEM;
+ pd_addr++;
+ }
+ memcpy(instance->pd_list, instance->local_pd_list,
+ sizeof(instance->pd_list));
+ }
+
+ pci_free_consistent(instance->pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+ ci, ci_h);
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
+
+/*
+ * megasas_get_ld_list -	Returns FW's ld_list structure
+ * @instance:				Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's controller LD
+ * list structure. This information is mainly used to find out the
+ * logical drives exported by the FW.
+ */
+static int
+megasas_get_ld_list(struct megasas_instance *instance)
+{
+ int ret = 0, ld_index = 0, ids = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_LIST *ci;
+ dma_addr_t ci_h = 0;
+ u32 ld_count;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_LIST),
+ &ci_h);
+
+ if (!ci) {
+ printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
+ dcmd->pad_0 = 0;
+
+ if (!megasas_issue_polled(instance, cmd)) {
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+ ld_count = le32_to_cpu(ci->ldCount);
+
+	/* Populate the driver's LD id table from the list returned by the FW */
+
+ if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) {
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+
+ for (ld_index = 0; ld_index < ld_count; ld_index++) {
+ if (ci->ldList[ld_index].state != 0) {
+ ids = ci->ldList[ld_index].ref.targetId;
+ instance->ld_ids[ids] =
+ ci->ldList[ld_index].ref.targetId;
+ }
+ }
+ }
+
+ pci_free_consistent(instance->pdev,
+ sizeof(struct MR_LD_LIST),
+ ci,
+ ci_h);
+
+ megasas_return_cmd(instance, cmd);
+ return ret;
+}
+
+/**
+ * megasas_ld_list_query -	Returns FW's LD target ID list
+ * @instance:				Adapter soft state
+ * @query_type:				Type of LD list query to issue
+ *
+ * Issues an internal command (DCMD) to get the list of LD target IDs
+ * matching @query_type from the FW. On failure the caller falls back
+ * to the older MR_DCMD_LD_GET_LIST interface.
+ */
+static int
+megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
+{
+ int ret = 0, ld_index = 0, ids = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_TARGETID_LIST *ci;
+ dma_addr_t ci_h = 0;
+ u32 tgtid_count;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_WARNING
+ "megasas:(megasas_ld_list_query): Failed to get cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
+
+ if (!ci) {
+ printk(KERN_WARNING
+ "megasas: Failed to alloc mem for ld_list_query\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.b[0] = query_type;
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
+ dcmd->pad_0 = 0;
+
+ if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) {
+ ret = 0;
+ } else {
+ /* On failure, call older LD list DCMD */
+ ret = 1;
+ }
+
+ tgtid_count = le32_to_cpu(ci->count);
+
+ if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) {
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
+ ids = ci->targetId[ld_index];
+ instance->ld_ids[ids] = ci->targetId[ld_index];
+ }
+
+ }
+
+ pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
+ ci, ci_h);
+
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
+
+/**
+ * megasas_get_ctrl_info -	Returns FW's controller structure
+ * @instance: Adapter soft state
+ * @ctrl_info: Controller information structure
+ *
+ * Issues an internal command (DCMD) to get the FW's controller structure.
+ * This information is mainly used to find out the maximum IO transfer per
+ * command supported by the FW.
+ */
+static int
+megasas_get_ctrl_info(struct megasas_instance *instance,
+ struct megasas_ctrl_info *ctrl_info)
+{
+ int ret = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct megasas_ctrl_info *ci;
+ dma_addr_t ci_h = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ ci = pci_alloc_consistent(instance->pdev,
+ sizeof(struct megasas_ctrl_info), &ci_h);
+
+ if (!ci) {
+ printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+
+ if (!megasas_issue_polled(instance, cmd)) {
+ ret = 0;
+ memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
+ } else {
+ ret = -1;
+ }
+
+ pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
+ ci, ci_h);
+
+ megasas_return_cmd(instance, cmd);
+ return ret;
+}
+
+/**
+ * megasas_issue_init_mfi - Initializes the FW
+ * @instance: Adapter soft state
+ *
+ * Issues the INIT MFI cmd
+ */
+static int
+megasas_issue_init_mfi(struct megasas_instance *instance)
+{
+ u32 context;
+
+ struct megasas_cmd *cmd;
+
+ struct megasas_init_frame *init_frame;
+ struct megasas_init_queue_info *initq_info;
+ dma_addr_t init_frame_h;
+ dma_addr_t initq_info_h;
+
+ /*
+	 * Prepare an init frame. Note the init frame points to queue info
+ * structure. Each frame has SGL allocated after first 64 bytes. For
+ * this frame - since we don't need any SGL - we use SGL's space as
+ * queue info structure
+ *
+ * We will not get a NULL command below. We just created the pool.
+ */
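+	/*
+	 * A rough sketch of the resulting layout (the offsets follow from
+	 * the code below):
+	 *
+	 *   cmd->frame_phys_addr + 0  : struct megasas_init_frame (header)
+	 *   cmd->frame_phys_addr + 64 : struct megasas_init_queue_info,
+	 *                               placed in the frame's SGL area
+	 */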
+ cmd = megasas_get_cmd(instance);
+
+ init_frame = (struct megasas_init_frame *)cmd->frame;
+ initq_info = (struct megasas_init_queue_info *)
+ ((unsigned long)init_frame + 64);
+
+ init_frame_h = cmd->frame_phys_addr;
+ initq_info_h = init_frame_h + 64;
+
+ context = init_frame->context;
+ memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+ memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
+ init_frame->context = context;
+
+ initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
+ initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
+
+ initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
+ initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
+
+ init_frame->cmd = MFI_CMD_INIT;
+ init_frame->cmd_status = 0xFF;
+ init_frame->queue_info_new_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(initq_info_h));
+ init_frame->queue_info_new_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(initq_info_h));
+
+ init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
+
+ /*
+ * disable the intr before firing the init frame to FW
+ */
+ instance->instancet->disable_intr(instance);
+
+ /*
+ * Issue the init frame in polled mode
+ */
+
+ if (megasas_issue_polled(instance, cmd)) {
+ printk(KERN_ERR "megasas: Failed to init firmware\n");
+ megasas_return_cmd(instance, cmd);
+ goto fail_fw_init;
+ }
+
+ megasas_return_cmd(instance, cmd);
+
+ return 0;
+
+fail_fw_init:
+ return -EINVAL;
+}
+
+static u32
+megasas_init_adapter_mfi(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *reg_set;
+ u32 context_sz;
+ u32 reply_q_sz;
+
+ reg_set = instance->reg_set;
+
+ /*
+ * Get various operational parameters from status register
+ */
+ instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+ /*
+ * Reduce the max supported cmds by 1. This is to ensure that the
+ * reply_q_sz (1 more than the max cmd that driver may send)
+ * does not exceed max cmds that the FW can support
+ */
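+	/*
+	 * For example (hypothetical value): if the status register reports
+	 * 1008 commands, the driver uses max_fw_cmds = 1007 and sizes the
+	 * reply queue below for 1007 + 1 = 1008 entries.
+	 */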
+ instance->max_fw_cmds = instance->max_fw_cmds-1;
+ instance->max_mfi_cmds = instance->max_fw_cmds;
+ instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
+ 0x10;
+ /*
+ * Create a pool of commands
+ */
+ if (megasas_alloc_cmds(instance))
+ goto fail_alloc_cmds;
+
+ /*
+ * Allocate memory for reply queue. Length of reply queue should
+ * be _one_ more than the maximum commands handled by the firmware.
+ *
+	 * Note: When FW completes commands, it places corresponding context
+ * values in this circular reply queue. This circular queue is a fairly
+ * typical producer-consumer queue. FW is the producer (of completed
+ * commands) and the driver is the consumer.
+ */
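+	/*
+	 * A minimal sketch of the consumer side, assuming reply_queue has
+	 * (max_fw_cmds + 1) u32 slots and *producer / *consumer hold the
+	 * FW's and driver's indices respectively:
+	 *
+	 *	while (*consumer != *producer) {
+	 *		u32 context = reply_queue[*consumer];
+	 *		// complete the cmd identified by 'context'
+	 *		*consumer = (*consumer + 1) % (max_fw_cmds + 1);
+	 *	}
+	 */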
+ context_sz = sizeof(u32);
+ reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
+
+ instance->reply_queue = pci_alloc_consistent(instance->pdev,
+ reply_q_sz,
+ &instance->reply_queue_h);
+
+ if (!instance->reply_queue) {
+ printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
+ goto fail_reply_queue;
+ }
+
+ if (megasas_issue_init_mfi(instance))
+ goto fail_fw_init;
+
+ instance->fw_support_ieee = 0;
+ instance->fw_support_ieee =
+ (instance->instancet->read_fw_status_reg(reg_set) &
+ 0x04000000);
+
+	printk(KERN_NOTICE "megasas_init_mfi: fw_support_ieee=%d\n",
+ instance->fw_support_ieee);
+
+ if (instance->fw_support_ieee)
+ instance->flag_ieee = 1;
+
+ return 0;
+
+fail_fw_init:
+
+ pci_free_consistent(instance->pdev, reply_q_sz,
+ instance->reply_queue, instance->reply_queue_h);
+fail_reply_queue:
+ megasas_free_cmds(instance);
+
+fail_alloc_cmds:
+ return 1;
+}
+
+/**
+ * megasas_init_fw - Initializes the FW
+ * @instance: Adapter soft state
+ *
+ * This is the main function for initializing firmware
+ */
+
+static int megasas_init_fw(struct megasas_instance *instance)
+{
+ u32 max_sectors_1;
+ u32 max_sectors_2;
+ u32 tmp_sectors, msix_enable, scratch_pad_2;
+ resource_size_t base_addr;
+ struct megasas_register_set __iomem *reg_set;
+ struct megasas_ctrl_info *ctrl_info;
+ unsigned long bar_list;
+ int i, loop, fw_msix_count = 0;
+ struct IOV_111 *iovPtr;
+
+ /* Find first memory bar */
+ bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
+ instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
+ if (pci_request_selected_regions(instance->pdev, instance->bar,
+ "megasas: LSI")) {
+ printk(KERN_DEBUG "megasas: IO memory region busy!\n");
+ return -EBUSY;
+ }
+
+ base_addr = pci_resource_start(instance->pdev, instance->bar);
+ instance->reg_set = ioremap_nocache(base_addr, 8192);
+
+ if (!instance->reg_set) {
+ printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
+ goto fail_ioremap;
+ }
+
+ reg_set = instance->reg_set;
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_FURY:
+ instance->instancet = &megasas_instance_template_fusion;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS1078R:
+ case PCI_DEVICE_ID_LSI_SAS1078DE:
+ instance->instancet = &megasas_instance_template_ppc;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS1078GEN2:
+ case PCI_DEVICE_ID_LSI_SAS0079GEN2:
+ instance->instancet = &megasas_instance_template_gen2;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
+ case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
+ instance->instancet = &megasas_instance_template_skinny;
+ break;
+ case PCI_DEVICE_ID_LSI_SAS1064R:
+ case PCI_DEVICE_ID_DELL_PERC5:
+ default:
+ instance->instancet = &megasas_instance_template_xscale;
+ break;
+ }
+
+ if (megasas_transition_to_ready(instance, 0)) {
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset
+ (instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+ dev_info(&instance->pdev->dev,
+ "megasas: FW restarted successfully from %s!\n",
+ __func__);
+
+		/* wait for about 30 seconds before retrying */
+ ssleep(30);
+
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+ }
+
+ /*
+	 * MSI-X host index 0 is common for all adapters.
+	 * It is used for all MPT-based adapters.
+ */
+ instance->reply_post_host_index_addr[0] =
+ (u32 *)((u8 *)instance->reg_set +
+ MPI2_REPLY_POST_HOST_INDEX_OFFSET);
+
+ /* Check if MSI-X is supported while in ready state */
+ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+ 0x4000000) >> 0x1a;
+ if (msix_enable && !msix_disable) {
+ scratch_pad_2 = readl
+ (&instance->reg_set->outbound_scratch_pad_2);
+ /* Check max MSI-X vectors */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) {
+ instance->msix_vectors = (scratch_pad_2
+ & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
+ fw_msix_count = instance->msix_vectors;
+ if (msix_vectors)
+ instance->msix_vectors =
+ min(msix_vectors,
+ instance->msix_vectors);
+ } else if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)
+ || (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ /* Invader/Fury supports more than 8 MSI-X */
+ instance->msix_vectors = ((scratch_pad_2
+ & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
+ >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
+ fw_msix_count = instance->msix_vectors;
+ /* Save 1-15 reply post index address to local memory
+ * Index 0 is already saved from reg offset
+ * MPI2_REPLY_POST_HOST_INDEX_OFFSET
+ */
+ for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
+ instance->reply_post_host_index_addr[loop] =
+ (u32 *)((u8 *)instance->reg_set +
+ MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
+ + (loop * 0x10));
+ }
+ if (msix_vectors)
+ instance->msix_vectors = min(msix_vectors,
+ instance->msix_vectors);
+ } else
+ instance->msix_vectors = 1;
+ /* Don't bother allocating more MSI-X vectors than cpus */
+ instance->msix_vectors = min(instance->msix_vectors,
+ (unsigned int)num_online_cpus());
+ for (i = 0; i < instance->msix_vectors; i++)
+ instance->msixentry[i].entry = i;
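+		/*
+		 * pci_enable_msix() returns 0 on success, a positive count
+		 * of vectors that could be allocated when the full request
+		 * cannot be met (in which case we retry with that count),
+		 * or a negative errno on failure.
+		 */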
+ i = pci_enable_msix(instance->pdev, instance->msixentry,
+ instance->msix_vectors);
+ if (i >= 0) {
+ if (i) {
+ if (!pci_enable_msix(instance->pdev,
+ instance->msixentry, i))
+ instance->msix_vectors = i;
+ else
+ instance->msix_vectors = 0;
+ }
+ } else
+ instance->msix_vectors = 0;
+
+		dev_info(&instance->pdev->dev, "[scsi%d]: FW supports"
+			" <%d> MSIX vectors, Online CPUs: <%d>,"
+			" Current MSIX <%d>\n", instance->host->host_no,
+ fw_msix_count, (unsigned int)num_online_cpus(),
+ instance->msix_vectors);
+ }
+
+ /* Get operational params, sge flags, send init cmd to controller */
+ if (instance->instancet->init_adapter(instance))
+ goto fail_init_adapter;
+
+ printk(KERN_ERR "megasas: INIT adapter done\n");
+
+	/*
+	 * Get the FW's PD list; needed for pass-through support.
+	 */
+
+	memset(instance->pd_list, 0,
+ (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
+ if (megasas_get_pd_list(instance) < 0) {
+ printk(KERN_ERR "megasas: failed to get PD list\n");
+ goto fail_init_adapter;
+ }
+
+ memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+
+ ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
+
+ /*
+ * Compute the max allowed sectors per IO: The controller info has two
+ * limits on max sectors. Driver should use the minimum of these two.
+ *
+ * 1 << stripe_sz_ops.min = max sectors per strip
+ *
+	 * Note that older firmware (< FW ver 30) didn't report the information
+	 * needed to calculate max_sectors_1, so the number always ended up as zero.
+ */
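+	/*
+	 * Worked example with made-up values: stripe_sz_ops.min = 7 gives
+	 * 1 << 7 = 128 sectors per strip; with max_strips_per_io = 42 that
+	 * yields max_sectors_1 = 5376, and with max_request_size = 4096 the
+	 * driver ends up using min(5376, 4096) = 4096 sectors.
+	 */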
+ tmp_sectors = 0;
+ if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
+
+ max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+ le16_to_cpu(ctrl_info->max_strips_per_io);
+ max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
+
+		tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
+
+		/* Check whether the controller is iMR or MR */
+ if (ctrl_info->memory_size) {
+ instance->is_imr = 0;
+			dev_info(&instance->pdev->dev, "Controller type: MR,"
+				" Memory size is: %dMB\n",
+ le16_to_cpu(ctrl_info->memory_size));
+ } else {
+ instance->is_imr = 1;
+ dev_info(&instance->pdev->dev,
+ "Controller type: iMR\n");
+ }
+		/* Convert OnOffProperties to host CPU byte order */
+ le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
+ instance->disableOnlineCtrlReset =
+ ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+		/* Convert adapterOperations2 to host CPU byte order */
+ le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
+ instance->mpio = ctrl_info->adapterOperations2.mpio;
+ instance->UnevenSpanSupport =
+ ctrl_info->adapterOperations2.supportUnevenSpans;
+ if (instance->UnevenSpanSupport) {
+ struct fusion_context *fusion = instance->ctrl_context;
+ dev_info(&instance->pdev->dev, "FW supports: "
+ "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
+ if (MR_ValidateMapInfo(instance))
+ fusion->fast_path_io = 1;
+ else
+ fusion->fast_path_io = 0;
+
+ }
+ if (ctrl_info->host_interface.SRIOV) {
+ if (!ctrl_info->adapterOperations2.activePassive)
+ instance->PlasmaFW111 = 1;
+
+ if (!instance->PlasmaFW111)
+ instance->requestorId =
+ ctrl_info->iov.requestorId;
+ else {
+ iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
+ instance->requestorId = iovPtr->requestorId;
+ }
+ printk(KERN_WARNING "megaraid_sas: I am VF "
+ "requestorId %d\n", instance->requestorId);
+ }
+ }
+ instance->max_sectors_per_req = instance->max_num_sge *
+ PAGE_SIZE / 512;
+ if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
+ instance->max_sectors_per_req = tmp_sectors;
+
+ kfree(ctrl_info);
+
+ /* Check for valid throttlequeuedepth module parameter */
+ if (instance->is_imr) {
+ if (throttlequeuedepth > (instance->max_fw_cmds -
+ MEGASAS_SKINNY_INT_CMDS))
+ instance->throttlequeuedepth =
+ MEGASAS_THROTTLE_QUEUE_DEPTH;
+ else
+ instance->throttlequeuedepth = throttlequeuedepth;
+ } else {
+ if (throttlequeuedepth > (instance->max_fw_cmds -
+ MEGASAS_INT_CMDS))
+ instance->throttlequeuedepth =
+ MEGASAS_THROTTLE_QUEUE_DEPTH;
+ else
+ instance->throttlequeuedepth = throttlequeuedepth;
+ }
+
+ /*
+ * Setup tasklet for cmd completion
+ */
+
+ tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
+ (unsigned long)instance);
+
+ /* Launch SR-IOV heartbeat timer */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 1))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
+ return 0;
+
+fail_init_adapter:
+fail_ready_state:
+ iounmap(instance->reg_set);
+
+ fail_ioremap:
+ pci_release_selected_regions(instance->pdev, instance->bar);
+
+ return -EINVAL;
+}
+
+/**
+ * megasas_release_mfi - Reverses the FW initialization
+ * @instance:				Adapter soft state
+ */
+static void megasas_release_mfi(struct megasas_instance *instance)
+{
+ u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
+
+ if (instance->reply_queue)
+ pci_free_consistent(instance->pdev, reply_q_sz,
+ instance->reply_queue, instance->reply_queue_h);
+
+ megasas_free_cmds(instance);
+
+ iounmap(instance->reg_set);
+
+ pci_release_selected_regions(instance->pdev, instance->bar);
+}
+
+/**
+ * megasas_get_seq_num - Gets latest event sequence numbers
+ * @instance: Adapter soft state
+ * @eli: FW event log sequence numbers information
+ *
+ * FW maintains a log of all events in a non-volatile area. Upper layers would
+ * usually find out the latest sequence number of the events, the seq number at
+ * boot, etc. They would "read" all the events below the latest seq number
+ * by issuing a direct fw cmd (DCMD). For future events (beyond the latest seq
+ * number), they would subscribe to AEN (asynchronous event notification) and
+ * wait for the events to happen.
+ */
+static int
+megasas_get_seq_num(struct megasas_instance *instance,
+ struct megasas_evt_log_info *eli)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct megasas_evt_log_info *el_info;
+ dma_addr_t el_info_h = 0;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+ el_info = pci_alloc_consistent(instance->pdev,
+ sizeof(struct megasas_evt_log_info),
+ &el_info_h);
+
+ if (!el_info) {
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(el_info, 0, sizeof(*el_info));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+		dev_err(&instance->pdev->dev, "Command timed out"
+			" from %s\n", __func__);
+ else {
+ /*
+ * Copy the data back into callers buffer
+ */
+ eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num);
+ eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num);
+ eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num);
+ eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num);
+ eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num);
+ }
+
+ pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
+ el_info, el_info_h);
+
+ megasas_return_cmd(instance, cmd);
+
+ return 0;
+}
+
+/**
+ * megasas_register_aen - Registers for asynchronous event notification
+ * @instance: Adapter soft state
+ * @seq_num: The starting sequence number
+ * @class_locale: Class of the event
+ *
+ * This function subscribes for AEN for events beyond the @seq_num. It requests
+ * to be notified if and only if the event is of type @class_locale
+ */
+static int
+megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
+ u32 class_locale_word)
+{
+ int ret_val;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ union megasas_evt_class_locale curr_aen;
+ union megasas_evt_class_locale prev_aen;
+
+ /*
+	 * If there is an AEN pending already (aen_cmd), check if the
+ * class_locale of that pending AEN is inclusive of the new
+ * AEN request we currently have. If it is, then we don't have
+ * to do anything. In other words, whichever events the current
+ * AEN request is subscribing to, have already been subscribed
+ * to.
+ *
+ * If the old_cmd is _not_ inclusive, then we have to abort
+ * that command, form a class_locale that is superset of both
+ * old and current and re-issue to the FW
+ */
+
+ curr_aen.word = class_locale_word;
+
+ if (instance->aen_cmd) {
+
+ prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
+ prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale);
+
+ /*
+ * A class whose enum value is smaller is inclusive of all
+ * higher values. If a PROGRESS (= -1) was previously
+		 * registered, then new registration requests for higher
+ * classes need not be sent to FW. They are automatically
+ * included.
+ *
+ * Locale numbers don't have such hierarchy. They are bitmap
+ * values
+ */
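+		/*
+		 * Worked example (hypothetical values): if the pending AEN
+		 * was registered with {class = PROGRESS, locale = 0x0001}
+		 * and the new request asks for {class = CRITICAL,
+		 * locale = 0x0002}, the locales are OR-ed into 0x0003, the
+		 * smaller (more inclusive) class is kept, and the pending
+		 * AEN is aborted and re-issued below.
+		 */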
+ if ((prev_aen.members.class <= curr_aen.members.class) &&
+ !((prev_aen.members.locale & curr_aen.members.locale) ^
+ curr_aen.members.locale)) {
+ /*
+ * Previously issued event registration includes
+ * current request. Nothing to do.
+ */
+ return 0;
+ } else {
+ curr_aen.members.locale |= prev_aen.members.locale;
+
+ if (prev_aen.members.class < curr_aen.members.class)
+ curr_aen.members.class = prev_aen.members.class;
+
+ instance->aen_cmd->abort_aen = 1;
+ ret_val = megasas_issue_blocked_abort_cmd(instance,
+ instance->
+ aen_cmd, 30);
+
+ if (ret_val) {
+ printk(KERN_DEBUG "megasas: Failed to abort "
+ "previous AEN command\n");
+ return ret_val;
+ }
+ }
+ }
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return -ENOMEM;
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
+
+ /*
+ * Prepare DCMD for aen registration
+ */
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
+ dcmd->mbox.w[0] = cpu_to_le32(seq_num);
+ instance->last_seq_num = seq_num;
+ dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
+
+ if (instance->aen_cmd != NULL) {
+ megasas_return_cmd(instance, cmd);
+ return 0;
+ }
+
+ /*
+ * Store reference to the cmd used to register for AEN. When an
+ * application wants us to register for AEN, we have to abort this
+ * cmd and re-register with a new EVENT LOCALE supplied by that app
+ */
+ instance->aen_cmd = cmd;
+
+ /*
+ * Issue the aen registration frame
+ */
+ instance->instancet->issue_dcmd(instance, cmd);
+
+ return 0;
+}
+
+/**
+ * megasas_start_aen - Subscribes to AEN during driver load time
+ * @instance: Adapter soft state
+ */
+static int megasas_start_aen(struct megasas_instance *instance)
+{
+ struct megasas_evt_log_info eli;
+ union megasas_evt_class_locale class_locale;
+
+ /*
+ * Get the latest sequence number from FW
+ */
+ memset(&eli, 0, sizeof(eli));
+
+ if (megasas_get_seq_num(instance, &eli))
+ return -1;
+
+ /*
+ * Register AEN with FW for latest sequence number plus 1
+ */
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+ return megasas_register_aen(instance,
+ eli.newest_seq_num + 1,
+ class_locale.word);
+}
+
+/**
+ * megasas_io_attach - Attaches this driver to SCSI mid-layer
+ * @instance: Adapter soft state
+ */
+static int megasas_io_attach(struct megasas_instance *instance)
+{
+ struct Scsi_Host *host = instance->host;
+
+ /*
+ * Export parameters required by SCSI mid-layer
+ */
+ host->irq = instance->pdev->irq;
+ host->unique_id = instance->unique_id;
+ if (instance->is_imr) {
+ host->can_queue =
+ instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS;
+ } else
+ host->can_queue =
+ instance->max_fw_cmds - MEGASAS_INT_CMDS;
+ host->this_id = instance->init_id;
+ host->sg_tablesize = instance->max_num_sge;
+
+ if (instance->fw_support_ieee)
+ instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
+
+ /*
+ * Check if the module parameter value for max_sectors can be used
+ */
+ if (max_sectors && max_sectors < instance->max_sectors_per_req)
+ instance->max_sectors_per_req = max_sectors;
+ else {
+ if (max_sectors) {
+ if (((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
+ (max_sectors <= MEGASAS_MAX_SECTORS)) {
+ instance->max_sectors_per_req = max_sectors;
+ } else {
+				printk(KERN_INFO "megasas: max_sectors should be > 0"
+					" and <= %d (or < 1MB for GEN2 controller)\n",
+ instance->max_sectors_per_req);
+ }
+ }
+ }
+
+ host->max_sectors = instance->max_sectors_per_req;
+ host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
+ host->max_channel = MEGASAS_MAX_CHANNELS - 1;
+ host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
+ host->max_lun = MEGASAS_MAX_LUN;
+ host->max_cmd_len = 16;
+
+ /* Fusion only supports host reset */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ host->hostt->eh_device_reset_handler = NULL;
+ host->hostt->eh_bus_reset_handler = NULL;
+ }
+
+ /*
+ * Notify the mid-layer about the new controller
+ */
+ if (scsi_add_host(host, &instance->pdev->dev)) {
+ printk(KERN_DEBUG "megasas: scsi_add_host failed\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Trigger SCSI to scan our drives
+ */
+ scsi_scan_host(host);
+ return 0;
+}
+
+static int
+megasas_set_dma_mask(struct pci_dev *pdev)
+{
+ /*
+	 * All our controllers are capable of performing 64-bit DMA
+ */
+ if (IS_DMA64) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ goto fail_set_dma_mask;
+ }
+ } else {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ goto fail_set_dma_mask;
+ }
+ /*
+ * Ensure that all data structures are allocated in 32-bit
+ * memory.
+ */
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ /* Try 32bit DMA mask and 32 bit Consistent dma mask */
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+			dev_info(&pdev->dev, "set 32bit DMA mask"
+				" and 32 bit consistent mask\n");
+ else
+ goto fail_set_dma_mask;
+ }
+
+ return 0;
+
+fail_set_dma_mask:
+ return 1;
+}
+
+/**
+ * megasas_probe_one - PCI hotplug entry point
+ * @pdev: PCI device structure
+ * @id: PCI ids of supported hotplugged adapter
+ */
+static int megasas_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rval, pos, i, j, cpu;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+ u16 control = 0;
+
+ /* Reset MSI-X in the kdump kernel */
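+	/*
+	 * A crashed kernel may have left MSI-X enabled on the adapter, so
+	 * clear PCI_MSIX_FLAGS_ENABLE to start the kdump kernel from a
+	 * clean interrupt configuration.
+	 */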
+ if (reset_devices) {
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
+ &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI-X\n");
+ pci_write_config_word(pdev,
+ pos + PCI_MSIX_FLAGS,
+ control &
+ ~PCI_MSIX_FLAGS_ENABLE);
+ }
+ }
+ }
+
+ /*
+ * Announce PCI information
+ */
+ printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+
+ printk("bus %d:slot %d:func %d\n",
+ pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+ /*
+	 * PCI prepping: enable device, set bus mastering and dma mask
+ */
+ rval = pci_enable_device_mem(pdev);
+
+ if (rval) {
+ return rval;
+ }
+
+ pci_set_master(pdev);
+
+ if (megasas_set_dma_mask(pdev))
+ goto fail_set_dma_mask;
+
+ host = scsi_host_alloc(&megasas_template,
+ sizeof(struct megasas_instance));
+
+ if (!host) {
+ printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n");
+ goto fail_alloc_instance;
+ }
+
+ instance = (struct megasas_instance *)host->hostdata;
+ memset(instance, 0, sizeof(*instance));
+	atomic_set(&instance->fw_reset_no_pci_access, 0);
+ instance->pdev = pdev;
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_FURY:
+ {
+ struct fusion_context *fusion;
+
+ instance->ctrl_context =
+ kzalloc(sizeof(struct fusion_context), GFP_KERNEL);
+ if (!instance->ctrl_context) {
+ printk(KERN_DEBUG "megasas: Failed to allocate "
+ "memory for Fusion context info\n");
+ goto fail_alloc_dma_buf;
+ }
+ fusion = instance->ctrl_context;
+ INIT_LIST_HEAD(&fusion->cmd_pool);
+ spin_lock_init(&fusion->cmd_pool_lock);
+ }
+ break;
+ default: /* For all other supported controllers */
+
+ instance->producer =
+ pci_alloc_consistent(pdev, sizeof(u32),
+ &instance->producer_h);
+ instance->consumer =
+ pci_alloc_consistent(pdev, sizeof(u32),
+ &instance->consumer_h);
+
+ if (!instance->producer || !instance->consumer) {
+			printk(KERN_DEBUG "megasas: Failed to allocate"
+			       " memory for producer, consumer\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ *instance->producer = 0;
+ *instance->consumer = 0;
+ break;
+ }
+
+ megasas_poll_wait_aen = 0;
+ instance->flag_ieee = 0;
+ instance->ev = NULL;
+ instance->issuepend_done = 1;
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ instance->is_imr = 0;
+
+ instance->evt_detail = pci_alloc_consistent(pdev,
+ sizeof(struct
+ megasas_evt_detail),
+ &instance->evt_detail_h);
+
+ if (!instance->evt_detail) {
+ printk(KERN_DEBUG "megasas: Failed to allocate memory for "
+ "event detail structure\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ /*
+ * Initialize locks and queues
+ */
+ INIT_LIST_HEAD(&instance->cmd_pool);
+ INIT_LIST_HEAD(&instance->internal_reset_pending_q);
+
+	atomic_set(&instance->fw_outstanding, 0);
+
+ init_waitqueue_head(&instance->int_cmd_wait_q);
+ init_waitqueue_head(&instance->abort_cmd_wait_q);
+
+ spin_lock_init(&instance->cmd_pool_lock);
+ spin_lock_init(&instance->hba_lock);
+ spin_lock_init(&instance->completion_lock);
+
+ mutex_init(&instance->aen_mutex);
+ mutex_init(&instance->reset_mutex);
+
+ /*
+ * Initialize PCI related and misc parameters
+ */
+ instance->host = host;
+ instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
+ instance->flag_ieee = 1;
+ sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
+ } else
+ sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
+
+ megasas_dbg_lvl = 0;
+ instance->flag = 0;
+ instance->unload = 1;
+ instance->last_time = 0;
+ instance->disableOnlineCtrlReset = 1;
+ instance->UnevenSpanSupport = 0;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
+ else
+ INIT_WORK(&instance->work_init, process_fw_state_change_wq);
+
+ /*
+ * Initialize MFI Firmware
+ */
+ if (megasas_init_fw(instance))
+ goto fail_init_mfi;
+
+ if (instance->requestorId) {
+ if (instance->PlasmaFW111) {
+ instance->vf_affiliation_111 =
+ pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &instance->vf_affiliation_111_h);
+ if (!instance->vf_affiliation_111)
+ printk(KERN_WARNING "megasas: Can't allocate "
+ "memory for VF affiliation buffer\n");
+ } else {
+ instance->vf_affiliation =
+ pci_alloc_consistent(pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &instance->vf_affiliation_h);
+ if (!instance->vf_affiliation)
+ printk(KERN_WARNING "megasas: Can't allocate "
+ "memory for VF affiliation buffer\n");
+ }
+ }
+
+retry_irq_register:
+ /*
+ * Register IRQ
+ */
+ if (instance->msix_vectors) {
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < instance->msix_vectors; i++) {
+ instance->irq_context[i].instance = instance;
+ instance->irq_context[i].MSIxIndex = i;
+ if (request_irq(instance->msixentry[i].vector,
+ instance->instancet->service_isr, 0,
+ "megasas",
+ &instance->irq_context[i])) {
+ printk(KERN_DEBUG "megasas: Failed to "
+ "register IRQ for vector %d.\n", i);
+ for (j = 0; j < i; j++) {
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
+ free_irq(
+ instance->msixentry[j].vector,
+ &instance->irq_context[j]);
+ }
+ /* Retry irq register for IO_APIC */
+ instance->msix_vectors = 0;
+ goto retry_irq_register;
+ }
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+				dev_err(&instance->pdev->dev, "Error setting"
+					" affinity hint for cpu %d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+ } else {
+ instance->irq_context[0].instance = instance;
+ instance->irq_context[0].MSIxIndex = 0;
+ if (request_irq(pdev->irq, instance->instancet->service_isr,
+ IRQF_SHARED, "megasas",
+ &instance->irq_context[0])) {
+ printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
+ goto fail_irq;
+ }
+ }
+
+ instance->instancet->enable_intr(instance);
+
+ /*
+ * Store instance in PCI softstate
+ */
+ pci_set_drvdata(pdev, instance);
+
+ /*
+ * Add this controller to megasas_mgmt_info structure so that it
+ * can be exported to management applications
+ */
+ megasas_mgmt_info.count++;
+ megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
+ megasas_mgmt_info.max_index++;
+
+ /*
+ * Register with SCSI mid-layer
+ */
+ if (megasas_io_attach(instance))
+ goto fail_io_attach;
+
+ instance->unload = 0;
+
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ if (megasas_start_aen(instance)) {
+ printk(KERN_DEBUG "megasas: start aen failed\n");
+ goto fail_start_aen;
+ }
+
+ return 0;
+
+ fail_start_aen:
+ fail_io_attach:
+ megasas_mgmt_info.count--;
+ megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
+ megasas_mgmt_info.max_index--;
+
+ instance->instancet->disable_intr(instance);
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ }
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+fail_irq:
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ megasas_release_fusion(instance);
+ else
+ megasas_release_mfi(instance);
+ fail_init_mfi:
+ if (instance->msix_vectors)
+ pci_disable_msix(instance->pdev);
+ fail_alloc_dma_buf:
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail,
+ instance->evt_detail_h);
+
+ if (instance->producer)
+ pci_free_consistent(pdev, sizeof(u32), instance->producer,
+ instance->producer_h);
+ if (instance->consumer)
+ pci_free_consistent(pdev, sizeof(u32), instance->consumer,
+ instance->consumer_h);
+ scsi_host_put(host);
+
+ fail_alloc_instance:
+ fail_set_dma_mask:
+ pci_disable_device(pdev);
+
+ return -ENODEV;
+}
+
+/**
+ * megasas_flush_cache - Requests FW to flush all its caches
+ * @instance: Adapter soft state
+ */
+static void megasas_flush_cache(struct megasas_instance *instance)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ return;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return;
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 0;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = 0;
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
+ dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
+
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+		dev_err(&instance->pdev->dev, "Command timed out"
+ " from %s\n", __func__);
+
+ megasas_return_cmd(instance, cmd);
+
+ return;
+}
+
+/**
+ * megasas_shutdown_controller - Instructs FW to shutdown the controller
+ * @instance: Adapter soft state
+ * @opcode: Shutdown/Hibernate
+ */
+static void megasas_shutdown_controller(struct megasas_instance *instance,
+ u32 opcode)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ return;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd)
+ return;
+
+ if (instance->aen_cmd)
+ megasas_issue_blocked_abort_cmd(instance,
+ instance->aen_cmd, 30);
+ if (instance->map_update_cmd)
+ megasas_issue_blocked_abort_cmd(instance,
+ instance->map_update_cmd, 30);
+ dcmd = &cmd->frame->dcmd;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0x0;
+ dcmd->sge_count = 0;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = 0;
+ dcmd->opcode = cpu_to_le32(opcode);
+
+ if (megasas_issue_blocked_cmd(instance, cmd, 30))
+		dev_err(&instance->pdev->dev, "Command timed out"
+			" from %s\n", __func__);
+
+ megasas_return_cmd(instance, cmd);
+
+ return;
+}
+
+#ifdef CONFIG_PM
+/**
+ * megasas_suspend - driver suspend entry point
+ * @pdev: PCI device structure
+ * @state: PCI power state to suspend routine
+ */
+static int
+megasas_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+ int i;
+
+ instance = pci_get_drvdata(pdev);
+ host = instance->host;
+ instance->unload = 1;
+
+ /* Shutdown SR-IOV heartbeat timer */
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+
+ megasas_flush_cache(instance);
+ megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
+
+	/* cancel the delayed work if it is still queued */
+ if (instance->ev != NULL) {
+ struct megasas_aen_event *ev = instance->ev;
+ cancel_delayed_work_sync(&ev->hotplug_work);
+ instance->ev = NULL;
+ }
+
+ tasklet_kill(&instance->isr_tasklet);
+
+ pci_set_drvdata(instance->pdev, instance);
+ instance->instancet->disable_intr(instance);
+
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ }
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ if (instance->msix_vectors)
+ pci_disable_msix(instance->pdev);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+/**
+ * megasas_resume- driver resume entry point
+ * @pdev: PCI device structure
+ */
+static int
+megasas_resume(struct pci_dev *pdev)
+{
+ int rval, i, j, cpu;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+
+ instance = pci_get_drvdata(pdev);
+ host = instance->host;
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+
+ /*
+	 * PCI prepping: enable device, set bus mastering and dma mask
+ */
+ rval = pci_enable_device_mem(pdev);
+
+ if (rval) {
+ printk(KERN_ERR "megasas: Enable device failed\n");
+ return rval;
+ }
+
+ pci_set_master(pdev);
+
+ if (megasas_set_dma_mask(pdev))
+ goto fail_set_dma_mask;
+
+ /*
+ * Initialize MFI Firmware
+ */
+
+ atomic_set(&instance->fw_outstanding, 0);
+
+ /*
+ * We expect the FW state to be READY
+ */
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+
+ /* Now re-enable MSI-X */
+ if (instance->msix_vectors)
+ pci_enable_msix(instance->pdev, instance->msixentry,
+ instance->msix_vectors);
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_FURY:
+ {
+ megasas_reset_reply_desc(instance);
+ if (megasas_ioc_init_fusion(instance)) {
+ megasas_free_cmds(instance);
+ megasas_free_cmds_fusion(instance);
+ goto fail_init_mfi;
+ }
+ if (!megasas_get_map_info(instance))
+ megasas_sync_map_info(instance);
+ }
+ break;
+ default:
+ *instance->producer = 0;
+ *instance->consumer = 0;
+ if (megasas_issue_init_mfi(instance))
+ goto fail_init_mfi;
+ break;
+ }
+
+ tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
+ (unsigned long)instance);
+
+ /*
+ * Register IRQ
+ */
+ if (instance->msix_vectors) {
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0 ; i < instance->msix_vectors; i++) {
+ instance->irq_context[i].instance = instance;
+ instance->irq_context[i].MSIxIndex = i;
+ if (request_irq(instance->msixentry[i].vector,
+ instance->instancet->service_isr, 0,
+ "megasas",
+ &instance->irq_context[i])) {
+ printk(KERN_DEBUG "megasas: Failed to "
+ "register IRQ for vector %d.\n", i);
+ for (j = 0; j < i; j++) {
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
+ free_irq(
+ instance->msixentry[j].vector,
+ &instance->irq_context[j]);
+ }
+ goto fail_irq;
+ }
+
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+				dev_err(&instance->pdev->dev, "Error setting"
+					" affinity hint for cpu %d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+ } else {
+ instance->irq_context[0].instance = instance;
+ instance->irq_context[0].MSIxIndex = 0;
+ if (request_irq(pdev->irq, instance->instancet->service_isr,
+ IRQF_SHARED, "megasas",
+ &instance->irq_context[0])) {
+ printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
+ goto fail_irq;
+ }
+ }
+
+ /* Re-launch SR-IOV heartbeat timer */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 0))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
+ instance->instancet->enable_intr(instance);
+ instance->unload = 0;
+
+ /*
+ * Initiate AEN (Asynchronous Event Notification)
+ */
+ if (megasas_start_aen(instance))
+ printk(KERN_ERR "megasas: Start AEN failed\n");
+
+ return 0;
+
+fail_irq:
+fail_init_mfi:
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail,
+ instance->evt_detail_h);
+
+ if (instance->producer)
+ pci_free_consistent(pdev, sizeof(u32), instance->producer,
+ instance->producer_h);
+ if (instance->consumer)
+ pci_free_consistent(pdev, sizeof(u32), instance->consumer,
+ instance->consumer_h);
+ scsi_host_put(host);
+
+fail_set_dma_mask:
+fail_ready_state:
+
+ pci_disable_device(pdev);
+
+ return -ENODEV;
+}
+#else
+#define megasas_suspend NULL
+#define megasas_resume NULL
+#endif
+
+/**
+ * megasas_detach_one -	PCI hot-unplug entry point
+ * @pdev: PCI device structure
+ */
+static void megasas_detach_one(struct pci_dev *pdev)
+{
+ int i;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+ struct fusion_context *fusion;
+
+ instance = pci_get_drvdata(pdev);
+ instance->unload = 1;
+ host = instance->host;
+ fusion = instance->ctrl_context;
+
+ /* Shutdown SR-IOV heartbeat timer */
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+
+ scsi_remove_host(instance->host);
+ megasas_flush_cache(instance);
+ megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+
+	/* cancel the delayed work if it is still queued */
+ if (instance->ev != NULL) {
+ struct megasas_aen_event *ev = instance->ev;
+ cancel_delayed_work_sync(&ev->hotplug_work);
+ instance->ev = NULL;
+ }
+
+ /* cancel all wait events */
+ wake_up_all(&instance->int_cmd_wait_q);
+
+ tasklet_kill(&instance->isr_tasklet);
+
+ /*
+ * Take the instance off the instance array. Note that we will not
+	 * decrement the max_index. We let this array be a sparse array.
+ */
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+ if (megasas_mgmt_info.instance[i] == instance) {
+ megasas_mgmt_info.count--;
+ megasas_mgmt_info.instance[i] = NULL;
+
+ break;
+ }
+ }
+
+ instance->instancet->disable_intr(instance);
+
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ }
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ if (instance->msix_vectors)
+ pci_disable_msix(instance->pdev);
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_FURY:
+ megasas_release_fusion(instance);
+ for (i = 0; i < 2 ; i++)
+ if (fusion->ld_map[i])
+ dma_free_coherent(&instance->pdev->dev,
+ fusion->map_sz,
+ fusion->ld_map[i],
+ fusion->
+ ld_map_phys[i]);
+ kfree(instance->ctrl_context);
+ break;
+ default:
+ megasas_release_mfi(instance);
+ pci_free_consistent(pdev, sizeof(u32),
+ instance->producer,
+ instance->producer_h);
+ pci_free_consistent(pdev, sizeof(u32),
+ instance->consumer,
+ instance->consumer_h);
+ break;
+ }
+
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail, instance->evt_detail_h);
+
+ if (instance->vf_affiliation)
+ pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ instance->vf_affiliation,
+ instance->vf_affiliation_h);
+
+ if (instance->vf_affiliation_111)
+ pci_free_consistent(pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ instance->vf_affiliation_111,
+ instance->vf_affiliation_111_h);
+
+ if (instance->hb_host_mem)
+ pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+ instance->hb_host_mem,
+ instance->hb_host_mem_h);
+
+ scsi_host_put(host);
+
+ pci_disable_device(pdev);
+
+ return;
+}
+
+/**
+ * megasas_shutdown - Shutdown entry point
+ * @pdev:			PCI device structure
+ */
+static void megasas_shutdown(struct pci_dev *pdev)
+{
+ int i;
+ struct megasas_instance *instance = pci_get_drvdata(pdev);
+
+ instance->unload = 1;
+ megasas_flush_cache(instance);
+ megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+ instance->instancet->disable_intr(instance);
+ if (instance->msix_vectors)
+ for (i = 0; i < instance->msix_vectors; i++) {
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
+ free_irq(instance->msixentry[i].vector,
+ &instance->irq_context[i]);
+ }
+ else
+ free_irq(instance->pdev->irq, &instance->irq_context[0]);
+ if (instance->msix_vectors)
+ pci_disable_msix(instance->pdev);
+}
+
+/**
+ * megasas_mgmt_open - char node "open" entry point
+ */
+static int megasas_mgmt_open(struct inode *inode, struct file *filep)
+{
+ /*
+ * Allow only those users with admin rights
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return 0;
+}
+
+/**
+ * megasas_mgmt_fasync - Async notifier registration from applications
+ *
+ * This function adds the calling process to a driver global queue. When an
+ * event occurs, SIGIO will be sent to all processes in this queue.
+ */
+static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
+{
+ int rc;
+
+ mutex_lock(&megasas_async_queue_mutex);
+
+ rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
+
+ mutex_unlock(&megasas_async_queue_mutex);
+
+ if (rc >= 0) {
+ /* For sanity check when we get ioctl */
+ filep->private_data = filep;
+ return 0;
+ }
+
+ printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
+
+ return rc;
+}
+
+/**
+ * megasas_mgmt_poll - char node "poll" entry point
+ */
+static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask;
+ unsigned long flags;
+ poll_wait(file, &megasas_poll_wait, wait);
+ spin_lock_irqsave(&poll_aen_lock, flags);
+ if (megasas_poll_wait_aen)
+ mask = (POLLIN | POLLRDNORM);
+ else
+ mask = 0;
+ spin_unlock_irqrestore(&poll_aen_lock, flags);
+ return mask;
+}
+
+/**
+ * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
+ * @instance: Adapter soft state
+ * @user_ioc:			User's ioctl packet
+ * @ioc:				Kernel copy of the user's ioctl packet
+ */
+static int
+megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
+ struct megasas_iocpacket __user * user_ioc,
+ struct megasas_iocpacket *ioc)
+{
+ struct megasas_sge32 *kern_sge32;
+ struct megasas_cmd *cmd;
+ void *kbuff_arr[MAX_IOCTL_SGE];
+ dma_addr_t buf_handle = 0;
+ int error = 0, i;
+ void *sense = NULL;
+ dma_addr_t sense_handle;
+ unsigned long *sense_ptr;
+
+ memset(kbuff_arr, 0, sizeof(kbuff_arr));
+
+ if (ioc->sge_count > MAX_IOCTL_SGE) {
+ printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n",
+ ioc->sge_count, MAX_IOCTL_SGE);
+ return -EINVAL;
+ }
+
+ cmd = megasas_get_cmd(instance);
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * User's IOCTL packet has 2 frames (maximum). Copy those two
+ * frames into our cmd's frames. cmd->frame's context will get
+ * overwritten when we copy from user's frames. So set that value
+ * alone separately
+ */
+ memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
+ cmd->frame->hdr.context = cpu_to_le32(cmd->index);
+ cmd->frame->hdr.pad_0 = 0;
+ cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
+ MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64));
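+	/*
+	 * The SGL and sense buffers built below use 32-bit addresses, so
+	 * any IEEE/64-bit SGL or 64-bit sense flags set by the application
+	 * are cleared from the copied frame.
+	 */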
+
+ /*
+ * The management interface between applications and the fw uses
+	 * MFI frames. E.g., RAID configuration changes, LD property changes,
+	 * etc. are accomplished through different kinds of MFI frames. The
+ * driver needs to care only about substituting user buffers with
+ * kernel buffers in SGLs. The location of SGL is embedded in the
+ * struct iocpacket itself.
+ */
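+	/*
+	 * In short, for each user SGE the code below allocates a DMA-able
+	 * mirror buffer, copies the user data in, patches the frame's SGE
+	 * to point at the mirror, and after completion copies the data
+	 * (and sense, if requested) back out to user space.
+	 */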
+ kern_sge32 = (struct megasas_sge32 *)
+ ((unsigned long)cmd->frame + ioc->sgl_off);
+
+ /*
+ * For each user buffer, create a mirror buffer and copy in
+ */
+ for (i = 0; i < ioc->sge_count; i++) {
+ if (!ioc->sgl[i].iov_len)
+ continue;
+
+ kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
+ ioc->sgl[i].iov_len,
+ &buf_handle, GFP_KERNEL);
+ if (!kbuff_arr[i]) {
+ printk(KERN_DEBUG "megasas: Failed to alloc "
+			       "kernel SGL buffer for IOCTL\n");
+ error = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * We don't change the dma_coherent_mask, so
+		 * dma_alloc_coherent only returns 32-bit addresses
+ */
+ kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
+ kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+
+ /*
+ * We created a kernel buffer corresponding to the
+ * user buffer. Now copy in from the user buffer
+ */
+ if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
+ (u32) (ioc->sgl[i].iov_len))) {
+ error = -EFAULT;
+ goto out;
+ }
+ }
+
+ if (ioc->sense_len) {
+ sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
+ &sense_handle, GFP_KERNEL);
+ if (!sense) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ sense_ptr =
+ (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
+ *sense_ptr = cpu_to_le32(sense_handle);
+ }
+
+ /*
+ * Set the sync_cmd flag so that the ISR knows not to complete this
+ * cmd to the SCSI mid-layer
+ */
+ cmd->sync_cmd = 1;
+ megasas_issue_blocked_cmd(instance, cmd, 0);
+ cmd->sync_cmd = 0;
+
+ /*
+ * copy out the kernel buffers to user buffers
+ */
+ for (i = 0; i < ioc->sge_count; i++) {
+ if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
+ ioc->sgl[i].iov_len)) {
+ error = -EFAULT;
+ goto out;
+ }
+ }
+
+ /*
+ * copy out the sense
+ */
+ if (ioc->sense_len) {
+ /*
+ * sense_ptr points to the location that has the user
+ * sense buffer address
+ */
+ sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
+ ioc->sense_off);
+
+ if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
+ sense, ioc->sense_len)) {
+ printk(KERN_ERR "megasas: Failed to copy out to user "
+ "sense data\n");
+ error = -EFAULT;
+ goto out;
+ }
+ }
+
+ /*
+ * copy the status codes returned by the fw
+ */
+ if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
+ &cmd->frame->hdr.cmd_status, sizeof(u8))) {
+ printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
+ error = -EFAULT;
+ }
+
+ out:
+ if (sense) {
+ dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
+ sense, sense_handle);
+ }
+
+ for (i = 0; i < ioc->sge_count; i++) {
+ if (kbuff_arr[i])
+ dma_free_coherent(&instance->pdev->dev,
+ le32_to_cpu(kern_sge32[i].length),
+ kbuff_arr[i],
+ le32_to_cpu(kern_sge32[i].phys_addr));
+ }
+
+ megasas_return_cmd(instance, cmd);
+ return error;
+}
+
+static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
+{
+ struct megasas_iocpacket __user *user_ioc =
+ (struct megasas_iocpacket __user *)arg;
+ struct megasas_iocpacket *ioc;
+ struct megasas_instance *instance;
+ int error;
+ int i;
+ unsigned long flags;
+ u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+
+ ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+ if (!ioc)
+ return -ENOMEM;
+
+ if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
+ error = -EFAULT;
+ goto out_kfree_ioc;
+ }
+
+ instance = megasas_lookup_instance(ioc->host_no);
+ if (!instance) {
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
+ /* Adjust ioctl wait time for VF mode */
+ if (instance->requestorId)
+ wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+ /* Block ioctls in VF mode */
+ if (instance->requestorId && !allow_vf_ioctls) {
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+		printk(KERN_ERR "megasas: Controller in critical error\n");
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
+ if (instance->unload == 1) {
+ error = -ENODEV;
+ goto out_kfree_ioc;
+ }
+
+ /*
+ * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
+ */
+ if (down_interruptible(&instance->ioctl_sem)) {
+ error = -ERESTARTSYS;
+ goto out_kfree_ioc;
+ }
+
+ for (i = 0; i < wait_time; i++) {
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			printk(KERN_NOTICE "megasas: waiting "
+				"for controller reset to finish\n");
+ }
+
+ msleep(1000);
+ }
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+		printk(KERN_ERR "megaraid_sas: timed out while "
+			"waiting for HBA to recover\n");
+ error = -ENODEV;
+ goto out_up;
+ }
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
+ out_up:
+ up(&instance->ioctl_sem);
+
+ out_kfree_ioc:
+ kfree(ioc);
+ return error;
+}
+
+static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
+{
+ struct megasas_instance *instance;
+ struct megasas_aen aen;
+ int error;
+ int i;
+ unsigned long flags;
+ u32 wait_time = MEGASAS_RESET_WAIT_TIME;
+
+ if (file->private_data != file) {
+ printk(KERN_DEBUG "megasas: fasync_helper was not "
+ "called first\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
+ return -EFAULT;
+
+ instance = megasas_lookup_instance(aen.host_no);
+
+ if (!instance)
+ return -ENODEV;
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ return -ENODEV;
+ }
+
+ if (instance->unload == 1) {
+ return -ENODEV;
+ }
+
+ for (i = 0; i < wait_time; i++) {
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock,
+ flags);
+ break;
+ }
+
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+			printk(KERN_NOTICE "megasas: waiting for "
+				"controller reset to finish\n");
+ }
+
+ msleep(1000);
+ }
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+		printk(KERN_ERR "megaraid_sas: timed out while waiting "
+			"for HBA to recover.\n");
+ return -ENODEV;
+ }
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ mutex_lock(&instance->aen_mutex);
+ error = megasas_register_aen(instance, aen.seq_num,
+ aen.class_locale_word);
+ mutex_unlock(&instance->aen_mutex);
+ return error;
+}
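+
+/*
+ * Illustrative user-space sketch (not part of the driver): registering
+ * for asynchronous event notification through the path above.  Field
+ * names follow struct megasas_aen as used in this file; the sequence
+ * number and class/locale word are placeholder values, and fasync must
+ * already have been set up on the file descriptor, as checked above.
+ *
+ *	struct megasas_aen aen;
+ *
+ *	aen.host_no = 0;
+ *	aen.seq_num = last_seen_seq_num + 1;
+ *	aen.class_locale_word = class_locale_word;
+ *	if (ioctl(fd, MEGASAS_IOC_GET_AEN, &aen) < 0)
+ *		perror("MEGASAS_IOC_GET_AEN");
+ */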
+
+/**
+ * megasas_mgmt_ioctl - char node ioctl entry point
+ */
+static long
+megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case MEGASAS_IOC_FIRMWARE:
+ return megasas_mgmt_ioctl_fw(file, arg);
+
+ case MEGASAS_IOC_GET_AEN:
+ return megasas_mgmt_ioctl_aen(file, arg);
+ }
+
+ return -ENOTTY;
+}
+
+#ifdef CONFIG_COMPAT
+static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
+{
+ struct compat_megasas_iocpacket __user *cioc =
+ (struct compat_megasas_iocpacket __user *)arg;
+ struct megasas_iocpacket __user *ioc =
+ compat_alloc_user_space(sizeof(struct megasas_iocpacket));
+ int i;
+ int error = 0;
+ compat_uptr_t ptr;
+
+ if (clear_user(ioc, sizeof(*ioc)))
+ return -EFAULT;
+
+ if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
+ copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
+ copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
+ copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
+ copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
+ copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
+ return -EFAULT;
+
+ /*
+ * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
+ * sense_len is not null, so prepare the 64bit value under
+ * the same condition.
+ */
+ if (ioc->sense_len) {
+ void __user **sense_ioc_ptr =
+ (void __user **)(ioc->frame.raw + ioc->sense_off);
+ compat_uptr_t *sense_cioc_ptr =
+ (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+ if (get_user(ptr, sense_cioc_ptr) ||
+ put_user(compat_ptr(ptr), sense_ioc_ptr))
+ return -EFAULT;
+ }
+
+ for (i = 0; i < MAX_IOCTL_SGE; i++) {
+ if (get_user(ptr, &cioc->sgl[i].iov_base) ||
+ put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
+ copy_in_user(&ioc->sgl[i].iov_len,
+ &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
+ return -EFAULT;
+ }
+
+ error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
+
+ if (copy_in_user(&cioc->frame.hdr.cmd_status,
+ &ioc->frame.hdr.cmd_status, sizeof(u8))) {
+ printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
+ return -EFAULT;
+ }
+ return error;
+}
+
+static long
+megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case MEGASAS_IOC_FIRMWARE32:
+ return megasas_mgmt_compat_ioctl_fw(file, arg);
+ case MEGASAS_IOC_GET_AEN:
+ return megasas_mgmt_ioctl_aen(file, arg);
+ }
+
+ return -ENOTTY;
+}
+#endif
+
+/*
+ * File operations structure for management interface
+ */
+static const struct file_operations megasas_mgmt_fops = {
+ .owner = THIS_MODULE,
+ .open = megasas_mgmt_open,
+ .fasync = megasas_mgmt_fasync,
+ .unlocked_ioctl = megasas_mgmt_ioctl,
+ .poll = megasas_mgmt_poll,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = megasas_mgmt_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
+/*
+ * PCI hotplug support registration structure
+ */
+static struct pci_driver megasas_pci_driver = {
+
+ .name = "megaraid_sas",
+ .id_table = megasas_pci_table,
+ .probe = megasas_probe_one,
+ .remove = megasas_detach_one,
+ .suspend = megasas_suspend,
+ .resume = megasas_resume,
+ .shutdown = megasas_shutdown,
+};
+
+/*
+ * Sysfs driver attributes
+ */
+static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
+{
+ return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
+ MEGASAS_VERSION);
+}
+
+static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
+
+static ssize_t
+megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
+{
+ return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
+ MEGASAS_RELDATE);
+}
+
+static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
+ NULL);
+
+static ssize_t
+megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_poll_for_event);
+}
+
+static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
+ megasas_sysfs_show_support_poll_for_event, NULL);
+
+static ssize_t
+megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_device_change);
+}
+
+static DRIVER_ATTR(support_device_change, S_IRUGO,
+ megasas_sysfs_show_support_device_change, NULL);
+
+static ssize_t
+megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", megasas_dbg_lvl);
+}
+
+static ssize_t
+megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
+{
+ int retval = count;
+	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
+ printk(KERN_ERR "megasas: could not set dbg_lvl\n");
+ retval = -EINVAL;
+ }
+ return retval;
+}
+
+static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
+ megasas_sysfs_set_dbg_lvl);
+
+static void
+megasas_aen_polling(struct work_struct *work)
+{
+ struct megasas_aen_event *ev =
+ container_of(work, struct megasas_aen_event, hotplug_work.work);
+ struct megasas_instance *instance = ev->instance;
+ union megasas_evt_class_locale class_locale;
+ struct Scsi_Host *host;
+ struct scsi_device *sdev1;
+ u16 pd_index = 0;
+ u16 ld_index = 0;
+ int i, j, doscan = 0;
+ u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
+ int error;
+
+ if (!instance) {
+ printk(KERN_ERR "invalid instance!\n");
+ kfree(ev);
+ return;
+ }
+
+ /* Adjust event workqueue thread wait time for VF mode */
+ if (instance->requestorId)
+ wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
+
+ /* Don't run the event workqueue thread if OCR is running */
+ for (i = 0; i < wait_time; i++) {
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
+ break;
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ printk(KERN_NOTICE "megasas: %s waiting for "
+ "controller reset to finish for scsi%d\n",
+ __func__, instance->host->host_no);
+ }
+ msleep(1000);
+ }
+
+ instance->ev = NULL;
+ host = instance->host;
+ if (instance->evt_detail) {
+
+ switch (le32_to_cpu(instance->evt_detail->code)) {
+ case MR_EVT_PD_INSERTED:
+ if (megasas_get_pd_list(instance) == 0) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ pd_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 =
+ scsi_device_lookup(host, i, j, 0);
+
+ if (instance->pd_list[pd_index].driveState
+ == MR_PD_STATE_SYSTEM) {
+ if (!sdev1) {
+ scsi_add_device(host, i, j, 0);
+ }
+
+ if (sdev1)
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ doscan = 0;
+ break;
+
+ case MR_EVT_PD_REMOVED:
+ if (megasas_get_pd_list(instance) == 0) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ pd_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 =
+ scsi_device_lookup(host, i, j, 0);
+
+ if (instance->pd_list[pd_index].driveState
+ == MR_PD_STATE_SYSTEM) {
+ if (sdev1) {
+ scsi_device_put(sdev1);
+ }
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ }
+ doscan = 0;
+ break;
+
+ case MR_EVT_LD_OFFLINE:
+ case MR_EVT_CFG_CLEARED:
+ case MR_EVT_LD_DELETED:
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (sdev1)
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ doscan = 0;
+ }
+ break;
+ case MR_EVT_LD_CREATED:
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0;
+ j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ }
+ if (sdev1)
+ scsi_device_put(sdev1);
+ }
+ }
+ doscan = 0;
+ }
+ break;
+ case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
+ case MR_EVT_FOREIGN_CFG_IMPORTED:
+ case MR_EVT_LD_STATE_CHANGE:
+ doscan = 1;
+ break;
+ default:
+ doscan = 0;
+ break;
+ }
+ } else {
+ printk(KERN_ERR "invalid evt_detail!\n");
+ kfree(ev);
+ return;
+ }
+
+ if (doscan) {
+ printk(KERN_INFO "megaraid_sas: scanning for scsi%d...\n",
+ instance->host->host_no);
+ if (megasas_get_pd_list(instance) == 0) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+ sdev1 = scsi_device_lookup(host, i, j, 0);
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
+ if (!sdev1) {
+ scsi_add_device(host, i, j, 0);
+ }
+ if (sdev1)
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ }
+
+ if (!instance->requestorId ||
+ (instance->requestorId &&
+ megasas_get_ld_vf_affiliation(instance, 0))) {
+ if (megasas_ld_list_query(instance,
+ MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
+ megasas_get_ld_list(instance);
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
+ j++) {
+ ld_index =
+ (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+
+ sdev1 = scsi_device_lookup(host,
+ MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ if (instance->ld_ids[ld_index]
+ != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ else
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
+ }
+ }
+ }
+ }
+ }
+ }
+
+	if (instance->aen_cmd != NULL) {
+		kfree(ev);
+		return;
+ }
+
+ seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+
+ /* Register AEN with FW for latest sequence number plus 1 */
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+ mutex_lock(&instance->aen_mutex);
+ error = megasas_register_aen(instance, seq_num,
+ class_locale.word);
+ mutex_unlock(&instance->aen_mutex);
+
+ if (error)
+ printk(KERN_ERR "register aen failed error %x\n", error);
+
+ kfree(ev);
+}
+
+/**
+ * megasas_init - Driver load entry point
+ */
+static int __init megasas_init(void)
+{
+ int rval;
+
+ /*
+ * Announce driver version and other information
+ */
+ printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
+ MEGASAS_EXT_VERSION);
+
+ spin_lock_init(&poll_aen_lock);
+
+ support_poll_for_event = 2;
+ support_device_change = 1;
+
+ memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
+
+ /*
+ * Register character device node
+ */
+ rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
+
+ if (rval < 0) {
+		printk(KERN_DEBUG "megasas: failed to register chrdev\n");
+ return rval;
+ }
+
+ megasas_mgmt_majorno = rval;
+
+ /*
+ * Register ourselves as PCI hotplug module
+ */
+ rval = pci_register_driver(&megasas_pci_driver);
+
+ if (rval) {
+		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
+ goto err_pcidrv;
+ }
+
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_version);
+ if (rval)
+ goto err_dcf_attr_ver;
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_release_date);
+ if (rval)
+ goto err_dcf_rel_date;
+
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_poll_for_event);
+ if (rval)
+ goto err_dcf_support_poll_for_event;
+
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_dbg_lvl);
+ if (rval)
+ goto err_dcf_dbg_lvl;
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_device_change);
+ if (rval)
+ goto err_dcf_support_device_change;
+
+ return rval;
+
+err_dcf_support_device_change:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_dbg_lvl);
+err_dcf_dbg_lvl:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_poll_for_event);
+
+err_dcf_support_poll_for_event:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_release_date);
+
+err_dcf_rel_date:
+ driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+err_dcf_attr_ver:
+ pci_unregister_driver(&megasas_pci_driver);
+err_pcidrv:
+ unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
+ return rval;
+}
+
+/**
+ * megasas_exit - Driver unload entry point
+ */
+static void __exit megasas_exit(void)
+{
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_dbg_lvl);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_poll_for_event);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_device_change);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_release_date);
+ driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+
+ pci_unregister_driver(&megasas_pci_driver);
+ unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
+}
+
+module_init(megasas_init);
+module_exit(megasas_exit);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
new file mode 100644
index 00000000000..081bfff12d0
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -0,0 +1,1241 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2009-2012 LSI Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * FILE: megaraid_sas_fp.c
+ *
+ * Authors: LSI Corporation
+ * Sumant Patro
+ * Varad Talamacki
+ * Manoj Jose
+ *
+ * Send feedback to: <megaraidlinux@lsi.com>
+ *
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: Linuxraid
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+#include <asm/div64.h>
+
+#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
+#define MR_LD_STATE_OPTIMAL 3
+#define FALSE 0
+#define TRUE 1
+
+#define SPAN_DEBUG 0
+#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
+#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
+#define SPAN_INVALID 0xff
+
+/* Prototypes */
+void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
+ struct LD_LOAD_BALANCE_INFO *lbInfo);
+
+static void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
+ PLD_SPAN_INFO ldSpanInfo);
+static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
+ u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map);
+static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
+ u64 strip, struct MR_FW_RAID_MAP_ALL *map);
+
+u32 mega_mod64(u64 dividend, u32 divisor)
+{
+ u64 d;
+ u32 remainder;
+
+ if (!divisor)
+		printk(KERN_ERR "megasas: DIVISOR is zero, in mod fn\n");
+ d = dividend;
+ remainder = do_div(d, divisor);
+ return remainder;
+}
+
+/**
+ * @param dividend : Dividend
+ * @param divisor : Divisor
+ *
+ * @return quotient
+ **/
+u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
+{
+ u32 remainder;
+ u64 d;
+
+ if (!divisor)
+		printk(KERN_ERR "megasas: DIVISOR is zero, in div fn\n");
+
+ d = dividend;
+ remainder = do_div(d, divisor);
+
+ return d;
+}
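+
+/*
+ * Worked example (illustrative only): do_div() divides in place and
+ * returns the remainder, which is why both helpers above copy the
+ * dividend first.  For dividend = 1000005 and divisor = 16:
+ *
+ *	mega_div64_32(1000005, 16) == 62500	(quotient)
+ *	mega_mod64(1000005, 16)    == 5		(remainder)
+ */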
+
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
+{
+ return &map->raidMap.ldSpanMap[ld].ldRaid;
+}
+
+static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
+ struct MR_FW_RAID_MAP_ALL
+ *map)
+{
+ return &map->raidMap.ldSpanMap[ld].spanBlock[0];
+}
+
+static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
+{
+ return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
+}
+
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
+{
+ return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
+}
+
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
+{
+ return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
+}
+
+u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
+{
+ return map->raidMap.devHndlInfo[pd].curDevHdl;
+}
+
+u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
+{
+ return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
+}
+
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
+{
+ return map->raidMap.ldTgtIdToLd[ldTgtId];
+}
+
+static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
+ struct MR_FW_RAID_MAP_ALL *map)
+{
+ return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
+}
+
+/*
+ * This function will validate Map info data provided by FW
+ */
+u8 MR_ValidateMapInfo(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)];
+ struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
+ PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+ struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
+ struct MR_LD_RAID *raid;
+ int ldCount, num_lds;
+ u16 ld;
+
+
+ if (le32_to_cpu(pFwRaidMap->totalSize) !=
+ (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
+ (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) {
+		printk(KERN_ERR "megasas: map info structure size 0x%x does not match the ld count\n",
+ (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
+ sizeof(struct MR_LD_SPAN_MAP)) +
+ (sizeof(struct MR_LD_SPAN_MAP) *
+ le32_to_cpu(pFwRaidMap->ldCount))));
+ printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
+ ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
+ le32_to_cpu(pFwRaidMap->totalSize));
+ return 0;
+ }
+
+ if (instance->UnevenSpanSupport)
+ mr_update_span_set(map, ldSpanInfo);
+
+ mr_update_load_balance_params(map, lbInfo);
+
+ num_lds = le32_to_cpu(map->raidMap.ldCount);
+
+ /*Convert Raid capability values to CPU arch */
+ for (ldCount = 0; ldCount < num_lds; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ raid = MR_LdRaidGet(ld, map);
+ le32_to_cpus((u32 *)&raid->capability);
+ }
+
+ return 1;
+}
+
+u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
+ struct MR_FW_RAID_MAP_ALL *map)
+{
+ struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
+ struct MR_QUAD_ELEMENT *quad;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u32 span, j;
+
+ for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
+
+ for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
+ quad = &pSpanBlock->block_span_info.quad[j];
+
+ if (le32_to_cpu(quad->diff) == 0)
+ return SPAN_INVALID;
+ if (le64_to_cpu(quad->logStart) <= row && row <=
+ le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
+ le32_to_cpu(quad->diff))) == 0) {
+ if (span_blk != NULL) {
+ u64 blk, debugBlk;
+ blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
+ debugBlk = blk;
+
+ blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
+ *span_blk = blk;
+ }
+ return span;
+ }
+ }
+ }
+ return SPAN_INVALID;
+}
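+
+/*
+ * Worked example (illustrative only, made-up quad values): for a quad
+ * with logStart = 0, logEnd = 1023, diff = 1 and offsetInSpan = 0 on an
+ * LD with stripeShift = 7 (128-block strips), row 37 matches the quad
+ * since (37 - 0) % 1 == 0, and the span block comes out as
+ *
+ *	blk       = (37 - 0) / 1 = 37
+ *	*span_blk = (37 + 0) << 7 = 4736
+ */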
+
+/*
+******************************************************************************
+*
+* Function to print info about span set created in driver from FW raid map
+*
+* Inputs :
+* map - LD map
+* ldSpanInfo - ldSpanInfo per HBA instance
+*/
+#if SPAN_DEBUG
+static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+{
+
+ u8 span;
+ u32 element;
+ struct MR_LD_RAID *raid;
+ LD_SPAN_SET *span_set;
+ struct MR_QUAD_ELEMENT *quad;
+ int ldCount;
+ u16 ld;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= MAX_LOGICAL_DRIVES)
+ continue;
+ raid = MR_LdRaidGet(ld, map);
+ dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
+ ld, raid->spanDepth);
+ for (span = 0; span < raid->spanDepth; span++)
+ dev_dbg(&instance->pdev->dev, "Span=%x,"
+ " number of quads=%x\n", span,
+ le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements));
+ for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+ span_set = &(ldSpanInfo[ld].span_set[element]);
+ if (span_set->span_row_data_width == 0)
+ break;
+
+ dev_dbg(&instance->pdev->dev, "Span Set %x:"
+ "width=%x, diff=%x\n", element,
+ (unsigned int)span_set->span_row_data_width,
+ (unsigned int)span_set->diff);
+ dev_dbg(&instance->pdev->dev, "logical LBA"
+ "start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)span_set->log_start_lba,
+ (long unsigned int)span_set->log_end_lba);
+ dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
+ " end=0x%08lx\n",
+ (long unsigned int)span_set->span_row_start,
+ (long unsigned int)span_set->span_row_end);
+ dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
+ " end=0x%08lx\n",
+ (long unsigned int)span_set->data_row_start,
+ (long unsigned int)span_set->data_row_end);
+ dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
+ " end=0x%08lx\n",
+ (long unsigned int)span_set->data_strip_start,
+ (long unsigned int)span_set->data_strip_end);
+
+ for (span = 0; span < raid->spanDepth; span++) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >=
+ element + 1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.
+ quad[element];
+ dev_dbg(&instance->pdev->dev, "Span=%x,"
+ "Quad=%x, diff=%x\n", span,
+ element, le32_to_cpu(quad->diff));
+ dev_dbg(&instance->pdev->dev,
+ "offset_in_span=0x%08lx\n",
+ (long unsigned int)le64_to_cpu(quad->offsetInSpan));
+ dev_dbg(&instance->pdev->dev,
+ "logical start=0x%08lx, end=0x%08lx\n",
+ (long unsigned int)le64_to_cpu(quad->logStart),
+ (long unsigned int)le64_to_cpu(quad->logEnd));
+ }
+ }
+ }
+ }
+ return 0;
+}
+#endif
+
+/*
+******************************************************************************
+*
+* This routine calculates the Span block for given row using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* row - Row number
+* map - LD map
+*
+* Outputs :
+*
+* span - Span number
+* block - Absolute Block number in the physical disk
+* div_error - Divide error code.
+*/
+
+u32 mr_spanset_get_span_block(struct megasas_instance *instance,
+ u32 ld, u64 row, u64 *span_blk, struct MR_FW_RAID_MAP_ALL *map)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ struct MR_QUAD_ELEMENT *quad;
+ u32 span, info;
+ PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+
+ for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+
+ if (row > span_set->data_row_end)
+ continue;
+
+ for (span = 0; span < raid->spanDepth; span++)
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].
+ block_span_info.quad[info];
+				if (le32_to_cpu(quad->diff) == 0)
+ return SPAN_INVALID;
+ if (le64_to_cpu(quad->logStart) <= row &&
+ row <= le64_to_cpu(quad->logEnd) &&
+ (mega_mod64(row - le64_to_cpu(quad->logStart),
+ le32_to_cpu(quad->diff))) == 0) {
+ if (span_blk != NULL) {
+ u64 blk;
+ blk = mega_div64_32
+ ((row - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff));
+ blk = (blk + le64_to_cpu(quad->offsetInSpan))
+ << raid->stripeShift;
+ *span_blk = blk;
+ }
+ return span;
+ }
+ }
+ }
+ return SPAN_INVALID;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the row for given strip using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* Strip - Strip
+* map - LD map
+*
+* Outputs :
+*
+* row - row associated with strip
+*/
+
+static u64 get_row_from_strip(struct megasas_instance *instance,
+ u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+ u32 info, strip_offset, span, span_offset;
+ u64 span_set_Strip, span_set_Row, retval;
+
+ for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+ if (strip > span_set->data_strip_end)
+ continue;
+
+ span_set_Strip = strip - span_set->data_strip_start;
+ strip_offset = mega_mod64(span_set_Strip,
+ span_set->span_row_data_width);
+ span_set_Row = mega_div64_32(span_set_Strip,
+ span_set->span_row_data_width) * span_set->diff;
+ for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+				block_span_info.noElements) >= info+1) {
+ if (strip_offset >=
+ span_set->strip_offset[span])
+ span_offset++;
+ else
+ break;
+ }
+#if SPAN_DEBUG
+ dev_info(&instance->pdev->dev, "Strip 0x%llx,"
+ "span_set_Strip 0x%llx, span_set_Row 0x%llx"
+ "data width 0x%llx span offset 0x%x\n", strip,
+ (unsigned long long)span_set_Strip,
+ (unsigned long long)span_set_Row,
+ (unsigned long long)span_set->span_row_data_width,
+ span_offset);
+ dev_info(&instance->pdev->dev, "For strip 0x%llx"
+ "row is 0x%llx\n", strip,
+ (unsigned long long) span_set->data_row_start +
+ (unsigned long long) span_set_Row + (span_offset - 1));
+#endif
+ retval = (span_set->data_row_start + span_set_Row +
+ (span_offset - 1));
+ return retval;
+ }
+ return -1LLU;
+}
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the Start Strip for given row using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* row - Row number
+* map - LD map
+*
+* Outputs :
+*
+* Strip - Start strip associated with row
+*/
+
+static u64 get_strip_from_row(struct megasas_instance *instance,
+ u32 ld, u64 row, struct MR_FW_RAID_MAP_ALL *map)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ struct MR_QUAD_ELEMENT *quad;
+ PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+ u32 span, info;
+ u64 strip;
+
+ for (info = 0; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+ if (row > span_set->data_row_end)
+ continue;
+
+ for (span = 0; span < raid->spanDepth; span++)
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.quad[info];
+ if (le64_to_cpu(quad->logStart) <= row &&
+ row <= le64_to_cpu(quad->logEnd) &&
+ mega_mod64((row - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff)) == 0) {
+ strip = mega_div64_32
+ (((row - span_set->data_row_start)
+ - le64_to_cpu(quad->logStart)),
+ le32_to_cpu(quad->diff));
+ strip *= span_set->span_row_data_width;
+ strip += span_set->data_strip_start;
+ strip += span_set->strip_offset[span];
+ return strip;
+ }
+ }
+ }
+	dev_err(&instance->pdev->dev, "get_strip_from_row "
+		"returns invalid strip for ld=%x, row=%lx\n",
+ ld, (long unsigned int)row);
+ return -1;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the Physical Arm for given strip using spanset.
+*
+* Inputs :
+* instance - HBA instance
+* ld - Logical drive number
+* strip - Strip
+* map - LD map
+*
+* Outputs :
+*
+* Phys Arm - Phys Arm associated with strip
+*/
+
+static u32 get_arm_from_strip(struct megasas_instance *instance,
+ u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ LD_SPAN_SET *span_set;
+ PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
+ u32 info, strip_offset, span, span_offset, retval;
+
+ for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
+ span_set = &(ldSpanInfo[ld].span_set[info]);
+
+ if (span_set->span_row_data_width == 0)
+ break;
+ if (strip > span_set->data_strip_end)
+ continue;
+
+ strip_offset = (uint)mega_mod64
+ ((strip - span_set->data_strip_start),
+ span_set->span_row_data_width);
+
+ for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) >= info+1) {
+ if (strip_offset >=
+ span_set->strip_offset[span])
+ span_offset =
+ span_set->strip_offset[span];
+ else
+ break;
+ }
+#if SPAN_DEBUG
+ dev_info(&instance->pdev->dev, "get_arm_from_strip:"
+ "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
+ (long unsigned int)strip, (strip_offset - span_offset));
+#endif
+ retval = (strip_offset - span_offset);
+ return retval;
+ }
+
+	dev_err(&instance->pdev->dev, "get_arm_from_strip "
+		"returns invalid arm for ld=%x strip=%lx\n",
+ ld, (long unsigned int)strip);
+
+ return -1;
+}
+
+/* This Function will return Phys arm */
+u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
+ struct MR_FW_RAID_MAP_ALL *map)
+{
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ /* Need to check correct default value */
+ u32 arm = 0;
+
+ switch (raid->level) {
+ case 0:
+ case 5:
+ case 6:
+ arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
+ break;
+ case 1:
+ /* start with logical arm */
+ arm = get_arm_from_strip(instance, ld, stripe, map);
+ if (arm != -1U)
+ arm *= 2;
+ break;
+ }
+
+ return arm;
+}
+
+
+/*
+******************************************************************************
+*
+* This routine calculates the arm, span and block for the specified stripe and
+* reference in stripe using spanset
+*
+* Inputs :
+*
+* ld - Logical drive number
+* stripRow - Stripe number
+* stripRef - Reference in stripe
+*
+* Outputs :
+*
+* span - Span number
+* block - Absolute Block number in the physical disk
+*/
+static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
+ u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_FW_RAID_MAP_ALL *map)
+{
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u32 pd, arRef;
+ u8 physArm, span;
+ u64 row;
+ u8 retval = TRUE;
+ u8 do_invader = 0;
+ u64 *pdBlock = &io_info->pdBlock;
+ u16 *pDevHandle = &io_info->devHandle;
+ u32 logArm, rowMod, armQ, arm;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
+ instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ do_invader = 1;
+
+ /*Get row and span from io_info for Uneven Span IO.*/
+ row = io_info->start_row;
+ span = io_info->start_span;
+
+
+ if (raid->level == 6) {
+ logArm = get_arm_from_strip(instance, ld, stripRow, map);
+ if (logArm == -1U)
+ return FALSE;
+ rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
+ armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
+ arm = armQ + 1 + logArm;
+ if (arm >= SPAN_ROW_SIZE(map, ld, span))
+ arm -= SPAN_ROW_SIZE(map, ld, span);
+ physArm = (u8)arm;
+ } else
+ /* Calculate the arm */
+ physArm = get_arm(instance, ld, span, stripRow, map);
+ if (physArm == 0xFF)
+ return FALSE;
+
+ arRef = MR_LdSpanArrayGet(ld, span, map);
+ pd = MR_ArPdGet(arRef, physArm, map);
+
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ else {
+ *pDevHandle = MR_PD_INVALID;
+ if ((raid->level >= 5) &&
+ (!do_invader || (do_invader &&
+ (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
+ pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ else if (raid->level == 1) {
+ pd = MR_ArPdGet(arRef, physArm + 1, map);
+ if (pd != MR_PD_INVALID)
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ }
+ }
+
+ *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
+ pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
+ physArm;
+ return retval;
+}
+
+/*
+******************************************************************************
+*
+* This routine calculates the arm, span and block for the specified stripe and
+* reference in stripe.
+*
+* Inputs :
+*
+* ld - Logical drive number
+* stripRow - Stripe number
+* stripRef - Reference in stripe
+*
+* Outputs :
+*
+* span - Span number
+* block - Absolute Block number in the physical disk
+*/
+u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
+ u16 stripRef, struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_FW_RAID_MAP_ALL *map)
+{
+ struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
+ u32 pd, arRef;
+ u8 physArm, span;
+ u64 row;
+ u8 retval = TRUE;
+ u8 do_invader = 0;
+ u64 *pdBlock = &io_info->pdBlock;
+ u16 *pDevHandle = &io_info->devHandle;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
+ instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ do_invader = 1;
+
+ row = mega_div64_32(stripRow, raid->rowDataSize);
+
+ if (raid->level == 6) {
+ /* logical arm within row */
+ u32 logArm = mega_mod64(stripRow, raid->rowDataSize);
+ u32 rowMod, armQ, arm;
+
+ if (raid->rowSize == 0)
+ return FALSE;
+ /* get logical row mod */
+ rowMod = mega_mod64(row, raid->rowSize);
+ armQ = raid->rowSize-1-rowMod; /* index of Q drive */
+ arm = armQ+1+logArm; /* data always logically follows Q */
+ if (arm >= raid->rowSize) /* handle wrap condition */
+ arm -= raid->rowSize;
+ physArm = (u8)arm;
+ } else {
+ if (raid->modFactor == 0)
+ return FALSE;
+ physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow,
+ raid->modFactor),
+ map);
+ }
+
+ if (raid->spanDepth == 1) {
+ span = 0;
+ *pdBlock = row << raid->stripeShift;
+ } else {
+ span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
+ if (span == SPAN_INVALID)
+ return FALSE;
+ }
+
+ /* Get the array on which this span is present */
+ arRef = MR_LdSpanArrayGet(ld, span, map);
+ pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */
+
+ if (pd != MR_PD_INVALID)
+ /* Get dev handle from Pd. */
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ else {
+ *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
+ if ((raid->level >= 5) &&
+ (!do_invader || (do_invader &&
+ (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
+ pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+ else if (raid->level == 1) {
+ /* Get alternate Pd. */
+ pd = MR_ArPdGet(arRef, physArm + 1, map);
+ if (pd != MR_PD_INVALID)
+ /* Get dev handle from Pd */
+ *pDevHandle = MR_PdDevHandleGet(pd, map);
+ }
+ }
+
+ *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
+ pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
+ physArm;
+ return retval;
+}
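+
+/*
+ * Worked example (illustrative only, made-up geometry): a RAID-6 LD
+ * with rowSize = 5 arms and rowDataSize = 3 data arms, stripRow = 10:
+ *
+ *	row    = 10 / 3 = 3		logArm = 10 % 3 = 1
+ *	rowMod = 3 % 5  = 3		armQ   = 5 - 1 - 3 = 1
+ *	arm    = armQ + 1 + logArm = 3	(no wrap, since 3 < rowSize)
+ *
+ * so the IO lands on physical arm 3 of the array backing that span.
+ */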
+
+/*
+******************************************************************************
+*
+* MR_BuildRaidContext function
+*
+* This function will initiate command processing. The start/end row and strip
+* information is calculated then the lock is acquired.
+* This function will return 0 if region lock was acquired OR return num strips
+*/
+u8
+MR_BuildRaidContext(struct megasas_instance *instance,
+ struct IO_REQUEST_INFO *io_info,
+ struct RAID_CONTEXT *pRAID_Context,
+ struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN)
+{
+ struct MR_LD_RAID *raid;
+ u32 ld, stripSize, stripe_mask;
+ u64 endLba, endStrip, endRow, start_row, start_strip;
+ u64 regStart;
+ u32 regSize;
+ u8 num_strips, numRows;
+ u16 ref_in_start_stripe, ref_in_end_stripe;
+ u64 ldStartBlock;
+ u32 numBlocks, ldTgtId;
+ u8 isRead;
+ u8 retval = 0;
+ u8 startlba_span = SPAN_INVALID;
+ u64 *pdBlock = &io_info->pdBlock;
+
+ ldStartBlock = io_info->ldStartBlock;
+ numBlocks = io_info->numBlocks;
+ ldTgtId = io_info->ldTgtId;
+ isRead = io_info->isRead;
+ io_info->IoforUnevenSpan = 0;
+ io_info->start_span = SPAN_INVALID;
+
+ ld = MR_TargetIdToLdGet(ldTgtId, map);
+ raid = MR_LdRaidGet(ld, map);
+
+ /*
+ * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
+ * return FALSE
+ */
+ if (raid->rowDataSize == 0) {
+ if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
+ return FALSE;
+ else if (instance->UnevenSpanSupport) {
+ io_info->IoforUnevenSpan = 1;
+ } else {
+ dev_info(&instance->pdev->dev,
+				"raid->rowDataSize is 0, but SPAN[0] has "
+				"rowDataSize = 0x%0x, and there is "
+				"_NO_ UnevenSpanSupport\n",
+ MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
+ return FALSE;
+ }
+ }
+
+ stripSize = 1 << raid->stripeShift;
+ stripe_mask = stripSize-1;
+
+
+ /*
+ * calculate starting row and stripe, and number of strips and rows
+ */
+ start_strip = ldStartBlock >> raid->stripeShift;
+ ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
+ endLba = ldStartBlock + numBlocks - 1;
+ ref_in_end_stripe = (u16)(endLba & stripe_mask);
+ endStrip = endLba >> raid->stripeShift;
+ num_strips = (u8)(endStrip - start_strip + 1); /* End strip */
+
+ if (io_info->IoforUnevenSpan) {
+ start_row = get_row_from_strip(instance, ld, start_strip, map);
+ endRow = get_row_from_strip(instance, ld, endStrip, map);
+ if (start_row == -1ULL || endRow == -1ULL) {
+			dev_info(&instance->pdev->dev, "return from %s %d. "
+ "Send IO w/o region lock.\n",
+ __func__, __LINE__);
+ return FALSE;
+ }
+
+ if (raid->spanDepth == 1) {
+ startlba_span = 0;
+ *pdBlock = start_row << raid->stripeShift;
+ } else
+ startlba_span = (u8)mr_spanset_get_span_block(instance,
+ ld, start_row, pdBlock, map);
+ if (startlba_span == SPAN_INVALID) {
+			dev_info(&instance->pdev->dev, "return from %s %d "
+				"for row 0x%llx, start strip %llx "
+				"end strip %llx\n", __func__, __LINE__,
+ (unsigned long long)start_row,
+ (unsigned long long)start_strip,
+ (unsigned long long)endStrip);
+ return FALSE;
+ }
+ io_info->start_span = startlba_span;
+ io_info->start_row = start_row;
+#if SPAN_DEBUG
+		dev_dbg(&instance->pdev->dev, "Check Span number from %s %d "
+ "for row 0x%llx, start strip 0x%llx end strip 0x%llx"
+ " span 0x%x\n", __func__, __LINE__,
+ (unsigned long long)start_row,
+ (unsigned long long)start_strip,
+ (unsigned long long)endStrip, startlba_span);
+		dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx "
+ "Start span 0x%x\n", (unsigned long long)start_row,
+ (unsigned long long)endRow, startlba_span);
+#endif
+ } else {
+ start_row = mega_div64_32(start_strip, raid->rowDataSize);
+ endRow = mega_div64_32(endStrip, raid->rowDataSize);
+ }
+ numRows = (u8)(endRow - start_row + 1);
+
+ /*
+ * calculate region info.
+ */
+
+ /* assume region is at the start of the first row */
+ regStart = start_row << raid->stripeShift;
+ /* assume this IO needs the full row - we'll adjust if not true */
+ regSize = stripSize;
+
+ /* Check if we can send this I/O via FastPath */
+ if (raid->capability.fpCapable) {
+ if (isRead)
+ io_info->fpOkForIo = (raid->capability.fpReadCapable &&
+ ((num_strips == 1) ||
+ raid->capability.
+ fpReadAcrossStripe));
+ else
+ io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
+ ((num_strips == 1) ||
+ raid->capability.
+ fpWriteAcrossStripe));
+ } else
+ io_info->fpOkForIo = FALSE;
+
+ if (numRows == 1) {
+ /* single-strip IOs can always lock only the data needed */
+ if (num_strips == 1) {
+ regStart += ref_in_start_stripe;
+ regSize = numBlocks;
+ }
+		/* multi-strip IOs always need the full stripe locked */
+ } else if (io_info->IoforUnevenSpan == 0) {
+ /*
+ * For Even span region lock optimization.
+ * If the start strip is the last in the start row
+ */
+ if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
+ regStart += ref_in_start_stripe;
+ /* initialize count to sectors from startref to end
+ of strip */
+ regSize = stripSize - ref_in_start_stripe;
+ }
+
+ /* add complete rows in the middle of the transfer */
+ if (numRows > 2)
+ regSize += (numRows-2) << raid->stripeShift;
+
+ /* if IO ends within first strip of last row*/
+ if (endStrip == endRow*raid->rowDataSize)
+ regSize += ref_in_end_stripe+1;
+ else
+ regSize += stripSize;
+ } else {
+ /*
+ * For Uneven span region lock optimization.
+ * If the start strip is the last in the start row
+ */
+ if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
+ SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
+ regStart += ref_in_start_stripe;
+ /* initialize count to sectors from
+ * startRef to end of strip
+ */
+ regSize = stripSize - ref_in_start_stripe;
+ }
+		/* Add complete rows in the middle of the transfer */
+		if (numRows > 2)
+			regSize += (numRows-2) << raid->stripeShift;
+
+ /* if IO ends within first strip of last row */
+ if (endStrip == get_strip_from_row(instance, ld, endRow, map))
+ regSize += ref_in_end_stripe + 1;
+ else
+ regSize += stripSize;
+ }
+
+ pRAID_Context->timeoutValue =
+ cpu_to_le16(raid->fpIoTimeoutForLd ?
+ raid->fpIoTimeoutForLd :
+ map->raidMap.fpPdIoTimeoutSec);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ pRAID_Context->regLockFlags = (isRead) ?
+ raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
+ else
+ pRAID_Context->regLockFlags = (isRead) ?
+ REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
+ pRAID_Context->VirtualDiskTgtId = raid->targetId;
+ pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
+ pRAID_Context->regLockLength = cpu_to_le32(regSize);
+ pRAID_Context->configSeqNum = raid->seqNum;
+ /* save pointer to raid->LUN array */
+ *raidLUN = raid->LUN;
+
+
+ /*Get Phy Params only if FP capable, or else leave it to MR firmware
+ to do the calculation.*/
+ if (io_info->fpOkForIo) {
+ retval = io_info->IoforUnevenSpan ?
+ mr_spanset_get_phy_params(instance, ld,
+ start_strip, ref_in_start_stripe,
+ io_info, pRAID_Context, map) :
+ MR_GetPhyParams(instance, ld, start_strip,
+ ref_in_start_stripe, io_info,
+ pRAID_Context, map);
+ /* If IO on an invalid Pd, then FP is not possible.*/
+ if (io_info->devHandle == MR_PD_INVALID)
+ io_info->fpOkForIo = FALSE;
+ return retval;
+ } else if (isRead) {
+ uint stripIdx;
+ for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
+ retval = io_info->IoforUnevenSpan ?
+ mr_spanset_get_phy_params(instance, ld,
+ start_strip + stripIdx,
+ ref_in_start_stripe, io_info,
+ pRAID_Context, map) :
+ MR_GetPhyParams(instance, ld,
+ start_strip + stripIdx, ref_in_start_stripe,
+ io_info, pRAID_Context, map);
+ if (!retval)
+ return TRUE;
+ }
+ }
+
+#if SPAN_DEBUG
+ /* Just for testing what arm we get for strip.*/
+ if (io_info->IoforUnevenSpan)
+ get_arm_from_strip(instance, ld, start_strip, map);
+#endif
+ return TRUE;
+}
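+
+/*
+ * Worked example (illustrative only, made-up geometry): an even-span LD
+ * with stripeShift = 6 (64-block strips) and rowDataSize = 4, for an
+ * 8-block IO starting at ldStartBlock = 300:
+ *
+ *	start_strip = 300 >> 6 = 4	ref_in_start_stripe = 300 & 63 = 44
+ *	endStrip    = 307 >> 6 = 4	num_strips = 1
+ *	start_row   = 4 / 4    = 1	numRows    = 1
+ *
+ * With a single strip in a single row the region lock shrinks to just
+ * the data touched: regStart = (1 << 6) + 44 = 108 and regSize = 8.
+ */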
+
+/*
+******************************************************************************
+*
+* This routine prepares span set info from the valid RAID map and stores it
+* into the local copy of ldSpanInfo per instance data structure.
+*
+* Inputs :
+* map - LD map
+* ldSpanInfo - ldSpanInfo per HBA instance
+*
+*/
+void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
+ PLD_SPAN_INFO ldSpanInfo)
+{
+ u8 span, count;
+ u32 element, span_row_width;
+ u64 span_row;
+ struct MR_LD_RAID *raid;
+ LD_SPAN_SET *span_set, *span_set_prev;
+ struct MR_QUAD_ELEMENT *quad;
+ int ldCount;
+ u16 ld;
+
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= MAX_LOGICAL_DRIVES)
+ continue;
+ raid = MR_LdRaidGet(ld, map);
+ for (element = 0; element < MAX_QUAD_DEPTH; element++) {
+ for (span = 0; span < raid->spanDepth; span++) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
+ block_span_info.noElements) <
+ element + 1)
+ continue;
+ span_set = &(ldSpanInfo[ld].span_set[element]);
+ quad = &map->raidMap.ldSpanMap[ld].
+ spanBlock[span].block_span_info.
+ quad[element];
+
+ span_set->diff = le32_to_cpu(quad->diff);
+
+ for (count = 0, span_row_width = 0;
+ count < raid->spanDepth; count++) {
+ if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
+ spanBlock[count].
+ block_span_info.
+ noElements) >= element + 1) {
+ span_set->strip_offset[count] =
+ span_row_width;
+ span_row_width +=
+ MR_LdSpanPtrGet
+ (ld, count, map)->spanRowDataSize;
+					printk(KERN_INFO "megasas: "
+ "span %x rowDataSize %x\n",
+ count, MR_LdSpanPtrGet
+ (ld, count, map)->spanRowDataSize);
+ }
+ }
+
+ span_set->span_row_data_width = span_row_width;
+ span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
+ le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
+ le32_to_cpu(quad->diff));
+
+ if (element == 0) {
+ span_set->log_start_lba = 0;
+ span_set->log_end_lba =
+ ((span_row << raid->stripeShift)
+ * span_row_width) - 1;
+
+ span_set->span_row_start = 0;
+ span_set->span_row_end = span_row - 1;
+
+ span_set->data_strip_start = 0;
+ span_set->data_strip_end =
+ (span_row * span_row_width) - 1;
+
+ span_set->data_row_start = 0;
+ span_set->data_row_end =
+ (span_row * le32_to_cpu(quad->diff)) - 1;
+ } else {
+ span_set_prev = &(ldSpanInfo[ld].
+ span_set[element - 1]);
+ span_set->log_start_lba =
+ span_set_prev->log_end_lba + 1;
+ span_set->log_end_lba =
+ span_set->log_start_lba +
+ ((span_row << raid->stripeShift)
+ * span_row_width) - 1;
+
+ span_set->span_row_start =
+ span_set_prev->span_row_end + 1;
+ span_set->span_row_end =
+ span_set->span_row_start + span_row - 1;
+
+ span_set->data_strip_start =
+ span_set_prev->data_strip_end + 1;
+ span_set->data_strip_end =
+ span_set->data_strip_start +
+ (span_row * span_row_width) - 1;
+
+ span_set->data_row_start =
+ span_set_prev->data_row_end + 1;
+ span_set->data_row_end =
+ span_set->data_row_start +
+ (span_row * le32_to_cpu(quad->diff)) - 1;
+ }
+ break;
+ }
+ if (span == raid->spanDepth)
+ break;
+ }
+ }
+#if SPAN_DEBUG
+ getSpanInfo(map, ldSpanInfo);
+#endif
+
+}
+
+void
+mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
+ struct LD_LOAD_BALANCE_INFO *lbInfo)
+{
+ int ldCount;
+ u16 ld;
+ struct MR_LD_RAID *raid;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, map);
+ if (ld >= MAX_LOGICAL_DRIVES) {
+ lbInfo[ldCount].loadBalanceFlag = 0;
+ continue;
+ }
+
+ raid = MR_LdRaidGet(ld, map);
+
+ /* Two drive Optimal RAID 1 */
+ if ((raid->level == 1) && (raid->rowSize == 2) &&
+ (raid->spanDepth == 1) && raid->ldState ==
+ MR_LD_STATE_OPTIMAL) {
+ u32 pd, arRef;
+
+ lbInfo[ldCount].loadBalanceFlag = 1;
+
+ /* Get the array on which this span is present */
+ arRef = MR_LdSpanArrayGet(ld, 0, map);
+
+ /* Get the Pd */
+ pd = MR_ArPdGet(arRef, 0, map);
+ /* Get dev handle from Pd */
+ lbInfo[ldCount].raid1DevHandle[0] =
+ MR_PdDevHandleGet(pd, map);
+ /* Get the Pd */
+ pd = MR_ArPdGet(arRef, 1, map);
+
+ /* Get the dev handle from Pd */
+ lbInfo[ldCount].raid1DevHandle[1] =
+ MR_PdDevHandleGet(pd, map);
+ } else
+ lbInfo[ldCount].loadBalanceFlag = 0;
+ }
+}
+
+u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block,
+ u32 count)
+{
+ u16 pend0, pend1;
+ u64 diff0, diff1;
+ u8 bestArm;
+
+ /* get the pending cmds for the data and mirror arms */
+ pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
+ pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);
+
+ /* Determine the disk whose head is nearer to the req. block */
+ diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
+ diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
+ bestArm = (diff0 <= diff1 ? 0 : 1);
+
+ /*Make balance count from 16 to 4 to keep driver in sync with Firmware*/
+ if ((bestArm == arm && pend0 > pend1 + 4) ||
+ (bestArm != arm && pend1 > pend0 + 4))
+ bestArm ^= 1;
+
+ /* Update the last accessed block on the correct pd */
+ lbInfo->last_accessed_block[bestArm] = block + count - 1;
+
+ return bestArm;
+}
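+
+/*
+ * Worked example (illustrative only, made-up counters): for a RAID-1
+ * read at block 5000 with last_accessed_block = {4000, 20000}, arm 0 is
+ * nearer (diff 1000 vs 15000) and becomes bestArm.  If the IO already
+ * targets arm 0 but the pending counts are {12, 5}, then 12 > 5 + 4
+ * trips the threshold above and the request is steered to arm 1.
+ */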
+
+u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
+ struct IO_REQUEST_INFO *io_info)
+{
+ u8 arm, old_arm;
+ u16 devHandle;
+
+ old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
+
+ /* get best new arm */
+ arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
+ io_info->numBlocks);
+ devHandle = lbInfo->raid1DevHandle[arm];
+ atomic_inc(&lbInfo->scsi_pending_cmds[arm]);
+
+ return devHandle;
+}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
new file mode 100644
index 00000000000..22600419ae9
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -0,0 +1,2676 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2009-2012 LSI Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * FILE: megaraid_sas_fusion.c
+ *
+ * Authors: LSI Corporation
+ * Sumant Patro
+ * Adam Radford <linuxraid@lsi.com>
+ *
+ * Send feedback to: <megaraidlinux@lsi.com>
+ *
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: Linuxraid
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "megaraid_sas_fusion.h"
+#include "megaraid_sas.h"
+
+extern void megasas_free_cmds(struct megasas_instance *instance);
+extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
+ *instance);
+extern void
+megasas_complete_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd, u8 alt_status);
+int megasas_is_ldio(struct scsi_cmnd *cmd);
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds);
+
+void
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
+int megasas_alloc_cmds(struct megasas_instance *instance);
+int
+megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
+int
+megasas_issue_polled(struct megasas_instance *instance,
+ struct megasas_cmd *cmd);
+void
+megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
+
+u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
+ struct IO_REQUEST_INFO *in_info);
+int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
+void megaraid_sas_kill_hba(struct megasas_instance *instance);
+
+extern u32 megasas_dbg_lvl;
+void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
+int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
+ int initial);
+void megasas_start_timer(struct megasas_instance *instance,
+ struct timer_list *timer,
+ void *fn, unsigned long interval);
+extern struct megasas_mgmt_info megasas_mgmt_info;
+extern int resetwaittime;
+
+/**
+ * megasas_enable_intr_fusion - Enables interrupts
+ * @instance:			Adapter soft state
+ */
+void
+megasas_enable_intr_fusion(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ /* For Thunderbolt/Invader also clear intr on enable */
+ writel(~0, &regs->outbound_intr_status);
+ readl(&regs->outbound_intr_status);
+
+ writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
+
+ /* Dummy readl to force pci flush */
+ readl(&regs->outbound_intr_mask);
+ instance->mask_interrupts = 0;
+}
+
+/**
+ * megasas_disable_intr_fusion - Disables interrupt
+ * @instance:			Adapter soft state
+ */
+void
+megasas_disable_intr_fusion(struct megasas_instance *instance)
+{
+ u32 mask = 0xFFFFFFFF;
+ u32 status;
+ struct megasas_register_set __iomem *regs;
+ regs = instance->reg_set;
+ instance->mask_interrupts = 1;
+
+ writel(mask, &regs->outbound_intr_mask);
+ /* Dummy readl to force pci flush */
+ status = readl(&regs->outbound_intr_mask);
+}
+
+int
+megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
+{
+ u32 status;
+ /*
+ * Check if it is our interrupt
+ */
+ status = readl(&regs->outbound_intr_status);
+
+ if (status & 1) {
+ writel(status, &regs->outbound_intr_status);
+ readl(&regs->outbound_intr_status);
+ return 1;
+ }
+ if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
+ return 0;
+
+ return 1;
+}
+
+/**
+ * megasas_get_cmd_fusion - Get a command from the free pool
+ * @instance: Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
+ *instance)
+{
+ unsigned long flags;
+ struct fusion_context *fusion =
+ (struct fusion_context *)instance->ctrl_context;
+ struct megasas_cmd_fusion *cmd = NULL;
+
+ spin_lock_irqsave(&fusion->cmd_pool_lock, flags);
+
+ if (!list_empty(&fusion->cmd_pool)) {
+ cmd = list_entry((&fusion->cmd_pool)->next,
+ struct megasas_cmd_fusion, list);
+ list_del_init(&cmd->list);
+ } else {
+ printk(KERN_ERR "megasas: Command pool (fusion) empty!\n");
+ }
+
+ spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags);
+ return cmd;
+}
+
+/**
+ * megasas_return_cmd_fusion - Return a cmd to free command pool
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+static inline void
+megasas_return_cmd_fusion(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd)
+{
+ unsigned long flags;
+ struct fusion_context *fusion =
+ (struct fusion_context *)instance->ctrl_context;
+
+ spin_lock_irqsave(&fusion->cmd_pool_lock, flags);
+
+ cmd->scmd = NULL;
+ cmd->sync_cmd_idx = (u32)ULONG_MAX;
+ list_add_tail(&cmd->list, &fusion->cmd_pool);
+
+ spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags);
+}
+
+/**
+ * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool
+ * @instance: Adapter soft state
+ */
+static void megasas_teardown_frame_pool_fusion(
+ struct megasas_instance *instance)
+{
+ int i;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ u16 max_cmd = instance->max_fw_cmds;
+
+ struct megasas_cmd_fusion *cmd;
+
+ if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
+ printk(KERN_ERR "megasas: dma pool is null. SG Pool %p, "
+		       "sense pool: %p\n", fusion->sg_dma_pool,
+ fusion->sense_dma_pool);
+ return;
+ }
+
+ /*
+ * Return all frames to pool
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = fusion->cmd_list[i];
+
+ if (cmd->sg_frame)
+ pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
+ cmd->sg_frame_phys_addr);
+
+ if (cmd->sense)
+ pci_pool_free(fusion->sense_dma_pool, cmd->sense,
+ cmd->sense_phys_addr);
+ }
+
+ /*
+ * Now destroy the pool itself
+ */
+ pci_pool_destroy(fusion->sg_dma_pool);
+ pci_pool_destroy(fusion->sense_dma_pool);
+
+ fusion->sg_dma_pool = NULL;
+ fusion->sense_dma_pool = NULL;
+}
+
+/**
+ * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
+ * @instance: Adapter soft state
+ */
+void
+megasas_free_cmds_fusion(struct megasas_instance *instance)
+{
+ int i;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ u32 max_cmds, req_sz, reply_sz, io_frames_sz;
+
+
+ req_sz = fusion->request_alloc_sz;
+ reply_sz = fusion->reply_alloc_sz;
+ io_frames_sz = fusion->io_frames_alloc_sz;
+
+ max_cmds = instance->max_fw_cmds;
+
+ /* Free descriptors and request Frames memory */
+ if (fusion->req_frames_desc)
+ dma_free_coherent(&instance->pdev->dev, req_sz,
+ fusion->req_frames_desc,
+ fusion->req_frames_desc_phys);
+
+ if (fusion->reply_frames_desc) {
+ pci_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc,
+ fusion->reply_frames_desc_phys);
+ pci_pool_destroy(fusion->reply_frames_desc_pool);
+ }
+
+ if (fusion->io_request_frames) {
+ pci_pool_free(fusion->io_request_frames_pool,
+ fusion->io_request_frames,
+ fusion->io_request_frames_phys);
+ pci_pool_destroy(fusion->io_request_frames_pool);
+ }
+
+ /* Free the Fusion frame pool */
+ megasas_teardown_frame_pool_fusion(instance);
+
+ /* Free all the commands in the cmd_list */
+ for (i = 0; i < max_cmds; i++)
+ kfree(fusion->cmd_list[i]);
+
+ /* Free the cmd_list buffer itself */
+ kfree(fusion->cmd_list);
+ fusion->cmd_list = NULL;
+
+ INIT_LIST_HEAD(&fusion->cmd_pool);
+}
+
+/**
+ * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames
+ * @instance: Adapter soft state
+ *
+ */
+static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
+{
+ int i;
+ u32 max_cmd;
+ struct fusion_context *fusion;
+ struct megasas_cmd_fusion *cmd;
+ u32 total_sz_chain_frame;
+
+ fusion = instance->ctrl_context;
+ max_cmd = instance->max_fw_cmds;
+
+ total_sz_chain_frame = MEGASAS_MAX_SZ_CHAIN_FRAME;
+
+ /*
+ * Use DMA pool facility provided by PCI layer
+ */
+
+ fusion->sg_dma_pool = pci_pool_create("megasas sg pool fusion",
+ instance->pdev,
+ total_sz_chain_frame, 4,
+ 0);
+ if (!fusion->sg_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup request pool "
+ "fusion\n");
+ return -ENOMEM;
+ }
+ fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion",
+ instance->pdev,
+ SCSI_SENSE_BUFFERSIZE, 64, 0);
+
+ if (!fusion->sense_dma_pool) {
+ printk(KERN_DEBUG "megasas: failed to setup sense pool "
+ "fusion\n");
+ pci_pool_destroy(fusion->sg_dma_pool);
+ fusion->sg_dma_pool = NULL;
+ return -ENOMEM;
+ }
+
+ /*
+ * Allocate and attach a frame to each of the commands in cmd_list
+ */
+ for (i = 0; i < max_cmd; i++) {
+
+ cmd = fusion->cmd_list[i];
+
+ cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
+ GFP_KERNEL,
+ &cmd->sg_frame_phys_addr);
+
+ cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL, &cmd->sense_phys_addr);
+ /*
+ * megasas_teardown_frame_pool_fusion() takes care of freeing
+ * whatever has been allocated
+ */
+ if (!cmd->sg_frame || !cmd->sense) {
+ printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n");
+ megasas_teardown_frame_pool_fusion(instance);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/**
+ * megasas_alloc_cmds_fusion - Allocates the command packets
+ * @instance: Adapter soft state
+ *
+ * Each frame has a 32-bit field called context. This context is used to get
+ * back the megasas_cmd_fusion from the frame when a frame gets completed.
+ * In this driver, the 32-bit values are the indices into the cmd_list array.
+ * This array is used only to look up the megasas_cmd_fusion given the context.
+ * The free commands themselves are maintained in a linked list called cmd_pool.
+ *
+ * cmds are formed in the io_request and sg_frame members of the
+ * megasas_cmd_fusion. The context field is used to get a request descriptor
+ * and is used as SMID of the cmd.
+ * SMID value range is from 1 to max_fw_cmds.
+ */
+int
+megasas_alloc_cmds_fusion(struct megasas_instance *instance)
+{
+ int i, j, count;
+ u32 max_cmd, io_frames_sz;
+ struct fusion_context *fusion;
+ struct megasas_cmd_fusion *cmd;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+ u32 offset;
+ dma_addr_t io_req_base_phys;
+ u8 *io_req_base;
+
+ fusion = instance->ctrl_context;
+
+ max_cmd = instance->max_fw_cmds;
+
+ fusion->req_frames_desc =
+ dma_alloc_coherent(&instance->pdev->dev,
+ fusion->request_alloc_sz,
+ &fusion->req_frames_desc_phys, GFP_KERNEL);
+
+ if (!fusion->req_frames_desc) {
+		printk(KERN_ERR "megasas: Could not allocate memory for "
+ "request_frames\n");
+ goto fail_req_desc;
+ }
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ fusion->reply_frames_desc_pool =
+ pci_pool_create("reply_frames pool", instance->pdev,
+ fusion->reply_alloc_sz * count, 16, 0);
+
+ if (!fusion->reply_frames_desc_pool) {
+		printk(KERN_ERR "megasas: Could not allocate memory for "
+ "reply_frame pool\n");
+ goto fail_reply_desc;
+ }
+
+ fusion->reply_frames_desc =
+ pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
+ &fusion->reply_frames_desc_phys);
+ if (!fusion->reply_frames_desc) {
+		printk(KERN_ERR "megasas: Could not allocate memory for "
+ "reply_frame pool\n");
+ pci_pool_destroy(fusion->reply_frames_desc_pool);
+ goto fail_reply_desc;
+ }
+
+ reply_desc = fusion->reply_frames_desc;
+ for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
+ reply_desc->Words = ULLONG_MAX;
+
+ io_frames_sz = fusion->io_frames_alloc_sz;
+
+ fusion->io_request_frames_pool =
+ pci_pool_create("io_request_frames pool", instance->pdev,
+ fusion->io_frames_alloc_sz, 16, 0);
+
+ if (!fusion->io_request_frames_pool) {
+ printk(KERN_ERR "megasas: Could not allocate memory for "
+ "io_request_frame pool\n");
+ goto fail_io_frames;
+ }
+
+ fusion->io_request_frames =
+ pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
+ &fusion->io_request_frames_phys);
+ if (!fusion->io_request_frames) {
+ printk(KERN_ERR "megasas: Could not allocate memory for "
+		       "io_request_frames\n");
+ pci_pool_destroy(fusion->io_request_frames_pool);
+ goto fail_io_frames;
+ }
+
+ /*
+ * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
+ * Allocate the dynamic array first and then allocate individual
+ * commands.
+ */
+ fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *)
+ * max_cmd, GFP_KERNEL);
+
+ if (!fusion->cmd_list) {
+ printk(KERN_DEBUG "megasas: out of memory. Could not alloc "
+ "memory for cmd_list_fusion\n");
+ goto fail_cmd_list;
+ }
+
+ max_cmd = instance->max_fw_cmds;
+ for (i = 0; i < max_cmd; i++) {
+ fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
+ GFP_KERNEL);
+ if (!fusion->cmd_list[i]) {
+ printk(KERN_ERR "Could not alloc cmd list fusion\n");
+
+ for (j = 0; j < i; j++)
+ kfree(fusion->cmd_list[j]);
+
+ kfree(fusion->cmd_list);
+ fusion->cmd_list = NULL;
+ goto fail_cmd_list;
+ }
+ }
+
+	/* The first 256 bytes (SMID 0) are not used. Don't add to cmd list */
+ io_req_base = fusion->io_request_frames +
+ MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+ io_req_base_phys = fusion->io_request_frames_phys +
+ MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+
+ /*
+ * Add all the commands to command pool (fusion->cmd_pool)
+ */
+
+ /* SMID 0 is reserved. Set SMID/index from 1 */
+ for (i = 0; i < max_cmd; i++) {
+ cmd = fusion->cmd_list[i];
+ offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
+ memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
+ cmd->index = i + 1;
+ cmd->scmd = NULL;
+ cmd->sync_cmd_idx = (u32)ULONG_MAX; /* Set to Invalid */
+ cmd->instance = instance;
+ cmd->io_request =
+ (struct MPI2_RAID_SCSI_IO_REQUEST *)
+ (io_req_base + offset);
+ memset(cmd->io_request, 0,
+ sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
+ cmd->io_request_phys_addr = io_req_base_phys + offset;
+
+ list_add_tail(&cmd->list, &fusion->cmd_pool);
+ }
+
+ /*
+ * Create a frame pool and assign one frame to each cmd
+ */
+ if (megasas_create_frame_pool_fusion(instance)) {
+ printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+ megasas_free_cmds_fusion(instance);
+ goto fail_req_desc;
+ }
+
+ return 0;
+
+fail_cmd_list:
+ pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames,
+ fusion->io_request_frames_phys);
+ pci_pool_destroy(fusion->io_request_frames_pool);
+fail_io_frames:
+ pci_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc,
+ fusion->reply_frames_desc_phys);
+ pci_pool_destroy(fusion->reply_frames_desc_pool);
+
+fail_reply_desc:
+ dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
+ fusion->req_frames_desc,
+ fusion->req_frames_desc_phys);
+fail_req_desc:
+ return -ENOMEM;
+}
+
+/**
+ * wait_and_poll - Issues a polling command
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be issued
+ * @seconds: Maximum poll time, in seconds
+ *
+ * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
+ */
+int
+wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
+ int seconds)
+{
+ int i;
+ struct megasas_header *frame_hdr = &cmd->frame->hdr;
+
+ u32 msecs = seconds * 1000;
+
+ /*
+ * Wait for cmd_status to change
+ */
+ for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
+ rmb();
+ msleep(20);
+ }
+
+ if (frame_hdr->cmd_status == 0xff)
+ return -ETIME;
+
+ return 0;
+}
+
+/**
+ * megasas_ioc_init_fusion - Initializes the FW
+ * @instance: Adapter soft state
+ *
+ * Issues the IOC Init cmd
+ */
+int
+megasas_ioc_init_fusion(struct megasas_instance *instance)
+{
+ struct megasas_init_frame *init_frame;
+ struct MPI2_IOC_INIT_REQUEST *IOCInitMessage;
+ dma_addr_t ioc_init_handle;
+ struct megasas_cmd *cmd;
+ u8 ret;
+ struct fusion_context *fusion;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
+ int i;
+ struct megasas_header *frame_hdr;
+
+ fusion = instance->ctrl_context;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_ERR "Could not allocate cmd for INIT Frame\n");
+ ret = 1;
+ goto fail_get_cmd;
+ }
+
+ IOCInitMessage =
+ dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ &ioc_init_handle, GFP_KERNEL);
+
+ if (!IOCInitMessage) {
+ printk(KERN_ERR "Could not allocate memory for "
+ "IOCInitMessage\n");
+ ret = 1;
+ goto fail_fw_init;
+ }
+
+ memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
+
+ IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
+ IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
+ IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
+ IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+ IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
+
+ IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
+ IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys);
+ IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
+ IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
+ init_frame = (struct megasas_init_frame *)cmd->frame;
+ memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+
+ frame_hdr = &cmd->frame->hdr;
+ frame_hdr->cmd_status = 0xFF;
+ frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
+
+ init_frame->cmd = MFI_CMD_INIT;
+ init_frame->cmd_status = 0xFF;
+
+	/* driver supports extended MSI-X */
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ init_frame->driver_operations.
+ mfi_capabilities.support_additional_msix = 1;
+ /* driver supports HA / Remote LUN over Fast Path interface */
+ init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun
+ = 1;
+ /* Convert capability to LE32 */
+ cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
+
+ init_frame->queue_info_new_phys_addr_hi =
+ cpu_to_le32(upper_32_bits(ioc_init_handle));
+ init_frame->queue_info_new_phys_addr_lo =
+ cpu_to_le32(lower_32_bits(ioc_init_handle));
+ init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
+
+ req_desc.Words = 0;
+ req_desc.MFAIo.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ cpu_to_le32s((u32 *)&req_desc.MFAIo);
+ req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr);
+
+ /*
+ * disable the intr before firing the init frame
+ */
+ instance->instancet->disable_intr(instance);
+
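+	/* Wait up to 10 seconds for the FW to clear the doorbell busy bit */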
+ for (i = 0; i < (10 * 1000); i += 20) {
+ if (readl(&instance->reg_set->doorbell) & 1)
+ msleep(20);
+ else
+ break;
+ }
+
+ instance->instancet->fire_cmd(instance, req_desc.u.low,
+ req_desc.u.high, instance->reg_set);
+
+ wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
+
+ frame_hdr = &cmd->frame->hdr;
+ if (frame_hdr->cmd_status != 0) {
+ ret = 1;
+ goto fail_fw_init;
+ }
+	printk(KERN_ERR "megasas: IOC Init cmd success\n");
+
+ ret = 0;
+
+fail_fw_init:
+ megasas_return_cmd(instance, cmd);
+ if (IOCInitMessage)
+ dma_free_coherent(&instance->pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ IOCInitMessage, ioc_init_handle);
+fail_get_cmd:
+ return ret;
+}
+
+/*
+ * megasas_get_ld_map_info -	Returns FW's ld_map structure
+ * @instance:				Adapter soft state
+ *
+ * Issues an internal command (DCMD) to get the FW's LD (RAID) map
+ * structure.  The driver uses this map to decide whether an I/O can be
+ * sent down the fast path.
+ */
+static int
+megasas_get_ld_map_info(struct megasas_instance *instance)
+{
+ int ret = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_FW_RAID_MAP_ALL *ci;
+ dma_addr_t ci_h = 0;
+ u32 size_map_info;
+ struct fusion_context *fusion;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: Failed to get cmd for map info.\n");
+ return -ENOMEM;
+ }
+
+ fusion = instance->ctrl_context;
+
+ if (!fusion) {
+ megasas_return_cmd(instance, cmd);
+ return -ENXIO;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ size_map_info = sizeof(struct MR_FW_RAID_MAP) +
+ (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+
+ ci = fusion->ld_map[(instance->map_id & 1)];
+ ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
+
+ if (!ci) {
+ printk(KERN_DEBUG "Failed to alloc mem for ld_map_info\n");
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ memset(ci, 0, sizeof(*ci));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(size_map_info);
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+ if (!megasas_issue_polled(instance, cmd))
+ ret = 0;
+ else {
+ printk(KERN_ERR "megasas: Get LD Map Info Failed\n");
+ ret = -1;
+ }
+
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
+
+u8
+megasas_get_map_info(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ fusion->fast_path_io = 0;
+ if (!megasas_get_ld_map_info(instance)) {
+ if (MR_ValidateMapInfo(instance)) {
+ fusion->fast_path_io = 1;
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/*
+ * megasas_sync_map_info -	Sends LD target id/seqNum info to the FW
+ * @instance:		Adapter soft state
+ *
+ * Issues a pended internal command (DCMD) that passes the per-LD target
+ * id and sequence number to the FW.  The FW completes this command when
+ * the RAID map changes, telling the driver to fetch a new map.
+ */
+int
+megasas_sync_map_info(struct megasas_instance *instance)
+{
+ int ret = 0, i;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ u32 size_sync_info, num_lds;
+ struct fusion_context *fusion;
+ struct MR_LD_TARGET_SYNC *ci = NULL;
+ struct MR_FW_RAID_MAP_ALL *map;
+ struct MR_LD_RAID *raid;
+ struct MR_LD_TARGET_SYNC *ld_sync;
+ dma_addr_t ci_h = 0;
+ u32 size_map_info;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+		printk(KERN_DEBUG "megasas: Failed to get cmd for sync "
+		       "info.\n");
+ return -ENOMEM;
+ }
+
+ fusion = instance->ctrl_context;
+
+ if (!fusion) {
+ megasas_return_cmd(instance, cmd);
+ return 1;
+ }
+
+ map = fusion->ld_map[instance->map_id & 1];
+
+ num_lds = le32_to_cpu(map->raidMap.ldCount);
+
+ dcmd = &cmd->frame->dcmd;
+
+ size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
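+	/*
+	 * The LD map is double buffered; build the target sync data in the
+	 * buffer that is not currently in use ((map_id - 1) & 1).
+	 */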
+ ci = (struct MR_LD_TARGET_SYNC *)
+ fusion->ld_map[(instance->map_id - 1) & 1];
+ memset(ci, 0, sizeof(struct MR_FW_RAID_MAP_ALL));
+
+ ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
+
+ ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
+
+ for (i = 0; i < num_lds; i++, ld_sync++) {
+ raid = MR_LdRaidGet(i, map);
+ ld_sync->targetId = MR_GetLDTgtId(i, map);
+ ld_sync->seqNum = raid->seqNum;
+ }
+
+ size_map_info = sizeof(struct MR_FW_RAID_MAP) +
+ (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(size_map_info);
+ dcmd->mbox.b[0] = num_lds;
+ dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
+ dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+ instance->map_update_cmd = cmd;
+
+ instance->instancet->issue_dcmd(instance, cmd);
+
+ return ret;
+}
+
+/*
+ * megasas_display_intel_branding - Display branding string
+ * @instance: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+megasas_display_intel_branding(struct megasas_instance *instance)
+{
+ if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
+ return;
+
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_INVADER:
+ switch (instance->pdev->subsystem_device) {
+ case MEGARAID_INTEL_RS3DC080_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3DC080_BRANDING);
+ break;
+ case MEGARAID_INTEL_RS3DC040_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3DC040_BRANDING);
+ break;
+ case MEGARAID_INTEL_RS3SC008_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3SC008_BRANDING);
+ break;
+ case MEGARAID_INTEL_RS3MC044_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3MC044_BRANDING);
+ break;
+ default:
+ break;
+ }
+ break;
+ case PCI_DEVICE_ID_LSI_FURY:
+ switch (instance->pdev->subsystem_device) {
+ case MEGARAID_INTEL_RS3WC080_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3WC080_BRANDING);
+ break;
+ case MEGARAID_INTEL_RS3WC040_SSDID:
+ dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
+ instance->host->host_no,
+ MEGARAID_INTEL_RS3WC040_BRANDING);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * megasas_init_adapter_fusion - Initializes the FW
+ * @instance: Adapter soft state
+ *
+ * This is the main function for initializing firmware.
+ */
+u32
+megasas_init_adapter_fusion(struct megasas_instance *instance)
+{
+ struct megasas_register_set __iomem *reg_set;
+ struct fusion_context *fusion;
+ u32 max_cmd;
+ int i = 0, count;
+
+ fusion = instance->ctrl_context;
+
+ reg_set = instance->reg_set;
+
+ /*
+ * Get various operational parameters from status register
+ */
+ instance->max_fw_cmds =
+ instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+ instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008);
+
+ /*
+ * Reduce the max supported cmds by 1. This is to ensure that the
+ * reply_q_sz (1 more than the max cmd that driver may send)
+ * does not exceed max cmds that the FW can support
+ */
+ instance->max_fw_cmds = instance->max_fw_cmds-1;
+ /* Only internal cmds (DCMD) need to have MFI frames */
+ instance->max_mfi_cmds = MEGASAS_INT_CMDS;
+
+ max_cmd = instance->max_fw_cmds;
+
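+	/*
+	 * Reply queue depth: one slot more than max_cmd, rounded up to a
+	 * multiple of 16.
+	 */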
+ fusion->reply_q_depth = ((max_cmd + 1 + 15)/16)*16;
+
+ fusion->request_alloc_sz =
+ sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
+ fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
+ *(fusion->reply_q_depth);
+ fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
+ (max_cmd + 1)); /* Extra 1 for SMID 0 */
+
+ fusion->max_sge_in_main_msg =
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+ offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
+
+ fusion->max_sge_in_chain =
+ MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION);
+
+ instance->max_num_sge = fusion->max_sge_in_main_msg +
+ fusion->max_sge_in_chain - 2;
+
+ /* Used for pass thru MFI frame (DCMD) */
+ fusion->chain_offset_mfi_pthru =
+ offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
+
+ fusion->chain_offset_io_request =
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
+ sizeof(union MPI2_SGE_IO_UNION))/16;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ for (i = 0 ; i < count; i++)
+ fusion->last_reply_idx[i] = 0;
+
+ /*
+ * Allocate memory for descriptors
+ * Create a pool of commands
+ */
+ if (megasas_alloc_cmds(instance))
+ goto fail_alloc_mfi_cmds;
+ if (megasas_alloc_cmds_fusion(instance))
+ goto fail_alloc_cmds;
+
+ if (megasas_ioc_init_fusion(instance))
+ goto fail_ioc_init;
+
+ megasas_display_intel_branding(instance);
+
+ instance->flag_ieee = 1;
+
+ fusion->map_sz = sizeof(struct MR_FW_RAID_MAP) +
+ (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+
+ fusion->fast_path_io = 0;
+
+ for (i = 0; i < 2; i++) {
+ fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
+ fusion->map_sz,
+ &fusion->ld_map_phys[i],
+ GFP_KERNEL);
+ if (!fusion->ld_map[i]) {
+ printk(KERN_ERR "megasas: Could not allocate memory "
+ "for map info\n");
+ goto fail_map_info;
+ }
+ }
+
+ if (!megasas_get_map_info(instance))
+ megasas_sync_map_info(instance);
+
+ return 0;
+
+fail_map_info:
+ if (i == 1)
+ dma_free_coherent(&instance->pdev->dev, fusion->map_sz,
+ fusion->ld_map[0], fusion->ld_map_phys[0]);
+fail_ioc_init:
+ megasas_free_cmds_fusion(instance);
+fail_alloc_cmds:
+ megasas_free_cmds(instance);
+fail_alloc_mfi_cmds:
+ return 1;
+}
+
+/**
+ * megasas_fire_cmd_fusion - Sends command to the FW
+ * @instance:			Adapter soft state
+ * @req_desc_lo:		Low 32 bits of the request descriptor
+ * @req_desc_hi:		High 32 bits of the request descriptor
+ * @regs:			MFI register set
+ */
+void
+megasas_fire_cmd_fusion(struct megasas_instance *instance,
+ dma_addr_t req_desc_lo,
+ u32 req_desc_hi,
+ struct megasas_register_set __iomem *regs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&instance->hba_lock, flags);
+
+ writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
+ writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+}
+
+/**
+ * map_cmd_status - Maps FW cmd status to OS cmd status
+ * @cmd : Pointer to cmd
+ * @status : status of cmd returned by FW
+ * @ext_status : ext status of cmd returned by FW
+ */
+
+void
+map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
+{
+
+ switch (status) {
+
+ case MFI_STAT_OK:
+ cmd->scmd->result = DID_OK << 16;
+ break;
+
+ case MFI_STAT_SCSI_IO_FAILED:
+ case MFI_STAT_LD_INIT_IN_PROGRESS:
+ cmd->scmd->result = (DID_ERROR << 16) | ext_status;
+ break;
+
+ case MFI_STAT_SCSI_DONE_WITH_ERROR:
+
+ cmd->scmd->result = (DID_OK << 16) | ext_status;
+ if (ext_status == SAM_STAT_CHECK_CONDITION) {
+ memset(cmd->scmd->sense_buffer, 0,
+ SCSI_SENSE_BUFFERSIZE);
+ memcpy(cmd->scmd->sense_buffer, cmd->sense,
+ SCSI_SENSE_BUFFERSIZE);
+ cmd->scmd->result |= DRIVER_SENSE << 24;
+ }
+ break;
+
+ case MFI_STAT_LD_OFFLINE:
+ case MFI_STAT_DEVICE_NOT_FOUND:
+ cmd->scmd->result = DID_BAD_TARGET << 16;
+ break;
+ case MFI_STAT_CONFIG_SEQ_MISMATCH:
+ cmd->scmd->result = DID_IMM_RETRY << 16;
+ break;
+ default:
+ printk(KERN_DEBUG "megasas: FW status %#x\n", status);
+ cmd->scmd->result = DID_ERROR << 16;
+ break;
+ }
+}
+
+/**
+ * megasas_make_sgl_fusion -	Prepares IEEE SGL (64-bit)
+ * @instance: Adapter soft state
+ * @scp: SCSI command from the mid-layer
+ * @sgl_ptr: SGL to be filled in
+ * @cmd: cmd we are working on
+ *
+ * If successful, this function returns the number of SG elements.
+ */
+static int
+megasas_make_sgl_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scp,
+ struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
+ struct megasas_cmd_fusion *cmd)
+{
+ int i, sg_processed, sge_count;
+ struct scatterlist *os_sgl;
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
+ sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
+ sgl_ptr_end->Flags = 0;
+ }
+
+ sge_count = scsi_dma_map(scp);
+
+ BUG_ON(sge_count < 0);
+
+ if (sge_count > instance->max_num_sge || !sge_count)
+ return sge_count;
+
+ scsi_for_each_sg(scp, os_sgl, sge_count, i) {
+ sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
+ sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
+ sgl_ptr->Flags = 0;
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ if (i == sge_count - 1)
+ sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
+ }
+ sgl_ptr++;
+
+ sg_processed = i + 1;
+
+ if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
+ (sge_count > fusion->max_sge_in_main_msg)) {
+
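+			/*
+			 * Scatter list spills past the main frame: the last
+			 * main-frame SGE slot becomes a chain element that
+			 * points at this cmd's sg_frame.
+			 */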
+ struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY)) {
+ if ((le16_to_cpu(cmd->io_request->IoFlags) &
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+ cmd->io_request->ChainOffset =
+ fusion->
+ chain_offset_io_request;
+ else
+ cmd->io_request->ChainOffset = 0;
+ } else
+ cmd->io_request->ChainOffset =
+ fusion->chain_offset_io_request;
+
+ sg_chain = sgl_ptr;
+ /* Prepare chain element */
+ sg_chain->NextChainOffset = 0;
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY))
+ sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
+ else
+ sg_chain->Flags =
+ (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
+ sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
+ sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
+
+ sgl_ptr =
+ (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
+ }
+ }
+
+ return sge_count;
+}
+
+/**
+ * megasas_set_pd_lba - Sets PD LBA
+ * @io_request:		IO request frame whose CDB is filled in
+ * @cdb_len:		CDB length
+ * @io_info:		IO information (start block, number of blocks, target)
+ * @scp:		SCSI command from the mid-layer
+ * @local_map_ptr:	FW RAID map
+ * @ref_tag:		Logical block reference tag (for T10 PI)
+ *
+ * Used to set the PD LBA in CDB for FP IOs
+ */
+void
+megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
+ struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
+ struct MR_FW_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
+{
+ struct MR_LD_RAID *raid;
+ u32 ld;
+ u64 start_blk = io_info->pdBlock;
+ u8 *cdb = io_request->CDB.CDB32;
+ u32 num_blocks = io_info->numBlocks;
+ u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
+
+ /* Check if T10 PI (DIF) is enabled for this LD */
+ ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+ if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+ cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
+ cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;
+
+ if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
+ else
+ cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
+ cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
+
+ /* LBA */
+ cdb[12] = (u8)((start_blk >> 56) & 0xff);
+ cdb[13] = (u8)((start_blk >> 48) & 0xff);
+ cdb[14] = (u8)((start_blk >> 40) & 0xff);
+ cdb[15] = (u8)((start_blk >> 32) & 0xff);
+ cdb[16] = (u8)((start_blk >> 24) & 0xff);
+ cdb[17] = (u8)((start_blk >> 16) & 0xff);
+ cdb[18] = (u8)((start_blk >> 8) & 0xff);
+ cdb[19] = (u8)(start_blk & 0xff);
+
+ /* Logical block reference tag */
+ io_request->CDB.EEDP32.PrimaryReferenceTag =
+ cpu_to_be32(ref_tag);
+ io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
+ io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
+
+ /* Transfer length */
+ cdb[28] = (u8)((num_blocks >> 24) & 0xff);
+ cdb[29] = (u8)((num_blocks >> 16) & 0xff);
+ cdb[30] = (u8)((num_blocks >> 8) & 0xff);
+ cdb[31] = (u8)(num_blocks & 0xff);
+
+ /* set SCSI IO EEDPFlags */
+ if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
+ io_request->EEDPFlags = cpu_to_le16(
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
+ } else {
+ io_request->EEDPFlags = cpu_to_le16(
+ MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
+ }
+ io_request->Control |= cpu_to_le32((0x4 << 26));
+ io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
+ } else {
+		/* Some drives don't support 16/12 byte CDBs, convert to 10 */
+ if (((cdb_len == 12) || (cdb_len == 16)) &&
+ (start_blk <= 0xffffffff)) {
+ if (cdb_len == 16) {
+ opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
+ flagvals = cdb[1];
+ groupnum = cdb[14];
+ control = cdb[15];
+ } else {
+ opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[6] = groupnum;
+ cdb[9] = control;
+
+ /* Transfer length */
+ cdb[8] = (u8)(num_blocks & 0xff);
+ cdb[7] = (u8)((num_blocks >> 8) & 0xff);
+
+ io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
+ cdb_len = 10;
+ } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
+			/* Convert to 16 byte CDB for large LBAs */
+ switch (cdb_len) {
+ case 6:
+ opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
+ control = cdb[5];
+ break;
+ case 10:
+ opcode =
+ cdb[0] == READ_10 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[6];
+ control = cdb[9];
+ break;
+ case 12:
+ opcode =
+ cdb[0] == READ_12 ? READ_16 : WRITE_16;
+ flagvals = cdb[1];
+ groupnum = cdb[10];
+ control = cdb[11];
+ break;
+ }
+
+ memset(cdb, 0, sizeof(io_request->CDB.CDB32));
+
+ cdb[0] = opcode;
+ cdb[1] = flagvals;
+ cdb[14] = groupnum;
+ cdb[15] = control;
+
+ /* Transfer length */
+ cdb[13] = (u8)(num_blocks & 0xff);
+ cdb[12] = (u8)((num_blocks >> 8) & 0xff);
+ cdb[11] = (u8)((num_blocks >> 16) & 0xff);
+ cdb[10] = (u8)((num_blocks >> 24) & 0xff);
+
+ io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
+ cdb_len = 16;
+ }
+
+ /* Normal case, just load LBA here */
+ switch (cdb_len) {
+ case 6:
+ {
+ u8 val = cdb[1] & 0xE0;
+ cdb[3] = (u8)(start_blk & 0xff);
+ cdb[2] = (u8)((start_blk >> 8) & 0xff);
+ cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
+ break;
+ }
+ case 10:
+ cdb[5] = (u8)(start_blk & 0xff);
+ cdb[4] = (u8)((start_blk >> 8) & 0xff);
+ cdb[3] = (u8)((start_blk >> 16) & 0xff);
+ cdb[2] = (u8)((start_blk >> 24) & 0xff);
+ break;
+ case 12:
+ cdb[5] = (u8)(start_blk & 0xff);
+ cdb[4] = (u8)((start_blk >> 8) & 0xff);
+ cdb[3] = (u8)((start_blk >> 16) & 0xff);
+ cdb[2] = (u8)((start_blk >> 24) & 0xff);
+ break;
+ case 16:
+ cdb[9] = (u8)(start_blk & 0xff);
+ cdb[8] = (u8)((start_blk >> 8) & 0xff);
+ cdb[7] = (u8)((start_blk >> 16) & 0xff);
+ cdb[6] = (u8)((start_blk >> 24) & 0xff);
+ cdb[5] = (u8)((start_blk >> 32) & 0xff);
+ cdb[4] = (u8)((start_blk >> 40) & 0xff);
+ cdb[3] = (u8)((start_blk >> 48) & 0xff);
+ cdb[2] = (u8)((start_blk >> 56) & 0xff);
+ break;
+ }
+ }
+}
+
+/**
+ * megasas_build_ldio_fusion - Prepares IOs to devices
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared
+ *
+ * Prepares the io_request and chain elements (sg_frame) for IO
+ * The IO can be for PD (Fast Path) or LD
+ */
+void
+megasas_build_ldio_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scp,
+ struct megasas_cmd_fusion *cmd)
+{
+ u8 fp_possible;
+ u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct IO_REQUEST_INFO io_info;
+ struct fusion_context *fusion;
+ struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+ u8 *raidLUN;
+
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+
+ fusion = instance->ctrl_context;
+
+ io_request = cmd->io_request;
+ io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
+ io_request->RaidContext.status = 0;
+ io_request->RaidContext.exStatus = 0;
+
+ req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+ fp_possible = 0;
+
+ /*
+ * 6-byte READ(0x08) or WRITE(0x0A) cdb
+ */
+ if (scp->cmd_len == 6) {
+ datalength = (u32) scp->cmnd[4];
+ start_lba_lo = ((u32) scp->cmnd[1] << 16) |
+ ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
+
+ start_lba_lo &= 0x1FFFFF;
+ }
+
+ /*
+ * 10-byte READ(0x28) or WRITE(0x2A) cdb
+ */
+ else if (scp->cmd_len == 10) {
+ datalength = (u32) scp->cmnd[8] |
+ ((u32) scp->cmnd[7] << 8);
+ start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ }
+
+ /*
+ * 12-byte READ(0xA8) or WRITE(0xAA) cdb
+ */
+ else if (scp->cmd_len == 12) {
+ datalength = ((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+ start_lba_lo = ((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ }
+
+ /*
+ * 16-byte READ(0x88) or WRITE(0x8A) cdb
+ */
+ else if (scp->cmd_len == 16) {
+ datalength = ((u32) scp->cmnd[10] << 24) |
+ ((u32) scp->cmnd[11] << 16) |
+ ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
+ start_lba_lo = ((u32) scp->cmnd[6] << 24) |
+ ((u32) scp->cmnd[7] << 16) |
+ ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
+
+ start_lba_hi = ((u32) scp->cmnd[2] << 24) |
+ ((u32) scp->cmnd[3] << 16) |
+ ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
+ }
+
+ memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
+ io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
+ io_info.numBlocks = datalength;
+ io_info.ldTgtId = device_id;
+ io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
+
+ if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ io_info.isRead = 1;
+
+ local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
+
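+	/*
+	 * Fast path is possible only when the target id maps to a valid LD
+	 * in the RAID map and MR_BuildRaidContext() can resolve the physical
+	 * location; otherwise the I/O is sent down the FW (LD) path.
+	 */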
+ if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
+ MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) {
+ io_request->RaidContext.regLockFlags = 0;
+ fp_possible = 0;
+ } else {
+ if (MR_BuildRaidContext(instance, &io_info,
+ &io_request->RaidContext,
+ local_map_ptr, &raidLUN))
+ fp_possible = io_info.fpOkForIo;
+ }
+
+	/*
+	 * Use smp_processor_id() for now, until cmd->request->cpu is a CPU
+	 * id by default (not a CPU group id); otherwise not all MSI-X queues
+	 * would be utilized.
+	 */
+ cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
+ smp_processor_id() % instance->msix_vectors : 0;
+
+ if (fp_possible) {
+ megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
+ local_map_ptr, start_lba_lo);
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
+ << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ if (io_request->RaidContext.regLockFlags ==
+ REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.nseg = 0x1;
+ io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
+ io_request->RaidContext.regLockFlags |=
+ (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ }
+ if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
+ (io_info.isRead)) {
+ io_info.devHandle =
+ get_updated_dev_handle(
+ &fusion->load_balance_info[device_id],
+ &io_info);
+ scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
+ } else
+ scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+ cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
+ io_request->DevHandle = io_info.devHandle;
+ /* populate the LUN field */
+ memcpy(io_request->LUN, raidLUN, 8);
+ } else {
+ io_request->RaidContext.timeoutValue =
+ cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
+ << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ if (io_request->RaidContext.regLockFlags ==
+ REGION_TYPE_UNUSED)
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.regLockFlags |=
+ (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
+ MR_RL_FLAGS_SEQ_NUM_ENABLE);
+ io_request->RaidContext.nseg = 0x1;
+ }
+ io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = cpu_to_le16(device_id);
+ } /* Not FP */
+}
+
+/**
+ * megasas_build_dcdb_fusion - Prepares IOs to devices
+ * @instance: Adapter soft state
+ * @scmd: SCSI command
+ * @cmd: Command to be prepared
+ *
+ * Prepares the io_request frame for non-io cmds
+ */
+static void
+megasas_build_dcdb_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd,
+ struct megasas_cmd_fusion *cmd)
+{
+ u32 device_id;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+ u16 pd_index = 0;
+ struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+ struct fusion_context *fusion = instance->ctrl_context;
+ u8 span, physArm;
+ u16 devHandle;
+ u32 ld, arRef, pd;
+ struct MR_LD_RAID *raid;
+ struct RAID_CONTEXT *pRAID_Context;
+
+ io_request = cmd->io_request;
+ device_id = MEGASAS_DEV_INDEX(instance, scmd);
+ pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
+ +scmd->device->id;
+ local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
+
+ io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+
+
+ /* Check if this is a system PD I/O */
+ if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
+ instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
+ io_request->Function = 0;
+ if (fusion->fast_path_io)
+ io_request->DevHandle =
+ local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ io_request->RaidContext.timeoutValue =
+ local_map_ptr->raidMap.fpPdIoTimeoutSec;
+ io_request->RaidContext.regLockFlags = 0;
+ io_request->RaidContext.regLockRowLBA = 0;
+ io_request->RaidContext.regLockLength = 0;
+ io_request->RaidContext.RAIDFlags =
+ MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
+ MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ io_request->IoFlags |= cpu_to_le16(
+ MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ cmd->request_desc->SCSIIO.DevHandle =
+ local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
+ /*
+		 * If the command is for a tape device, set the
+ * FP timeout to the os layer timeout value.
+ */
+ if (scmd->device->type == TYPE_TAPE) {
+ if ((scmd->request->timeout / HZ) > 0xFFFF)
+ io_request->RaidContext.timeoutValue =
+ 0xFFFF;
+ else
+ io_request->RaidContext.timeoutValue =
+ scmd->request->timeout / HZ;
+ }
+ } else {
+ if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS)
+ goto NonFastPath;
+
+ ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+ if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io))
+ goto NonFastPath;
+
+ raid = MR_LdRaidGet(ld, local_map_ptr);
+
+ /* check if this LD is FP capable */
+ if (!(raid->capability.fpNonRWCapable))
+ /* not FP capable, send as non-FP */
+ goto NonFastPath;
+
+ /* get RAID_Context pointer */
+ pRAID_Context = &io_request->RaidContext;
+
+ /* set RAID context values */
+ pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
+ pRAID_Context->timeoutValue = raid->fpIoTimeoutForLd;
+ pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+ pRAID_Context->regLockRowLBA = 0;
+ pRAID_Context->regLockLength = 0;
+ pRAID_Context->configSeqNum = raid->seqNum;
+
+ /* get the DevHandle for the PD (since this is
+ fpNonRWCapable, this is a single disk RAID0) */
+ span = physArm = 0;
+ arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
+ pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
+ devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
+
+ /* build request descriptor */
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ cmd->request_desc->SCSIIO.DevHandle = devHandle;
+
+ /* populate the LUN field */
+ memcpy(io_request->LUN, raid->LUN, 8);
+
+ /* build the raidScsiIO structure */
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ io_request->DevHandle = devHandle;
+
+ return;
+
+NonFastPath:
+ io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = cpu_to_le16(device_id);
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ }
+ io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
+ io_request->LUN[1] = scmd->device->lun;
+}
+
+/**
+ * megasas_build_io_fusion - Prepares IOs to devices
+ * @instance: Adapter soft state
+ * @scp: SCSI command
+ * @cmd: Command to be prepared
+ *
+ * Invokes helper functions to prepare request frames
+ * and sets flags appropriate for IO/Non-IO cmd
+ */
+int
+megasas_build_io_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scp,
+ struct megasas_cmd_fusion *cmd)
+{
+ u32 device_id, sge_count;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
+
+ device_id = MEGASAS_DEV_INDEX(instance, scp);
+
+ /* Zero out some fields so they don't get reused */
+ io_request->LUN[1] = 0;
+ io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
+ io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
+ io_request->EEDPFlags = 0;
+ io_request->Control = 0;
+ io_request->EEDPBlockSize = 0;
+ io_request->ChainOffset = 0;
+ io_request->RaidContext.RAIDFlags = 0;
+ io_request->RaidContext.Type = 0;
+ io_request->RaidContext.nseg = 0;
+
+ memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
+ /*
+	 * Just the CDB length; the rest of the Flags are zero.
+ * This will be modified for FP in build_ldio_fusion
+ */
+ io_request->IoFlags = cpu_to_le16(scp->cmd_len);
+
+ if (megasas_is_ldio(scp))
+ megasas_build_ldio_fusion(instance, scp, cmd);
+ else
+ megasas_build_dcdb_fusion(instance, scp, cmd);
+
+ /*
+ * Construct SGL
+ */
+
+ sge_count =
+ megasas_make_sgl_fusion(instance, scp,
+ (struct MPI25_IEEE_SGE_CHAIN64 *)
+ &io_request->SGL, cmd);
+
+ if (sge_count > instance->max_num_sge) {
+ printk(KERN_ERR "megasas: Error. sge_count (0x%x) exceeds "
+ "max (0x%x) allowed\n", sge_count,
+ instance->max_num_sge);
+ return 1;
+ }
+
+ io_request->RaidContext.numSGE = sge_count;
+
+ io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
+
+ if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
+ else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
+
+ io_request->SGLOffset0 =
+ offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
+
+ io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
+ io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+
+ cmd->scmd = scp;
+ scp->SCp.ptr = (char *)cmd;
+
+ return 0;
+}
+
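+/**
+ * megasas_get_request_descriptor - Get a request descriptor by index
+ * @instance: Adapter soft state
+ * @index: Zero-based index into the descriptor array (SMID - 1)
+ *
+ * Returns NULL if the index is out of range.
+ */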
+union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
+{
+ u8 *p;
+ struct fusion_context *fusion;
+
+ if (index >= instance->max_fw_cmds) {
+		printk(KERN_ERR "megasas: Invalid SMID (0x%x) request for "
+ "descriptor for scsi%d\n", index,
+ instance->host->host_no);
+ return NULL;
+ }
+ fusion = instance->ctrl_context;
+ p = fusion->req_frames_desc
+ +sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index;
+
+ return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
+}
+
+/**
+ * megasas_build_and_issue_cmd_fusion - Main routine for building and
+ *					 issuing non-IOCTL cmds
+ * @instance: Adapter soft state
+ * @scmd: pointer to scsi cmd from OS
+ */
+static u32
+megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ struct megasas_cmd_fusion *cmd;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u32 index;
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ cmd = megasas_get_cmd_fusion(instance);
+ if (!cmd)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ index = cmd->index;
+
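+	/* SMIDs are 1-based (SMID 0 is reserved), the descriptor array is 0-based */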
+ req_desc = megasas_get_request_descriptor(instance, index-1);
+ if (!req_desc)
+ return 1;
+
+ req_desc->Words = 0;
+ cmd->request_desc = req_desc;
+
+ if (megasas_build_io_fusion(instance, scmd, cmd)) {
+ megasas_return_cmd_fusion(instance, cmd);
+ printk(KERN_ERR "megasas: Error building command.\n");
+ cmd->request_desc = NULL;
+ return 1;
+ }
+
+ req_desc = cmd->request_desc;
+ req_desc->SCSIIO.SMID = cpu_to_le16(index);
+
+ if (cmd->io_request->ChainOffset != 0 &&
+ cmd->io_request->ChainOffset != 0xF)
+ printk(KERN_ERR "megasas: The chain offset value is not "
+		       "correct: %x\n", cmd->io_request->ChainOffset);
+
+ /*
+ * Issue the command to the FW
+ */
+ atomic_inc(&instance->fw_outstanding);
+
+ instance->instancet->fire_cmd(instance,
+ req_desc->u.low, req_desc->u.high,
+ instance->reg_set);
+
+ return 0;
+}
+
+/**
+ * complete_cmd_fusion - Completes command
+ * @instance: Adapter soft state
+ * @MSIxIndex: MSI-X (reply queue) index to service
+ *
+ * Completes all commands that are in the reply descriptor queue
+ */
+int
+complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
+{
+ union MPI2_REPLY_DESCRIPTORS_UNION *desc;
+ struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
+ struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd_mfi;
+ struct megasas_cmd_fusion *cmd_fusion;
+ u16 smid, num_completed;
+ u8 reply_descript_type, arm;
+ u32 status, extStatus, device_id;
+ union desc_value d_val;
+ struct LD_LOAD_BALANCE_INFO *lbinfo;
+
+ fusion = instance->ctrl_context;
+
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ return IRQ_HANDLED;
+
+ desc = fusion->reply_frames_desc;
+ desc += ((MSIxIndex * fusion->reply_alloc_sz)/
+ sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) +
+ fusion->last_reply_idx[MSIxIndex];
+
+ reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+
+ d_val.word = desc->Words;
+
+ reply_descript_type = reply_desc->ReplyFlags &
+ MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ return IRQ_NONE;
+
+ num_completed = 0;
+
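+	/*
+	 * Completed descriptors are reset to all ones below, so an all-ones
+	 * value marks the end of the new completions.
+	 */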
+ while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
+ smid = le16_to_cpu(reply_desc->SMID);
+
+ cmd_fusion = fusion->cmd_list[smid - 1];
+
+ scsi_io_req =
+ (struct MPI2_RAID_SCSI_IO_REQUEST *)
+ cmd_fusion->io_request;
+
+ if (cmd_fusion->scmd)
+ cmd_fusion->scmd->SCp.ptr = NULL;
+
+ status = scsi_io_req->RaidContext.status;
+ extStatus = scsi_io_req->RaidContext.exStatus;
+
+ switch (scsi_io_req->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
+ /* Update load balancing info */
+ device_id = MEGASAS_DEV_INDEX(instance,
+ cmd_fusion->scmd);
+ lbinfo = &fusion->load_balance_info[device_id];
+ if (cmd_fusion->scmd->SCp.Status &
+ MEGASAS_LOAD_BALANCE_FLAG) {
+ arm = lbinfo->raid1DevHandle[0] ==
+ cmd_fusion->io_request->DevHandle ? 0 :
+ 1;
+ atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
+ cmd_fusion->scmd->SCp.Status &=
+ ~MEGASAS_LOAD_BALANCE_FLAG;
+ }
+ if (reply_descript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
+ if (megasas_dbg_lvl == 5)
+ printk(KERN_ERR "\nmegasas: FAST Path "
+ "IO Success\n");
+ }
+ /* Fall thru and complete IO */
+ case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
+ /* Map the FW Cmd Status */
+ map_cmd_status(cmd_fusion, status, extStatus);
+ scsi_dma_unmap(cmd_fusion->scmd);
+ cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
+ scsi_io_req->RaidContext.status = 0;
+ scsi_io_req->RaidContext.exStatus = 0;
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ atomic_dec(&instance->fw_outstanding);
+
+ break;
+ case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
+ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+ megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+ cmd_fusion->flags = 0;
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+
+ break;
+ }
+
+ fusion->last_reply_idx[MSIxIndex]++;
+ if (fusion->last_reply_idx[MSIxIndex] >=
+ fusion->reply_q_depth)
+ fusion->last_reply_idx[MSIxIndex] = 0;
+
+ desc->Words = ULLONG_MAX;
+ num_completed++;
+
+ /* Get the next reply descriptor */
+ if (!fusion->last_reply_idx[MSIxIndex])
+ desc = fusion->reply_frames_desc +
+ ((MSIxIndex * fusion->reply_alloc_sz)/
+ sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
+ else
+ desc++;
+
+ reply_desc =
+ (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
+
+ d_val.word = desc->Words;
+
+ reply_descript_type = reply_desc->ReplyFlags &
+ MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+
+ if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ break;
+ }
+
+ if (!num_completed)
+ return IRQ_NONE;
+
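+	/*
+	 * Make the descriptor updates visible, then write the reply post host
+	 * index register(s) so the FW can reuse the consumed reply entries.
+	 * Invader/Fury provide one index register per set of 8 MSI-X vectors.
+	 */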
+ wmb();
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ writel(((MSIxIndex & 0x7) << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[MSIxIndex/8]);
+ else
+ writel((MSIxIndex << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[0]);
+ megasas_check_and_restore_queue_depth(instance);
+ return IRQ_HANDLED;
+}
+
+/**
+ * megasas_complete_cmd_dpc_fusion - Completes command
+ * @instance_addr: Adapter soft state (passed as an unsigned long)
+ *
+ * Tasklet to complete cmds
+ */
+void
+megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
+{
+ struct megasas_instance *instance =
+ (struct megasas_instance *)instance_addr;
+ unsigned long flags;
+ u32 count, MSIxIndex;
+
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
+	/* If we have already declared adapter dead, do not complete cmds */
+ spin_lock_irqsave(&instance->hba_lock, flags);
+ if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&instance->hba_lock, flags);
+
+ for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
+ complete_cmd_fusion(instance, MSIxIndex);
+}
+
+/**
+ * megasas_isr_fusion - isr entry point
+ * @irq: IRQ number
+ * @devp: IRQ context (struct megasas_irq_context)
+ */
+irqreturn_t megasas_isr_fusion(int irq, void *devp)
+{
+ struct megasas_irq_context *irq_context = devp;
+ struct megasas_instance *instance = irq_context->instance;
+ u32 mfiStatus, fw_state;
+
+ if (instance->mask_interrupts)
+ return IRQ_NONE;
+
+ if (!instance->msix_vectors) {
+ mfiStatus = instance->instancet->clear_intr(instance->reg_set);
+ if (!mfiStatus)
+ return IRQ_NONE;
+ }
+
+ /* If we are resetting, bail */
+ if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
+ instance->instancet->clear_intr(instance->reg_set);
+ return IRQ_HANDLED;
+ }
+
+ if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
+ instance->instancet->clear_intr(instance->reg_set);
+ /* If we didn't complete any commands, check for FW fault */
+ fw_state = instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT) {
+			printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt"
+			       " for scsi%d\n", instance->host->host_no);
+ schedule_work(&instance->work_init);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * build_mpt_mfi_pass_thru - builds a cmd for MFI Pass thru
+ * @instance: Adapter soft state
+ * @mfi_cmd: megasas_cmd pointer
+ *
+ */
+u8
+build_mpt_mfi_pass_thru(struct megasas_instance *instance,
+ struct megasas_cmd *mfi_cmd)
+{
+ struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
+ struct megasas_cmd_fusion *cmd;
+ struct fusion_context *fusion;
+ struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
+
+ cmd = megasas_get_cmd_fusion(instance);
+ if (!cmd)
+ return 1;
+
+ /* Save the smid. To be used for returning the cmd */
+ mfi_cmd->context.smid = cmd->index;
+
+ cmd->sync_cmd_idx = mfi_cmd->index;
+
+ /*
+ * For cmds where the flag is set, store the flag and check
+ * on completion. For cmds with this flag, don't call
+ * megasas_complete_cmd
+ */
+
+ if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
+ cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
+
+ fusion = instance->ctrl_context;
+ io_req = cmd->io_request;
+
+ if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
+ struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
+ (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
+ sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
+ sgl_ptr_end->Flags = 0;
+ }
+
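+	/*
+	 * Attach the legacy MFI frame as a single IEEE chain element so the
+	 * FW can fetch it through this MPT pass-thru request.
+	 */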
+ mpi25_ieee_chain =
+ (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
+
+ io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
+ io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
+ SGL) / 4;
+ io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
+
+ mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
+
+ mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
+
+ mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME);
+
+ return 0;
+}
+
+/**
+ * build_mpt_cmd - Calls helper function to build an MFI Pass thru cmd
+ * @instance: Adapter soft state
+ * @cmd: mfi cmd to build
+ *
+ */
+union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u16 index;
+
+ if (build_mpt_mfi_pass_thru(instance, cmd)) {
+		printk(KERN_ERR "megaraid_sas: Couldn't build MFI pass thru cmd\n");
+ return NULL;
+ }
+
+ index = cmd->context.smid;
+
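+	/*
+	 * The SMID saved above starts at 1 while the request descriptor
+	 * table is indexed from 0, hence the smid - 1 below.
+	 */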
+ req_desc = megasas_get_request_descriptor(instance, index - 1);
+
+ if (!req_desc)
+ return NULL;
+
+ req_desc->Words = 0;
+ req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+
+ req_desc->SCSIIO.SMID = cpu_to_le16(index);
+
+ return req_desc;
+}
+
+/**
+ * megasas_issue_dcmd_fusion - Issues an MFI Pass thru cmd
+ * @instance: Adapter soft state
+ * @cmd: mfi cmd pointer
+ *
+ */
+void
+megasas_issue_dcmd_fusion(struct megasas_instance *instance,
+ struct megasas_cmd *cmd)
+{
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+
+ req_desc = build_mpt_cmd(instance, cmd);
+ if (!req_desc) {
+		printk(KERN_ERR "megaraid_sas: Couldn't issue MFI pass thru cmd\n");
+ return;
+ }
+ instance->instancet->fire_cmd(instance, req_desc->u.low,
+ req_desc->u.high, instance->reg_set);
+}
+
+/**
+ * megasas_release_fusion - Reverses the FW initialization
+ * @instance: Adapter soft state
+ */
+void
+megasas_release_fusion(struct megasas_instance *instance)
+{
+ megasas_free_cmds(instance);
+ megasas_free_cmds_fusion(instance);
+
+ iounmap(instance->reg_set);
+
+ pci_release_selected_regions(instance->pdev, instance->bar);
+}
+
+/**
+ * megasas_read_fw_status_reg_fusion - returns the current FW status value
+ * @regs: MFI register set
+ */
+static u32
+megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
+{
+ return readl(&(regs)->outbound_scratch_pad);
+}
+
+/**
+ * megasas_adp_reset_fusion -	For controller reset
+ * @instance:			Adapter soft state
+ * @regs:				MFI register set
+ */
+static int
+megasas_adp_reset_fusion(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ return 0;
+}
+
+/**
+ * megasas_check_reset_fusion -	For controller reset check
+ * @instance:			Adapter soft state
+ * @regs:				MFI register set
+ */
+static int
+megasas_check_reset_fusion(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *regs)
+{
+ return 0;
+}
+
+/* This function waits for outstanding commands on fusion to complete */
+int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
+ int iotimeout, int *convert)
+{
+ int i, outstanding, retval = 0, hb_seconds_missed = 0;
+ u32 fw_state;
+
+ for (i = 0; i < resetwaittime; i++) {
+ /* Check if firmware is in fault state */
+ fw_state = instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ if (fw_state == MFI_STATE_FAULT) {
+ printk(KERN_WARNING "megasas: Found FW in FAULT state,"
+ " will reset adapter scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ /* If SR-IOV VF mode & heartbeat timeout, don't wait */
+ if (instance->requestorId && !iotimeout) {
+ retval = 1;
+ goto out;
+ }
+
+ /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
+ if (instance->requestorId && iotimeout) {
+ if (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ hb_seconds_missed = 0;
+ } else {
+ hb_seconds_missed++;
+ if (hb_seconds_missed ==
+ (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
+ printk(KERN_WARNING "megasas: SR-IOV:"
+ " Heartbeat never completed "
+					       "while polling during I/O "
+					       "timeout handling for "
+ "scsi%d.\n",
+ instance->host->host_no);
+ *convert = 1;
+ retval = 1;
+ goto out;
+ }
+ }
+ }
+
+ outstanding = atomic_read(&instance->fw_outstanding);
+ if (!outstanding)
+ goto out;
+
+ if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
+ printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
+ "commands to complete for scsi%d\n", i,
+ outstanding, instance->host->host_no);
+ megasas_complete_cmd_dpc_fusion(
+ (unsigned long)instance);
+ }
+ msleep(1000);
+ }
+
+ if (atomic_read(&instance->fw_outstanding)) {
+		printk(KERN_WARNING
+		       "megaraid_sas: pending commands remain after waiting, "
+ "will reset adapter scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ }
+out:
+ return retval;
+}
+
+void megasas_reset_reply_desc(struct megasas_instance *instance)
+{
+ int i, count;
+ struct fusion_context *fusion;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+
+ fusion = instance->ctrl_context;
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ for (i = 0 ; i < count ; i++)
+ fusion->last_reply_idx[i] = 0;
+ reply_desc = fusion->reply_frames_desc;
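+	/*
+	 * All-ones descriptors read back as MPI2_RPY_DESCRIPT_FLAGS_UNUSED,
+	 * i.e. the reply queues appear empty again after the reset.
+	 */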
+ for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
+ reply_desc->Words = ULLONG_MAX;
+}
+
+/* Check for a second path that is currently UP */
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+ struct scsi_cmnd *scmd)
+{
+ int i, j, retval = (DID_RESET << 16);
+
+ if (instance->mpio && instance->requestorId) {
+ for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++)
+ for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++)
+ if (megasas_mgmt_info.instance[i] &&
+ (megasas_mgmt_info.instance[i] != instance) &&
+ megasas_mgmt_info.instance[i]->mpio &&
+ megasas_mgmt_info.instance[i]->requestorId
+ &&
+ (megasas_mgmt_info.instance[i]->ld_ids[j]
+ == scmd->device->id)) {
+ retval = (DID_NO_CONNECT << 16);
+ goto out;
+ }
+ }
+out:
+ return retval;
+}
+
+/* Core fusion reset function */
+int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
+{
+ int retval = SUCCESS, i, j, retry = 0, convert = 0;
+ struct megasas_instance *instance;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd_mfi;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u32 host_diag, abs_state, status_reg, reset_adapter;
+
+ instance = (struct megasas_instance *)shost->hostdata;
+ fusion = instance->ctrl_context;
+
+ mutex_lock(&instance->reset_mutex);
+
+	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+		printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
+		       "returning FAILED for scsi%d.\n",
+		       instance->host->host_no);
+		mutex_unlock(&instance->reset_mutex);
+		return FAILED;
+	}
+
+ if (instance->requestorId && !instance->skip_heartbeat_timer_del)
+ del_timer_sync(&instance->sriov_heartbeat_timer);
+ set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
+ instance->instancet->disable_intr(instance);
+ msleep(1000);
+
+ /* First try waiting for commands to complete */
+ if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
+ &convert)) {
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ printk(KERN_WARNING "megaraid_sas: resetting fusion "
+ "adapter scsi%d.\n", instance->host->host_no);
+ if (convert)
+ iotimeout = 0;
+
+ /* Now return commands back to the OS */
+ for (i = 0 ; i < instance->max_fw_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ if (cmd_fusion->scmd) {
+ scsi_dma_unmap(cmd_fusion->scmd);
+ cmd_fusion->scmd->result =
+ megasas_check_mpio_paths(instance,
+ cmd_fusion->scmd);
+ cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ atomic_dec(&instance->fw_outstanding);
+ }
+ }
+
+ status_reg = instance->instancet->read_fw_status_reg(
+ instance->reg_set);
+ abs_state = status_reg & MFI_STATE_MASK;
+ reset_adapter = status_reg & MFI_RESET_ADAPTER;
+ if (instance->disableOnlineCtrlReset ||
+ (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
+ /* Reset not supported, kill adapter */
+ printk(KERN_WARNING "megaraid_sas: Reset not supported"
+ ", killing adapter scsi%d.\n",
+ instance->host->host_no);
+ megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ retval = FAILED;
+ goto out;
+ }
+
+ /* Let SR-IOV VF & PF sync up if there was a HB failure */
+ if (instance->requestorId && !iotimeout) {
+ msleep(MEGASAS_OCR_SETTLE_TIME_VF);
+ /* Look for a late HB update after VF settle time */
+ if (abs_state == MFI_STATE_OPERATIONAL &&
+ (instance->hb_host_mem->HB.fwCounter !=
+ instance->hb_host_mem->HB.driverCounter)) {
+ instance->hb_host_mem->HB.driverCounter =
+ instance->hb_host_mem->HB.fwCounter;
+ printk(KERN_WARNING "megasas: SR-IOV:"
+				       " Late FW heartbeat update for "
+ "scsi%d.\n",
+ instance->host->host_no);
+ } else {
+ /* In VF mode, first poll for FW ready */
+ for (i = 0;
+ i < (MEGASAS_RESET_WAIT_TIME * 1000);
+ i += 20) {
+ status_reg =
+ instance->instancet->
+ read_fw_status_reg(
+ instance->reg_set);
+ abs_state = status_reg &
+ MFI_STATE_MASK;
+ if (abs_state == MFI_STATE_READY) {
+ printk(KERN_WARNING "megasas"
+ ": SR-IOV: FW was found"
+						" to be in ready state "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ break;
+ }
+ msleep(20);
+ }
+ if (abs_state != MFI_STATE_READY) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "FW not in ready state after %d"
+ " seconds for scsi%d, status_reg = "
+ "0x%x.\n",
+ MEGASAS_RESET_WAIT_TIME,
+ instance->host->host_no,
+ status_reg);
+ megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ instance->adprecovery =
+ MEGASAS_HW_CRITICAL_ERROR;
+ retval = FAILED;
+ goto out;
+ }
+ }
+ }
+
+ /* Now try to reset the chip */
+ for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_1ST_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_2ND_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_3RD_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_4TH_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_5TH_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+ writel(MPI2_WRSEQ_6TH_KEY_VALUE,
+ &instance->reg_set->fusion_seq_offset);
+
+ /* Check that the diag write enable (DRWE) bit is on */
+ host_diag = readl(&instance->reg_set->fusion_host_diag);
+ retry = 0;
+ while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
+ msleep(100);
+ host_diag =
+ readl(&instance->reg_set->fusion_host_diag);
+ if (retry++ == 100) {
+ printk(KERN_WARNING "megaraid_sas: "
+ "Host diag unlock failed! "
+ "for scsi%d\n",
+ instance->host->host_no);
+ break;
+ }
+ }
+ if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
+ continue;
+
+ /* Send chip reset command */
+ writel(host_diag | HOST_DIAG_RESET_ADAPTER,
+ &instance->reg_set->fusion_host_diag);
+ msleep(3000);
+
+ /* Make sure reset adapter bit is cleared */
+ host_diag = readl(&instance->reg_set->fusion_host_diag);
+ retry = 0;
+ while (host_diag & HOST_DIAG_RESET_ADAPTER) {
+ msleep(100);
+ host_diag =
+ readl(&instance->reg_set->fusion_host_diag);
+ if (retry++ == 1000) {
+ printk(KERN_WARNING "megaraid_sas: "
+ "Diag reset adapter never "
+ "cleared for scsi%d!\n",
+ instance->host->host_no);
+ break;
+ }
+ }
+ if (host_diag & HOST_DIAG_RESET_ADAPTER)
+ continue;
+
+ abs_state =
+ instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ retry = 0;
+
+ while ((abs_state <= MFI_STATE_FW_INIT) &&
+ (retry++ < 1000)) {
+ msleep(100);
+ abs_state =
+ instance->instancet->read_fw_status_reg(
+ instance->reg_set) & MFI_STATE_MASK;
+ }
+ if (abs_state <= MFI_STATE_FW_INIT) {
+ printk(KERN_WARNING "megaraid_sas: firmware "
+ "state < MFI_STATE_FW_INIT, state = "
+ "0x%x for scsi%d\n", abs_state,
+ instance->host->host_no);
+ continue;
+ }
+
+ /* Wait for FW to become ready */
+ if (megasas_transition_to_ready(instance, 1)) {
+ printk(KERN_WARNING "megaraid_sas: Failed to "
+ "transition controller to ready "
+ "for scsi%d.\n",
+ instance->host->host_no);
+ continue;
+ }
+
+ megasas_reset_reply_desc(instance);
+ if (megasas_ioc_init_fusion(instance)) {
+ printk(KERN_WARNING "megaraid_sas: "
+ "megasas_ioc_init_fusion() failed!"
+ " for scsi%d\n",
+ instance->host->host_no);
+ continue;
+ }
+
+ /* Re-fire management commands */
+ for (j = 0 ; j < instance->max_fw_cmds; j++) {
+ cmd_fusion = fusion->cmd_list[j];
+ if (cmd_fusion->sync_cmd_idx !=
+ (u32)ULONG_MAX) {
+ cmd_mfi =
+ instance->
+ cmd_list[cmd_fusion->sync_cmd_idx];
+ if (cmd_mfi->frame->dcmd.opcode ==
+ cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
+ megasas_return_cmd(instance,
+ cmd_mfi);
+ megasas_return_cmd_fusion(
+ instance, cmd_fusion);
+ } else {
+ req_desc =
+ megasas_get_request_descriptor(
+ instance,
+ cmd_mfi->context.smid
+ -1);
+ if (!req_desc) {
+ printk(KERN_WARNING
+ "req_desc NULL"
+ " for scsi%d\n",
+ instance->host->host_no);
+						/* Return leaked MPT frame */
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ } else {
+ instance->instancet->
+ fire_cmd(instance,
+ req_desc->
+ u.low,
+ req_desc->
+ u.high,
+ instance->
+ reg_set);
+ }
+ }
+ }
+ }
+
+ clear_bit(MEGASAS_FUSION_IN_RESET,
+ &instance->reset_flags);
+ instance->instancet->enable_intr(instance);
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+
+ /* Reset load balance info */
+ memset(fusion->load_balance_info, 0,
+ sizeof(struct LD_LOAD_BALANCE_INFO)
+ *MAX_LOGICAL_DRIVES);
+
+ if (!megasas_get_map_info(instance))
+ megasas_sync_map_info(instance);
+
+ /* Restart SR-IOV heartbeat */
+ if (instance->requestorId) {
+ if (!megasas_sriov_start_heartbeat(instance, 0))
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ else
+ instance->skip_heartbeat_timer_del = 1;
+ }
+
+ /* Adapter reset completed successfully */
+ printk(KERN_WARNING "megaraid_sas: Reset "
+ "successful for scsi%d.\n",
+ instance->host->host_no);
+ retval = SUCCESS;
+ goto out;
+ }
+ /* Reset failed, kill the adapter */
+ printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
+ "adapter scsi%d.\n", instance->host->host_no);
+ megaraid_sas_kill_hba(instance);
+ instance->skip_heartbeat_timer_del = 1;
+ instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ retval = FAILED;
+ } else {
+ /* For VF: Restart HB timer if we didn't OCR */
+ if (instance->requestorId) {
+ megasas_start_timer(instance,
+ &instance->sriov_heartbeat_timer,
+ megasas_sriov_heartbeat_handler,
+ MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ }
+ clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ instance->instancet->enable_intr(instance);
+ instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ }
+out:
+ clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
+ mutex_unlock(&instance->reset_mutex);
+ return retval;
+}
+
+/* Fusion OCR work queue */
+void megasas_fusion_ocr_wq(struct work_struct *work)
+{
+ struct megasas_instance *instance =
+ container_of(work, struct megasas_instance, work_init);
+
+ megasas_reset_fusion(instance->host, 0);
+}
+
+struct megasas_instance_template megasas_instance_template_fusion = {
+ .fire_cmd = megasas_fire_cmd_fusion,
+ .enable_intr = megasas_enable_intr_fusion,
+ .disable_intr = megasas_disable_intr_fusion,
+ .clear_intr = megasas_clear_intr_fusion,
+ .read_fw_status_reg = megasas_read_fw_status_reg_fusion,
+ .adp_reset = megasas_adp_reset_fusion,
+ .check_reset = megasas_check_reset_fusion,
+ .service_isr = megasas_isr_fusion,
+ .tasklet = megasas_complete_cmd_dpc_fusion,
+ .init_adapter = megasas_init_adapter_fusion,
+ .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
+ .issue_dcmd = megasas_issue_dcmd_fusion,
+};
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
new file mode 100644
index 00000000000..e76af5459a0
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -0,0 +1,766 @@
+/*
+ * Linux MegaRAID driver for SAS based RAID controllers
+ *
+ * Copyright (c) 2009-2012 LSI Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * FILE: megaraid_sas_fusion.h
+ *
+ * Authors: LSI Corporation
+ * Manoj Jose
+ * Sumant Patro
+ *
+ * Send feedback to: <megaraidlinux@lsi.com>
+ *
+ * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
+ * ATTN: Linuxraid
+ */
+
+#ifndef _MEGARAID_SAS_FUSION_H_
+#define _MEGARAID_SAS_FUSION_H_
+
+/* Fusion defines */
+#define MEGASAS_MAX_SZ_CHAIN_FRAME 1024
+#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
+#define MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
+#define MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
+#define MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1
+#define MEGASAS_LOAD_BALANCE_FLAG 0x1
+#define MEGASAS_DCMD_MBOX_PEND_FLAG 0x1
+#define HOST_DIAG_WRITE_ENABLE 0x80
+#define HOST_DIAG_RESET_ADAPTER 0x4
+#define MEGASAS_FUSION_MAX_RESET_TRIES 3
+#define MAX_MSIX_QUEUES_FUSION 128
+
+/* Invader defines */
+#define MPI2_TYPE_CUDA 0x2
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
+#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
+#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
+#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
+
+/* T10 PI defines */
+#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
+#define MEGASAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
+#define MEGASAS_SCSI_SERVICE_ACTION_READ32 0x9
+#define MEGASAS_SCSI_SERVICE_ACTION_WRITE32 0xB
+#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
+#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
+#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60
+
+#define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C)
+#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
+
+/*
+ * Raid context flags
+ */
+
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4
+#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30
+enum MR_RAID_FLAGS_IO_SUB_TYPE {
+ MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
+ MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
+};
+
+/*
+ * Request descriptor types
+ */
+#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
+#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1
+#define MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
+#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1
+
+#define MEGASAS_FP_CMD_LEN 16
+#define MEGASAS_FUSION_IN_RESET 0
+
+/*
+ * Raid Context structure which describes MegaRAID specific IO Parameters
+ * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
+ */
+
+struct RAID_CONTEXT {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 nseg:4;
+ u8 Type:4;
+#else
+ u8 Type:4;
+ u8 nseg:4;
+#endif
+ u8 resvd0;
+ u16 timeoutValue;
+ u8 regLockFlags;
+ u8 resvd1;
+ u16 VirtualDiskTgtId;
+ u64 regLockRowLBA;
+ u32 regLockLength;
+ u16 nextLMId;
+ u8 exStatus;
+ u8 status;
+ u8 RAIDFlags;
+ u8 numSGE;
+ u16 configSeqNum;
+ u8 spanArm;
+ u8 resvd2[3];
+};
+
+#define RAID_CTX_SPANARM_ARM_SHIFT (0)
+#define RAID_CTX_SPANARM_ARM_MASK (0x1f)
+
+#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
+#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)
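+
+/*
+ * Hypothetical helpers, for exposition only (not used elsewhere in this
+ * file): spanArm packs the span number in bits 7:5 and the arm number in
+ * bits 4:0, per the RAID_CTX_SPANARM_* masks above.  Note that the fields
+ * of struct RAID_CONTEXT add up to 0x20 bytes, exactly the gap between the
+ * RaidContext (0x60) and SGL (0x80) offsets of the MPT IO frame.
+ */
+static inline u8 raid_ctx_spanarm_get_span(u8 span_arm)
+{
+	return (span_arm & RAID_CTX_SPANARM_SPAN_MASK) >>
+		RAID_CTX_SPANARM_SPAN_SHIFT;
+}
+
+static inline u8 raid_ctx_spanarm_get_arm(u8 span_arm)
+{
+	return (span_arm & RAID_CTX_SPANARM_ARM_MASK) >>
+		RAID_CTX_SPANARM_ARM_SHIFT;
+}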
+
+/*
+ * define region lock types
+ */
+enum REGION_TYPE {
+ REGION_TYPE_UNUSED = 0,
+ REGION_TYPE_SHARED_READ = 1,
+ REGION_TYPE_SHARED_WRITE = 2,
+ REGION_TYPE_EXCLUSIVE = 3,
+};
+
+/* MPI2 defines */
+#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
+#define MPI2_WHOINIT_HOST_DRIVER (0x04)
+#define MPI2_VERSION_MAJOR (0x02)
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI2_VERSION_MINOR)
+#define MPI2_HEADER_VERSION_UNIT (0x10)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
+ MPI2_HEADER_VERSION_DEV)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
+#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
+
+struct MPI25_IEEE_SGE_CHAIN64 {
+ u64 Address;
+ u32 Length;
+ u16 Reserved1;
+ u8 NextChainOffset;
+ u8 Flags;
+};
+
+struct MPI2_SGE_SIMPLE_UNION {
+ u32 FlagsLength;
+ union {
+ u32 Address32;
+ u64 Address64;
+ } u;
+};
+
+struct MPI2_SCSI_IO_CDB_EEDP32 {
+ u8 CDB[20]; /* 0x00 */
+ u32 PrimaryReferenceTag; /* 0x14 */
+ u16 PrimaryApplicationTag; /* 0x18 */
+ u16 PrimaryApplicationTagMask; /* 0x1A */
+ u32 TransferLength; /* 0x1C */
+};
+
+struct MPI2_SGE_CHAIN_UNION {
+ u16 Length;
+ u8 NextChainOffset;
+ u8 Flags;
+ union {
+ u32 Address32;
+ u64 Address64;
+ } u;
+};
+
+struct MPI2_IEEE_SGE_SIMPLE32 {
+ u32 Address;
+ u32 FlagsLength;
+};
+
+struct MPI2_IEEE_SGE_CHAIN32 {
+ u32 Address;
+ u32 FlagsLength;
+};
+
+struct MPI2_IEEE_SGE_SIMPLE64 {
+ u64 Address;
+ u32 Length;
+ u16 Reserved1;
+ u8 Reserved2;
+ u8 Flags;
+};
+
+struct MPI2_IEEE_SGE_CHAIN64 {
+ u64 Address;
+ u32 Length;
+ u16 Reserved1;
+ u8 Reserved2;
+ u8 Flags;
+};
+
+union MPI2_IEEE_SGE_SIMPLE_UNION {
+ struct MPI2_IEEE_SGE_SIMPLE32 Simple32;
+ struct MPI2_IEEE_SGE_SIMPLE64 Simple64;
+};
+
+union MPI2_IEEE_SGE_CHAIN_UNION {
+ struct MPI2_IEEE_SGE_CHAIN32 Chain32;
+ struct MPI2_IEEE_SGE_CHAIN64 Chain64;
+};
+
+union MPI2_SGE_IO_UNION {
+ struct MPI2_SGE_SIMPLE_UNION MpiSimple;
+ struct MPI2_SGE_CHAIN_UNION MpiChain;
+ union MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ union MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+};
+
+union MPI2_SCSI_IO_CDB_UNION {
+ u8 CDB32[32];
+ struct MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ struct MPI2_SGE_SIMPLE_UNION SGE;
+};
+
+/*
+ * RAID SCSI IO Request Message
+ * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
+ */
+struct MPI2_RAID_SCSI_IO_REQUEST {
+ u16 DevHandle; /* 0x00 */
+ u8 ChainOffset; /* 0x02 */
+ u8 Function; /* 0x03 */
+ u16 Reserved1; /* 0x04 */
+ u8 Reserved2; /* 0x06 */
+ u8 MsgFlags; /* 0x07 */
+ u8 VP_ID; /* 0x08 */
+ u8 VF_ID; /* 0x09 */
+ u16 Reserved3; /* 0x0A */
+ u32 SenseBufferLowAddress; /* 0x0C */
+ u16 SGLFlags; /* 0x10 */
+ u8 SenseBufferLength; /* 0x12 */
+ u8 Reserved4; /* 0x13 */
+ u8 SGLOffset0; /* 0x14 */
+ u8 SGLOffset1; /* 0x15 */
+ u8 SGLOffset2; /* 0x16 */
+ u8 SGLOffset3; /* 0x17 */
+ u32 SkipCount; /* 0x18 */
+ u32 DataLength; /* 0x1C */
+ u32 BidirectionalDataLength; /* 0x20 */
+ u16 IoFlags; /* 0x24 */
+ u16 EEDPFlags; /* 0x26 */
+ u32 EEDPBlockSize; /* 0x28 */
+ u32 SecondaryReferenceTag; /* 0x2C */
+ u16 SecondaryApplicationTag; /* 0x30 */
+ u16 ApplicationTagTranslationMask; /* 0x32 */
+ u8 LUN[8]; /* 0x34 */
+ u32 Control; /* 0x3C */
+ union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
+ struct RAID_CONTEXT RaidContext; /* 0x60 */
+ union MPI2_SGE_IO_UNION SGL; /* 0x80 */
+};
+
+/*
+ * MPT RAID MFA IO Descriptor.
+ */
+struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 MessageAddress1:24; /* bits 31:8*/
+ u32 RequestFlags:8;
+#else
+ u32 RequestFlags:8;
+ u32 MessageAddress1:24; /* bits 31:8*/
+#endif
+ u32 MessageAddress2; /* bits 61:32 */
+};
+
+/* Default Request Descriptor */
+struct MPI2_DEFAULT_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 DescriptorTypeDependent; /* 0x06 */
+};
+
+/* High Priority Request Descriptor */
+struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 Reserved1; /* 0x06 */
+};
+
+/* SCSI IO Request Descriptor */
+struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 DevHandle; /* 0x06 */
+};
+
+/* SCSI Target Request Descriptor */
+struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 IoIndex; /* 0x06 */
+};
+
+/* RAID Accelerator Request Descriptor */
+struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
+ u8 RequestFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 LMID; /* 0x04 */
+ u16 Reserved; /* 0x06 */
+};
+
+/* union of Request Descriptors */
+union MEGASAS_REQUEST_DESCRIPTOR_UNION {
+ struct MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
+ struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
+ struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
+ struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
+ struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
+ struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
+ union {
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+ u64 Words;
+ };
+};
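+
+/*
+ * Hypothetical helper, for exposition only: the anonymous union above lets
+ * the same 64-bit descriptor be read either as Words or as the low/high
+ * halves that the instance template's fire_cmd() method takes (see
+ * megasas_issue_dcmd_fusion() in megaraid_sas_fusion.c).
+ */
+static inline void megasas_req_desc_to_words(
+	union MEGASAS_REQUEST_DESCRIPTOR_UNION *desc, u32 *low, u32 *high)
+{
+	*low = desc->u.low;	/* RequestFlags, MSIxIndex and SMID bytes */
+	*high = desc->u.high;	/* descriptor-type dependent bytes */
+}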
+
+/* Default Reply Descriptor */
+struct MPI2_DEFAULT_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 DescriptorTypeDependent1; /* 0x02 */
+ u32 DescriptorTypeDependent2; /* 0x04 */
+};
+
+/* Address Reply Descriptor */
+struct MPI2_ADDRESS_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u32 ReplyFrameAddress; /* 0x04 */
+};
+
+/* SCSI IO Success Reply Descriptor */
+struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u16 TaskTag; /* 0x04 */
+ u16 Reserved1; /* 0x06 */
+};
+
+/* TargetAssist Success Reply Descriptor */
+struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u8 SequenceNumber; /* 0x04 */
+ u8 Reserved1; /* 0x05 */
+ u16 IoIndex; /* 0x06 */
+};
+
+/* Target Command Buffer Reply Descriptor */
+struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u8 VP_ID; /* 0x02 */
+ u8 Flags; /* 0x03 */
+ u16 InitiatorDevHandle; /* 0x04 */
+ u16 IoIndex; /* 0x06 */
+};
+
+/* RAID Accelerator Success Reply Descriptor */
+struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
+ u8 ReplyFlags; /* 0x00 */
+ u8 MSIxIndex; /* 0x01 */
+ u16 SMID; /* 0x02 */
+ u32 Reserved; /* 0x04 */
+};
+
+/* union of Reply Descriptors */
+union MPI2_REPLY_DESCRIPTORS_UNION {
+ struct MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
+ struct MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
+ struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
+ struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+ struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+ struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
+ RAIDAcceleratorSuccess;
+ u64 Words;
+};
+
+/* IOCInit Request message */
+struct MPI2_IOC_INIT_REQUEST {
+ u8 WhoInit; /* 0x00 */
+ u8 Reserved1; /* 0x01 */
+ u8 ChainOffset; /* 0x02 */
+ u8 Function; /* 0x03 */
+ u16 Reserved2; /* 0x04 */
+ u8 Reserved3; /* 0x06 */
+ u8 MsgFlags; /* 0x07 */
+ u8 VP_ID; /* 0x08 */
+ u8 VF_ID; /* 0x09 */
+ u16 Reserved4; /* 0x0A */
+ u16 MsgVersion; /* 0x0C */
+ u16 HeaderVersion; /* 0x0E */
+ u32 Reserved5; /* 0x10 */
+ u16 Reserved6; /* 0x14 */
+ u8 Reserved7; /* 0x16 */
+ u8 HostMSIxVectors; /* 0x17 */
+ u16 Reserved8; /* 0x18 */
+ u16 SystemRequestFrameSize; /* 0x1A */
+ u16 ReplyDescriptorPostQueueDepth; /* 0x1C */
+ u16 ReplyFreeQueueDepth; /* 0x1E */
+ u32 SenseBufferAddressHigh; /* 0x20 */
+ u32 SystemReplyAddressHigh; /* 0x24 */
+ u64 SystemRequestFrameBaseAddress; /* 0x28 */
+ u64 ReplyDescriptorPostQueueAddress;/* 0x30 */
+ u64 ReplyFreeQueueAddress; /* 0x38 */
+ u64 TimeStamp; /* 0x40 */
+};
+
+/* mrpriv defines */
+#define MR_PD_INVALID 0xFFFF
+#define MAX_SPAN_DEPTH 8
+#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
+#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
+#define MAX_ROW_SIZE 32
+#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
+#define MAX_LOGICAL_DRIVES 64
+#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
+#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
+#define MAX_ARRAYS 128
+#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+#define MAX_PHYSICAL_DEVICES 256
+#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
+#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
+#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
+
+struct MR_DEV_HANDLE_INFO {
+ u16 curDevHdl;
+ u8 validHandles;
+ u8 reserved;
+ u16 devHandle[2];
+};
+
+struct MR_ARRAY_INFO {
+ u16 pd[MAX_RAIDMAP_ROW_SIZE];
+};
+
+struct MR_QUAD_ELEMENT {
+ u64 logStart;
+ u64 logEnd;
+ u64 offsetInSpan;
+ u32 diff;
+ u32 reserved1;
+};
+
+struct MR_SPAN_INFO {
+ u32 noElements;
+ u32 reserved1;
+ struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
+};
+
+struct MR_LD_SPAN {
+ u64 startBlk;
+ u64 numBlks;
+ u16 arrayRef;
+ u8 spanRowSize;
+ u8 spanRowDataSize;
+ u8 reserved[4];
+};
+
+struct MR_SPAN_BLOCK_INFO {
+ u64 num_rows;
+ struct MR_LD_SPAN span;
+ struct MR_SPAN_INFO block_span_info;
+};
+
+struct MR_LD_RAID {
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved4:7;
+ u32 fpNonRWCapable:1;
+ u32 fpReadAcrossStripe:1;
+ u32 fpWriteAcrossStripe:1;
+ u32 fpReadCapable:1;
+ u32 fpWriteCapable:1;
+ u32 encryptionType:8;
+ u32 pdPiMode:4;
+ u32 ldPiMode:4;
+ u32 reserved5:3;
+ u32 fpCapable:1;
+#else
+ u32 fpCapable:1;
+ u32 reserved5:3;
+ u32 ldPiMode:4;
+ u32 pdPiMode:4;
+ u32 encryptionType:8;
+ u32 fpWriteCapable:1;
+ u32 fpReadCapable:1;
+ u32 fpWriteAcrossStripe:1;
+ u32 fpReadAcrossStripe:1;
+ u32 fpNonRWCapable:1;
+ u32 reserved4:7;
+#endif
+ } capability;
+ u32 reserved6;
+ u64 size;
+ u8 spanDepth;
+ u8 level;
+ u8 stripeShift;
+ u8 rowSize;
+ u8 rowDataSize;
+ u8 writeMode;
+ u8 PRL;
+ u8 SRL;
+ u16 targetId;
+ u8 ldState;
+ u8 regTypeReqOnWrite;
+ u8 modFactor;
+ u8 regTypeReqOnRead;
+ u16 seqNum;
+
+ struct {
+ u32 ldSyncRequired:1;
+ u32 reserved:31;
+ } flags;
+
+ u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
+ u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
+ u8 reserved3[0x80-0x2D]; /* 0x2D */
+};
+
+struct MR_LD_SPAN_MAP {
+ struct MR_LD_RAID ldRaid;
+ u8 dataArmMap[MAX_RAIDMAP_ROW_SIZE];
+ struct MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH];
+};
+
+struct MR_FW_RAID_MAP {
+ u32 totalSize;
+ union {
+ struct {
+ u32 maxLd;
+ u32 maxSpanDepth;
+ u32 maxRowSize;
+ u32 maxPdCount;
+ u32 maxArrays;
+ } validationInfo;
+ u32 version[5];
+ u32 reserved1[5];
+ };
+
+ u32 ldCount;
+ u32 Reserved1;
+ u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+
+ MAX_RAIDMAP_VIEWS];
+ u8 fpPdIoTimeoutSec;
+ u8 reserved2[7];
+ struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
+ struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ struct MR_LD_SPAN_MAP ldSpanMap[1];
+};
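+
+/*
+ * Hypothetical helper, for exposition only: MR_FW_RAID_MAP is a
+ * variable-length structure - ldSpanMap[1] is just the first element of an
+ * array whose real length is given by ldCount - so the number of bytes
+ * expected for a map covering ld_count logical drives would be roughly:
+ */
+static inline u32 mr_fw_raid_map_expected_sz(u32 ld_count)
+{
+	return sizeof(struct MR_FW_RAID_MAP) -
+		sizeof(struct MR_LD_SPAN_MAP) +
+		(sizeof(struct MR_LD_SPAN_MAP) * ld_count);
+}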
+
+struct IO_REQUEST_INFO {
+ u64 ldStartBlock;
+ u32 numBlocks;
+ u16 ldTgtId;
+ u8 isRead;
+ u16 devHandle;
+ u64 pdBlock;
+ u8 fpOkForIo;
+ u8 IoforUnevenSpan;
+ u8 start_span;
+ u8 reserved;
+ u64 start_row;
+};
+
+struct MR_LD_TARGET_SYNC {
+ u8 targetId;
+ u8 reserved;
+ u16 seqNum;
+};
+
+#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
+#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
+#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+#define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
+#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+#define IEEE_SGE_FLAGS_END_OF_LIST (0x40)
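+
+/*
+ * Hypothetical helper, for exposition only (the caller is assumed to have
+ * zeroed the element first): a plain data element uses the system-address
+ * flag, and the last element of a scatter list may additionally carry
+ * IEEE_SGE_FLAGS_END_OF_LIST.  Compare with the chain element built in
+ * build_mpt_mfi_pass_thru(), which instead uses
+ * IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR.
+ */
+static inline void mr_fill_ieee_sge64(struct MPI25_IEEE_SGE_CHAIN64 *sge,
+				      dma_addr_t addr, u32 len, int last)
+{
+	sge->Address = cpu_to_le64(addr);
+	sge->Length = cpu_to_le32(len);
+	sge->Flags = IEEE_SGE_FLAGS_SYSTEM_ADDR |
+		(last ? IEEE_SGE_FLAGS_END_OF_LIST : 0);
+}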
+
+struct megasas_register_set;
+struct megasas_instance;
+
+union desc_word {
+ u64 word;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+};
+
+struct megasas_cmd_fusion {
+ struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
+ dma_addr_t io_request_phys_addr;
+
+ union MPI2_SGE_IO_UNION *sg_frame;
+ dma_addr_t sg_frame_phys_addr;
+
+ u8 *sense;
+ dma_addr_t sense_phys_addr;
+
+ struct list_head list;
+ struct scsi_cmnd *scmd;
+ struct megasas_instance *instance;
+
+ u8 retry_for_fw_reset;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *request_desc;
+
+ /*
+ * Context for a MFI frame.
+ * Used to get the mfi cmd from list when a MFI cmd is completed
+ */
+ u32 sync_cmd_idx;
+ u32 index;
+ u8 flags;
+};
+
+struct LD_LOAD_BALANCE_INFO {
+ u8 loadBalanceFlag;
+ u8 reserved1;
+ u16 raid1DevHandle[2];
+ atomic_t scsi_pending_cmds[2];
+ u64 last_accessed_block[2];
+};
+
+/* SPAN_SET is info calculated from the span info in the RAID map, per LD */
+typedef struct _LD_SPAN_SET {
+ u64 log_start_lba;
+ u64 log_end_lba;
+ u64 span_row_start;
+ u64 span_row_end;
+ u64 data_strip_start;
+ u64 data_strip_end;
+ u64 data_row_start;
+ u64 data_row_end;
+ u8 strip_offset[MAX_SPAN_DEPTH];
+ u32 span_row_data_width;
+ u32 diff;
+ u32 reserved[2];
+} LD_SPAN_SET, *PLD_SPAN_SET;
+
+typedef struct LOG_BLOCK_SPAN_INFO {
+ LD_SPAN_SET span_set[MAX_SPAN_DEPTH];
+} LD_SPAN_INFO, *PLD_SPAN_INFO;
+
+struct MR_FW_RAID_MAP_ALL {
+ struct MR_FW_RAID_MAP raidMap;
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
+} __attribute__ ((packed));
+
+struct fusion_context {
+ struct megasas_cmd_fusion **cmd_list;
+ struct list_head cmd_pool;
+
+ spinlock_t cmd_pool_lock;
+
+ dma_addr_t req_frames_desc_phys;
+ u8 *req_frames_desc;
+
+ struct dma_pool *io_request_frames_pool;
+ dma_addr_t io_request_frames_phys;
+ u8 *io_request_frames;
+
+ struct dma_pool *sg_dma_pool;
+ struct dma_pool *sense_dma_pool;
+
+ dma_addr_t reply_frames_desc_phys;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc;
+ struct dma_pool *reply_frames_desc_pool;
+
+ u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
+
+ u32 reply_q_depth;
+ u32 request_alloc_sz;
+ u32 reply_alloc_sz;
+ u32 io_frames_alloc_sz;
+
+ u16 max_sge_in_main_msg;
+ u16 max_sge_in_chain;
+
+ u8 chain_offset_io_request;
+ u8 chain_offset_mfi_pthru;
+
+ struct MR_FW_RAID_MAP_ALL *ld_map[2];
+ dma_addr_t ld_map_phys[2];
+
+ u32 map_sz;
+ u8 fast_path_io;
+ struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES];
+ LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES];
+};
+
+union desc_value {
+ u64 word;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+};
+
+#endif /* _MEGARAID_SAS_FUSION_H_ */