Diffstat (limited to 'drivers/scsi/aacraid')
-rw-r--r--  drivers/scsi/aacraid/Makefile   |    4
-rw-r--r--  drivers/scsi/aacraid/README     |   74
-rw-r--r--  drivers/scsi/aacraid/TODO       |    1
-rw-r--r--  drivers/scsi/aacraid/aachba.c   | 2939
-rw-r--r--  drivers/scsi/aacraid/aacraid.h  |  866
-rw-r--r--  drivers/scsi/aacraid/commctrl.c |  494
-rw-r--r--  drivers/scsi/aacraid/comminit.c |  191
-rw-r--r--  drivers/scsi/aacraid/commsup.c  | 1615
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c   |  243
-rw-r--r--  drivers/scsi/aacraid/linit.c    |  941
-rw-r--r--  drivers/scsi/aacraid/nark.c     |   84
-rw-r--r--  drivers/scsi/aacraid/rkt.c      |  412
-rw-r--r--  drivers/scsi/aacraid/rx.c       |  447
-rw-r--r--  drivers/scsi/aacraid/sa.c       |  135
-rw-r--r--  drivers/scsi/aacraid/src.c      |  833
15 files changed, 6661 insertions, 2618 deletions
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
index 28d133a3094..1bd9fd18f7f 100644
--- a/drivers/scsi/aacraid/Makefile
+++ b/drivers/scsi/aacraid/Makefile
@@ -3,6 +3,6 @@
obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \
- dpcsup.o rx.o sa.o rkt.o
+ dpcsup.o rx.o sa.o rkt.o nark.o src.o
-EXTRA_CFLAGS := -Idrivers/scsi
+ccflags-y := -Idrivers/scsi
diff --git a/drivers/scsi/aacraid/README b/drivers/scsi/aacraid/README
deleted file mode 100644
index 4fa524687bc..00000000000
--- a/drivers/scsi/aacraid/README
+++ /dev/null
@@ -1,74 +0,0 @@
-AACRAID Driver for Linux (take two)
-
-Introduction
--------------------------
-The aacraid driver adds support for Adaptec (http://www.adaptec.com)
-RAID controllers. This is a major rewrite from the original
-Adaptec supplied driver. It has significantly cleaned up both the code
-and the running binary size (the module is less than half the size of
-the original).
-
-Supported Cards/Chipsets
--------------------------
- Adaptec 2020S
- Adaptec 2025S
- Adaptec 2120S
- Adaptec 2130S
- Adaptec 2200S
- Adaptec 2230S
- Adaptec 2240S
- Adaptec 2410SA
- Adaptec 2610SA
- Adaptec 2810SA
- Adaptec 21610SA
- Adaptec 3230S
- Adaptec 3240S
- Adaptec 4000SAS
- Adaptec 4005SAS
- Adaptec 4800SAS
- Adaptec 4805SAS
- Adaptec 5400S
- Dell PERC 2 Quad Channel
- Dell PERC 2/Si
- Dell PERC 3/Si
- Dell PERC 3/Di
- Dell CERC 2
- HP NetRAID-4M
- Legend S220
- Legend S230
- IBM ServeRAID 8i
- ICP 9014R0
- ICP 9024R0
- ICP 9047MA
- ICP 9087MA
- ICP 9085LI
- ICP 5085AU
-
-People
--------------------------
-Alan Cox <alan@redhat.com>
-Christoph Hellwig <hch@infradead.org> (updates for new-style PCI probing and SCSI host registration,
- small cleanups/fixes)
-Matt Domsch <matt_domsch@dell.com> (revision ioctl, adapter messages)
-Deanna Bonds (non-DASD support, PAE fibs and 64 bit, added new adaptec controllers
- added new ioctls, changed scsi interface to use new error handler,
- increased the number of fibs and outstanding commands to a container)
-
- (fixed 64bit and 64G memory model, changed confusing naming convention
- where fibs that go to the hardware are consistently called hw_fibs and
- not just fibs like the name of the driver tracking structure)
-Mark Salyzyn <Mark_Salyzyn@adaptec.com> Fixed panic issues and added some new product ids for upcoming hbas.
-
-Original Driver
--------------------------
-Adaptec Unix OEM Product Group
-
-Mailing List
--------------------------
-linux-scsi@vger.kernel.org (Interested parties troll here)
-Also note this is very different to Brian's original driver
-so don't expect him to support it.
-Adaptec does support this driver. Contact either tech support or Mark Salyzyn.
-
-Original by Brian Boerner February 2001
-Rewritten by Alan Cox, November 2001
diff --git a/drivers/scsi/aacraid/TODO b/drivers/scsi/aacraid/TODO
index 2f148b4617d..78dc863eff4 100644
--- a/drivers/scsi/aacraid/TODO
+++ b/drivers/scsi/aacraid/TODO
@@ -1,4 +1,3 @@
o Testing
o More testing
-o Drop irq_mask, basically unused
o I/O size increase
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index ccdf440021f..681434e2dfe 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1,11 +1,12 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -26,14 +27,14 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
-#include <asm/semaphore.h>
#include <asm/uaccess.h>
+#include <linux/highmem.h> /* For flush_kernel_dcache_page */
+#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -56,54 +57,54 @@
/*
* Sense codes
*/
-
-#define SENCODE_NO_SENSE 0x00
-#define SENCODE_END_OF_DATA 0x00
-#define SENCODE_BECOMING_READY 0x04
-#define SENCODE_INIT_CMD_REQUIRED 0x04
-#define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
-#define SENCODE_INVALID_COMMAND 0x20
-#define SENCODE_LBA_OUT_OF_RANGE 0x21
-#define SENCODE_INVALID_CDB_FIELD 0x24
-#define SENCODE_LUN_NOT_SUPPORTED 0x25
-#define SENCODE_INVALID_PARAM_FIELD 0x26
-#define SENCODE_PARAM_NOT_SUPPORTED 0x26
-#define SENCODE_PARAM_VALUE_INVALID 0x26
-#define SENCODE_RESET_OCCURRED 0x29
-#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E
-#define SENCODE_INQUIRY_DATA_CHANGED 0x3F
-#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39
-#define SENCODE_DIAGNOSTIC_FAILURE 0x40
-#define SENCODE_INTERNAL_TARGET_FAILURE 0x44
-#define SENCODE_INVALID_MESSAGE_ERROR 0x49
-#define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c
-#define SENCODE_OVERLAPPED_COMMAND 0x4E
+
+#define SENCODE_NO_SENSE 0x00
+#define SENCODE_END_OF_DATA 0x00
+#define SENCODE_BECOMING_READY 0x04
+#define SENCODE_INIT_CMD_REQUIRED 0x04
+#define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
+#define SENCODE_INVALID_COMMAND 0x20
+#define SENCODE_LBA_OUT_OF_RANGE 0x21
+#define SENCODE_INVALID_CDB_FIELD 0x24
+#define SENCODE_LUN_NOT_SUPPORTED 0x25
+#define SENCODE_INVALID_PARAM_FIELD 0x26
+#define SENCODE_PARAM_NOT_SUPPORTED 0x26
+#define SENCODE_PARAM_VALUE_INVALID 0x26
+#define SENCODE_RESET_OCCURRED 0x29
+#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E
+#define SENCODE_INQUIRY_DATA_CHANGED 0x3F
+#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39
+#define SENCODE_DIAGNOSTIC_FAILURE 0x40
+#define SENCODE_INTERNAL_TARGET_FAILURE 0x44
+#define SENCODE_INVALID_MESSAGE_ERROR 0x49
+#define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c
+#define SENCODE_OVERLAPPED_COMMAND 0x4E
/*
* Additional sense codes
*/
-
-#define ASENCODE_NO_SENSE 0x00
-#define ASENCODE_END_OF_DATA 0x05
-#define ASENCODE_BECOMING_READY 0x01
-#define ASENCODE_INIT_CMD_REQUIRED 0x02
-#define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00
-#define ASENCODE_INVALID_COMMAND 0x00
-#define ASENCODE_LBA_OUT_OF_RANGE 0x00
-#define ASENCODE_INVALID_CDB_FIELD 0x00
-#define ASENCODE_LUN_NOT_SUPPORTED 0x00
-#define ASENCODE_INVALID_PARAM_FIELD 0x00
-#define ASENCODE_PARAM_NOT_SUPPORTED 0x01
-#define ASENCODE_PARAM_VALUE_INVALID 0x02
-#define ASENCODE_RESET_OCCURRED 0x00
-#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00
-#define ASENCODE_INQUIRY_DATA_CHANGED 0x03
-#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00
-#define ASENCODE_DIAGNOSTIC_FAILURE 0x80
-#define ASENCODE_INTERNAL_TARGET_FAILURE 0x00
-#define ASENCODE_INVALID_MESSAGE_ERROR 0x00
-#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
-#define ASENCODE_OVERLAPPED_COMMAND 0x00
+
+#define ASENCODE_NO_SENSE 0x00
+#define ASENCODE_END_OF_DATA 0x05
+#define ASENCODE_BECOMING_READY 0x01
+#define ASENCODE_INIT_CMD_REQUIRED 0x02
+#define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00
+#define ASENCODE_INVALID_COMMAND 0x00
+#define ASENCODE_LBA_OUT_OF_RANGE 0x00
+#define ASENCODE_INVALID_CDB_FIELD 0x00
+#define ASENCODE_LUN_NOT_SUPPORTED 0x00
+#define ASENCODE_INVALID_PARAM_FIELD 0x00
+#define ASENCODE_PARAM_NOT_SUPPORTED 0x01
+#define ASENCODE_PARAM_VALUE_INVALID 0x02
+#define ASENCODE_RESET_OCCURRED 0x00
+#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00
+#define ASENCODE_INQUIRY_DATA_CHANGED 0x03
+#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00
+#define ASENCODE_DIAGNOSTIC_FAILURE 0x80
+#define ASENCODE_INTERNAL_TARGET_FAILURE 0x00
+#define ASENCODE_INVALID_MESSAGE_ERROR 0x00
+#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
+#define ASENCODE_OVERLAPPED_COMMAND 0x00
#define BYTE0(x) (unsigned char)(x)
#define BYTE1(x) (unsigned char)((x) >> 8)
@@ -115,8 +116,8 @@
*----------------------------------------------------------------------------*/
/* SCSI inquiry data */
struct inquiry_data {
- u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */
- u8 inqd_dtq; /* RMB | Device Type Qualifier */
+ u8 inqd_pdt; /* Peripheral qualifier | Peripheral Device Type */
+ u8 inqd_dtq; /* RMB | Device Type Qualifier */
u8 inqd_ver; /* ISO version | ECMA version | ANSI-approved version */
u8 inqd_rdf; /* AENC | TrmIOP | Response data format */
u8 inqd_len; /* Additional length (n-4) */
@@ -130,9 +131,14 @@ struct inquiry_data {
/*
* M O D U L E G L O B A L S
*/
-
-static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
-static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
+
+static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
+static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
+static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
+static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
+ struct aac_raw_io2 *rio2, int sg_max);
+static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
+ int pages, int nseg, int nseg_new);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status);
@@ -140,42 +146,135 @@ static char *aac_get_status_string(u32 status);
/*
* Non dasd selection is handled entirely in aachba now
- */
-
+ */
+
static int nondasd = -1;
+static int aac_cache = 2; /* WCE=0 to avoid performance problems */
static int dacmode = -1;
-
-static int commit = -1;
-
-module_param(nondasd, int, 0);
-MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
-module_param(dacmode, int, 0);
-MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
-module_param(commit, int, 0);
-MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on");
+int aac_msi;
+int aac_commit = -1;
+int startup_timeout = 180;
+int aif_timeout = 120;
+int aac_sync_mode; /* Only Sync. transfer - disabled */
+int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */
+
+module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
+ " 0=off, 1=on");
+module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
+ " 0=off, 1=on");
+module_param(nondasd, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
+ " 0=off, 1=on");
+module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
+ "\tbit 0 - Disable FUA in WRITE SCSI commands\n"
+ "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
+ "\tbit 2 - Disable only if Battery is protecting Cache");
+module_param(dacmode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
+ " 0=off, 1=on");
+module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
+ " adapter for foreign arrays.\n"
+ "This is typically needed in systems that do not have a BIOS."
+ " 0=off, 1=on");
+module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(msi, "IRQ handling."
+ " 0=PIC(default), 1=MSI, 2=MSI-X(unsupported, uses MSI)");
+module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
+	" adapter to have its kernel up and\n"
+ "running. This is typically adjusted for large systems that do not"
+ " have a BIOS.");
+module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
+ " applications to pick up AIFs before\n"
+ "deregistering them. This is typically adjusted for heavily burdened"
+ " systems.");
int numacb = -1;
module_param(numacb, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control blocks (FIB) allocated. Valid\nvalues are 512 and down. Default is to use suggestion from Firmware.");
+MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
+ " blocks (FIB) allocated. Valid values are 512 and down. Default is"
+ " to use suggestion from Firmware.");
int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512,\n2048, 4096 and 8192. Default is to use suggestion from Firmware.");
+MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
+ " size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
+ " suggestion from Firmware.");
+
+int update_interval = 30 * 60;
+module_param(update_interval, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
+ " updates issued to adapter.");
+
+int check_interval = 24 * 60 * 60;
+module_param(check_interval, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
+ " checks.");
+
+int aac_check_reset = 1;
+module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the"
+	" adapter. A value of -1 forces the reset to adapters programmed to"
+ " ignore it.");
+
+int expose_physicals = -1;
+module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
+	" -1=protect, 0=off, 1=on");
+
+int aac_reset_devices;
+module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
+
+int aac_wwn = 1;
+module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
+ "\t0 - Disable\n"
+ "\t1 - Array Meta Data Signature (default)\n"
+ "\t2 - Adapter Serial Number");
+
+
+static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
+ struct fib *fibptr) {
+ struct scsi_device *device;
+
+ if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
+ dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return 0;
+ }
+ scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
+ device = scsicmd->device;
+ if (unlikely(!device || !scsi_device_online(device))) {
+ dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return 0;
+ }
+ return 1;
+}
+
/**
* aac_get_config_status - check the adapter configuration
* @common: adapter to query
*
* Query config status, and commit the configuration if needed.
*/
-int aac_get_config_status(struct aac_dev *dev)
+int aac_get_config_status(struct aac_dev *dev, int commit_flag)
{
int status = 0;
struct fib * fibptr;
- if (!(fibptr = fib_alloc(dev)))
+ if (!(fibptr = aac_fib_alloc(dev)))
return -ENOMEM;
- fib_init(fibptr);
+ aac_fib_init(fibptr);
{
struct aac_get_config_status *dinfo;
dinfo = (struct aac_get_config_status *) fib_data(fibptr);
@@ -185,13 +284,13 @@ int aac_get_config_status(struct aac_dev *dev)
dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
}
- status = fib_send(ContainerCommand,
+ status = aac_fib_send(ContainerCommand,
fibptr,
sizeof (struct aac_get_config_status),
FsaNormal,
1, 1,
NULL, NULL);
- if (status < 0 ) {
+ if (status < 0) {
printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
} else {
struct aac_get_config_status_resp *reply
@@ -208,33 +307,51 @@ int aac_get_config_status(struct aac_dev *dev)
status = -EINVAL;
}
}
- fib_complete(fibptr);
+ /* Do not set XferState to zero unless receives a response from F/W */
+ if (status >= 0)
+ aac_fib_complete(fibptr);
+
/* Send a CT_COMMIT_CONFIG to enable discovery of devices */
if (status >= 0) {
- if (commit == 1) {
+ if ((aac_commit == 1) || commit_flag) {
struct aac_commit_config * dinfo;
- fib_init(fibptr);
+ aac_fib_init(fibptr);
dinfo = (struct aac_commit_config *) fib_data(fibptr);
-
+
dinfo->command = cpu_to_le32(VM_ContainerConfig);
dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
-
- status = fib_send(ContainerCommand,
+
+ status = aac_fib_send(ContainerCommand,
fibptr,
sizeof (struct aac_commit_config),
FsaNormal,
1, 1,
NULL, NULL);
- fib_complete(fibptr);
- } else if (commit == 0) {
+ /* Do not set XferState to zero unless
+ * receives a response from F/W */
+ if (status >= 0)
+ aac_fib_complete(fibptr);
+ } else if (aac_commit == 0) {
printk(KERN_WARNING
"aac_get_config_status: Foreign device configurations are being ignored\n");
}
}
- fib_free(fibptr);
+ /* FIB should be freed only after getting the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibptr);
return status;
}
+static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
+{
+ char inq_data;
+ scsi_sg_copy_to_buffer(scsicmd, &inq_data, sizeof(inq_data));
+ if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
+ inq_data &= 0xdf;
+ scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
+ }
+}
+
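
The helper above rewrites byte 0 of the INQUIRY response, where (per the SCSI spec) bits 7..5 hold the peripheral qualifier and bits 4..0 the device type; clearing bit 5 turns qualifier 001b ("not accessible") into 000b, which is what exposes a hidden physical disk. A stand-alone C sketch of that rewrite, assuming TYPE_DISK is 0x00 as in <scsi/scsi.h>:

/*
 * Stand-alone sketch (not driver code) of the INQUIRY byte-0 rewrite
 * performed by aac_expose_phy_device() above, assuming the standard
 * SPC layout: bits 7..5 = peripheral qualifier, bits 4..0 = device type.
 */
#include <stdio.h>

#define TYPE_DISK 0x00	/* direct-access block device, as in <scsi/scsi.h> */

static unsigned char expose_phy(unsigned char inq0)
{
	/* qualifier 001b (0x20) marks the LUN as not accessible */
	if ((inq0 & 0x20) && (inq0 & 0x1f) == TYPE_DISK)
		inq0 &= 0xdf;	/* clear bit 5 -> qualifier 000b, accessible */
	return inq0;
}

int main(void)
{
	printf("0x20 -> 0x%02x\n", expose_phy(0x20)); /* hidden disk -> 0x00 */
	printf("0x25 -> 0x%02x\n", expose_phy(0x25)); /* non-disk: unchanged */
	return 0;
}
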
/**
* aac_get_containers - list containers
* @common: adapter to probe
@@ -244,25 +361,22 @@ int aac_get_config_status(struct aac_dev *dev)
int aac_get_containers(struct aac_dev *dev)
{
struct fsa_dev_info *fsa_dev_ptr;
- u32 index;
+ u32 index;
int status = 0;
struct fib * fibptr;
- unsigned instance;
struct aac_get_container_count *dinfo;
struct aac_get_container_count_resp *dresp;
int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
- instance = dev->scsi_host_ptr->unique_id;
-
- if (!(fibptr = fib_alloc(dev)))
+ if (!(fibptr = aac_fib_alloc(dev)))
return -ENOMEM;
- fib_init(fibptr);
+ aac_fib_init(fibptr);
dinfo = (struct aac_get_container_count *) fib_data(fibptr);
dinfo->command = cpu_to_le32(VM_ContainerConfig);
dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
- status = fib_send(ContainerCommand,
+ status = aac_fib_send(ContainerCommand,
fibptr,
sizeof (struct aac_get_container_count),
FsaNormal,
@@ -271,83 +385,41 @@ int aac_get_containers(struct aac_dev *dev)
if (status >= 0) {
dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
- fib_complete(fibptr);
+ aac_fib_complete(fibptr);
}
+ /* FIB should be freed only after getting the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibptr);
if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
- fsa_dev_ptr = (struct fsa_dev_info *) kmalloc(
- sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
- if (!fsa_dev_ptr) {
- fib_free(fibptr);
+ fsa_dev_ptr = kzalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
+ GFP_KERNEL);
+ if (!fsa_dev_ptr)
return -ENOMEM;
- }
- memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
dev->fsa_dev = fsa_dev_ptr;
dev->maximum_num_containers = maximum_num_containers;
- for (index = 0; index < dev->maximum_num_containers; index++) {
- struct aac_query_mount *dinfo;
- struct aac_mount *dresp;
-
+ for (index = 0; index < dev->maximum_num_containers; ) {
fsa_dev_ptr[index].devname[0] = '\0';
- fib_init(fibptr);
- dinfo = (struct aac_query_mount *) fib_data(fibptr);
-
- dinfo->command = cpu_to_le32(VM_NameServe);
- dinfo->count = cpu_to_le32(index);
- dinfo->type = cpu_to_le32(FT_FILESYS);
+ status = aac_probe_container(dev, index);
- status = fib_send(ContainerCommand,
- fibptr,
- sizeof (struct aac_query_mount),
- FsaNormal,
- 1, 1,
- NULL, NULL);
- if (status < 0 ) {
+ if (status < 0) {
printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
break;
}
- dresp = (struct aac_mount *)fib_data(fibptr);
-
- dprintk ((KERN_DEBUG
- "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n",
- (int)index, (int)le32_to_cpu(dresp->status),
- (int)le32_to_cpu(dresp->mnt[0].vol),
- (int)le32_to_cpu(dresp->mnt[0].state),
- (unsigned)le32_to_cpu(dresp->mnt[0].capacity)));
- if ((le32_to_cpu(dresp->status) == ST_OK) &&
- (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
- (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
- fsa_dev_ptr[index].valid = 1;
- fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
- fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity);
- if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
- fsa_dev_ptr[index].ro = 1;
- }
- fib_complete(fibptr);
+
/*
* If there are no more containers, then stop asking.
*/
- if ((index + 1) >= le32_to_cpu(dresp->count)){
+ if (++index >= status)
break;
- }
}
- fib_free(fibptr);
return status;
}
-static void aac_io_done(struct scsi_cmnd * scsicmd)
-{
- unsigned long cpu_flags;
- struct Scsi_Host *host = scsicmd->device->host;
- spin_lock_irqsave(host->host_lock, cpu_flags);
- scsicmd->scsi_done(scsicmd);
- spin_unlock_irqrestore(host->host_lock, cpu_flags);
-}
-
static void get_container_name_callback(void *context, struct fib * fibptr)
{
struct aac_get_name_resp * get_name_reply;
@@ -355,37 +427,46 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
scsicmd = (struct scsi_cmnd *) context;
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
- if (fibptr == NULL)
- BUG();
+ BUG_ON(fibptr == NULL);
get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
/* Failure is irrelevant, using default value instead */
if ((le32_to_cpu(get_name_reply->status) == CT_OK)
&& (get_name_reply->data[0] != '\0')) {
- int count;
- char * dp;
- char * sp = get_name_reply->data;
+ char *sp = get_name_reply->data;
sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0';
while (*sp == ' ')
++sp;
- count = sizeof(((struct inquiry_data *)NULL)->inqd_pid);
- dp = ((struct inquiry_data *)scsicmd->request_buffer)->inqd_pid;
- if (*sp) do {
- *dp++ = (*sp) ? *sp++ : ' ';
- } while (--count > 0);
+ if (*sp) {
+ struct inquiry_data inq;
+ char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
+ int count = sizeof(d);
+ char *dp = d;
+ do {
+ *dp++ = (*sp) ? *sp++ : ' ';
+ } while (--count > 0);
+
+ scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq));
+ memcpy(inq.inqd_pid, d, sizeof(d));
+ scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq));
+ }
}
+
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
- fib_complete(fibptr);
- fib_free(fibptr);
- aac_io_done(scsicmd);
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ scsicmd->scsi_done(scsicmd);
}
/**
 * aac_get_container_name - get container name, non-blocking.
*/
-static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
+static int aac_get_container_name(struct scsi_cmnd * scsicmd)
{
int status;
struct aac_get_name *dinfo;
@@ -394,96 +475,229 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- if (!(cmd_fibcontext = fib_alloc(dev)))
+ if (!(cmd_fibcontext = aac_fib_alloc(dev)))
return -ENOMEM;
- fib_init(cmd_fibcontext);
+ aac_fib_init(cmd_fibcontext);
dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
dinfo->command = cpu_to_le32(VM_ContainerConfig);
dinfo->type = cpu_to_le32(CT_READ_NAME);
- dinfo->cid = cpu_to_le32(cid);
+ dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
- status = fib_send(ContainerCommand,
- cmd_fibcontext,
+ status = aac_fib_send(ContainerCommand,
+ cmd_fibcontext,
sizeof (struct aac_get_name),
- FsaNormal,
- 0, 1,
- (fib_callback) get_container_name_callback,
+ FsaNormal,
+ 0, 1,
+ (fib_callback)get_container_name_callback,
(void *) scsicmd);
-
+
/*
* Check that the command queued to the controller
*/
- if (status == -EINPROGRESS)
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
return 0;
-
- printk(KERN_WARNING "aac_get_container_name: fib_send failed with status: %d.\n", status);
- fib_complete(cmd_fibcontext);
- fib_free(cmd_fibcontext);
+ }
+
+ printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
return -1;
}
-/**
- * probe_container - query a logical volume
- * @dev: device to query
- * @cid: container identifier
- *
- * Queries the controller about the given volume. The volume information
- * is updated in the struct fsa_dev_info structure rather than returned.
- */
-
-static int probe_container(struct aac_dev *dev, int cid)
+static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
+{
+ struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
+
+ if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
+ return aac_scsi_cmd(scsicmd);
+
+ scsicmd->result = DID_NO_CONNECT << 16;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+}
+
+static void _aac_probe_container2(void * context, struct fib * fibptr)
{
struct fsa_dev_info *fsa_dev_ptr;
- int status;
+ int (*callback)(struct scsi_cmnd *);
+ struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
+
+
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ scsicmd->SCp.Status = 0;
+ fsa_dev_ptr = fibptr->dev->fsa_dev;
+ if (fsa_dev_ptr) {
+ struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
+ fsa_dev_ptr += scmd_id(scsicmd);
+
+ if ((le32_to_cpu(dresp->status) == ST_OK) &&
+ (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
+ (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
+ fsa_dev_ptr->valid = 1;
+ /* sense_key holds the current state of the spin-up */
+ if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
+ fsa_dev_ptr->sense_data.sense_key = NOT_READY;
+ else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
+ fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
+ fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
+ fsa_dev_ptr->size
+ = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
+ (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
+ fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
+ }
+ if ((fsa_dev_ptr->valid & 1) == 0)
+ fsa_dev_ptr->valid = 0;
+ scsicmd->SCp.Status = le32_to_cpu(dresp->count);
+ }
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
+ scsicmd->SCp.ptr = NULL;
+ (*callback)(scsicmd);
+ return;
+}
+
+static void _aac_probe_container1(void * context, struct fib * fibptr)
+{
+ struct scsi_cmnd * scsicmd;
+ struct aac_mount * dresp;
struct aac_query_mount *dinfo;
- struct aac_mount *dresp;
- struct fib * fibptr;
- unsigned instance;
+ int status;
- fsa_dev_ptr = dev->fsa_dev;
- instance = dev->scsi_host_ptr->unique_id;
+ dresp = (struct aac_mount *) fib_data(fibptr);
+ dresp->mnt[0].capacityhigh = 0;
+ if ((le32_to_cpu(dresp->status) != ST_OK) ||
+ (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
+ _aac_probe_container2(context, fibptr);
+ return;
+ }
+ scsicmd = (struct scsi_cmnd *) context;
- if (!(fibptr = fib_alloc(dev)))
- return -ENOMEM;
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
- fib_init(fibptr);
+ aac_fib_init(fibptr);
dinfo = (struct aac_query_mount *)fib_data(fibptr);
- dinfo->command = cpu_to_le32(VM_NameServe);
- dinfo->count = cpu_to_le32(cid);
+ dinfo->command = cpu_to_le32(VM_NameServe64);
+ dinfo->count = cpu_to_le32(scmd_id(scsicmd));
dinfo->type = cpu_to_le32(FT_FILESYS);
- status = fib_send(ContainerCommand,
- fibptr,
- sizeof(struct aac_query_mount),
- FsaNormal,
- 1, 1,
- NULL, NULL);
- if (status < 0) {
- printk(KERN_WARNING "aacraid: probe_container query failed.\n");
- goto error;
+ status = aac_fib_send(ContainerCommand,
+ fibptr,
+ sizeof(struct aac_query_mount),
+ FsaNormal,
+ 0, 1,
+ _aac_probe_container2,
+ (void *) scsicmd);
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS)
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ else if (status < 0) {
+ /* Inherit results from VM_NameServe, if any */
+ dresp->status = cpu_to_le32(ST_OK);
+ _aac_probe_container2(context, fibptr);
}
+}
- dresp = (struct aac_mount *) fib_data(fibptr);
+static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
+{
+ struct fib * fibptr;
+ int status = -ENOMEM;
- if ((le32_to_cpu(dresp->status) == ST_OK) &&
- (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
- (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
- fsa_dev_ptr[cid].valid = 1;
- fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
- fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity);
- if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
- fsa_dev_ptr[cid].ro = 1;
+ if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
+ struct aac_query_mount *dinfo;
+
+ aac_fib_init(fibptr);
+
+ dinfo = (struct aac_query_mount *)fib_data(fibptr);
+
+ dinfo->command = cpu_to_le32(VM_NameServe);
+ dinfo->count = cpu_to_le32(scmd_id(scsicmd));
+ dinfo->type = cpu_to_le32(FT_FILESYS);
+ scsicmd->SCp.ptr = (char *)callback;
+
+ status = aac_fib_send(ContainerCommand,
+ fibptr,
+ sizeof(struct aac_query_mount),
+ FsaNormal,
+ 0, 1,
+ _aac_probe_container1,
+ (void *) scsicmd);
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+ if (status < 0) {
+ scsicmd->SCp.ptr = NULL;
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ }
+ }
+ if (status < 0) {
+ struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
+ if (fsa_dev_ptr) {
+ fsa_dev_ptr += scmd_id(scsicmd);
+ if ((fsa_dev_ptr->valid & 1) == 0) {
+ fsa_dev_ptr->valid = 0;
+ return (*callback)(scsicmd);
+ }
+ }
}
+ return status;
+}
+
+/**
+ * aac_probe_container - query a logical volume
+ * @dev: device to query
+ * @cid: container identifier
+ *
+ * Queries the controller about the given volume. The volume information
+ * is updated in the struct fsa_dev_info structure rather than returned.
+ */
+static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
+{
+ scsicmd->device = NULL;
+ return 0;
+}
-error:
- fib_complete(fibptr);
- fib_free(fibptr);
+int aac_probe_container(struct aac_dev *dev, int cid)
+{
+ struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
+ struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
+ int status;
+ if (!scsicmd || !scsidev) {
+ kfree(scsicmd);
+ kfree(scsidev);
+ return -ENOMEM;
+ }
+ scsicmd->list.next = NULL;
+ scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
+
+ scsicmd->device = scsidev;
+ scsidev->sdev_state = 0;
+ scsidev->id = cid;
+ scsidev->host = dev->scsi_host_ptr;
+
+ if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
+ while (scsicmd->device == scsidev)
+ schedule();
+ kfree(scsidev);
+ status = scsicmd->SCp.Status;
+ kfree(scsicmd);
return status;
}
@@ -499,114 +713,620 @@ struct scsi_inq {
* @a: string to copy from
* @b: string to copy to
*
- * Copy a String from one location to another
+ * Copy a String from one location to another
* without copying \0
*/
static void inqstrcpy(char *a, char *b)
{
- while(*a != (char)0)
+ while (*a != (char)0)
*b++ = *a++;
}
static char *container_types[] = {
- "None",
- "Volume",
- "Mirror",
- "Stripe",
- "RAID5",
- "SSRW",
- "SSRO",
- "Morph",
- "Legacy",
- "RAID4",
- "RAID10",
- "RAID00",
- "V-MIRRORS",
- "PSEUDO R4",
+ "None",
+ "Volume",
+ "Mirror",
+ "Stripe",
+ "RAID5",
+ "SSRW",
+ "SSRO",
+ "Morph",
+ "Legacy",
+ "RAID4",
+ "RAID10",
+ "RAID00",
+ "V-MIRRORS",
+ "PSEUDO R4",
"RAID50",
"RAID5D",
"RAID5D0",
"RAID1E",
"RAID6",
"RAID60",
- "Unknown"
+ "Unknown"
};
-
+char * get_container_type(unsigned tindex)
+{
+ if (tindex >= ARRAY_SIZE(container_types))
+ tindex = ARRAY_SIZE(container_types) - 1;
+ return container_types[tindex];
+}
/* Function: setinqstr
*
* Arguments: [1] pointer to void [1] int
*
* Purpose: Sets SCSI inquiry data strings for vendor, product
- * and revision level. Allows strings to be set in platform dependant
- * files instead of in OS dependant driver source.
+ * and revision level. Allows strings to be set in platform dependent
+ * files instead of in OS dependent driver source.
*/
-static void setinqstr(int devtype, void *data, int tindex)
+static void setinqstr(struct aac_dev *dev, void *data, int tindex)
{
struct scsi_inq *str;
- struct aac_driver_ident *mp;
- mp = aac_get_driver_ident(devtype);
-
str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
+ memset(str, ' ', sizeof(*str));
+
+ if (dev->supplement_adapter_info.AdapterTypeText[0]) {
+ char * cp = dev->supplement_adapter_info.AdapterTypeText;
+ int c;
+ if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
+ inqstrcpy("SMC", str->vid);
+ else {
+ c = sizeof(str->vid);
+ while (*cp && *cp != ' ' && --c)
+ ++cp;
+ c = *cp;
+ *cp = '\0';
+ inqstrcpy (dev->supplement_adapter_info.AdapterTypeText,
+ str->vid);
+ *cp = c;
+ while (*cp && *cp != ' ')
+ ++cp;
+ }
+ while (*cp == ' ')
+ ++cp;
+ /* last six chars reserved for vol type */
+ c = 0;
+ if (strlen(cp) > sizeof(str->pid)) {
+ c = cp[sizeof(str->pid)];
+ cp[sizeof(str->pid)] = '\0';
+ }
+ inqstrcpy (cp, str->pid);
+ if (c)
+ cp[sizeof(str->pid)] = c;
+ } else {
+ struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
- inqstrcpy (mp->vname, str->vid);
- inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */
+ inqstrcpy (mp->vname, str->vid);
+ /* last six chars reserved for vol type */
+ inqstrcpy (mp->model, str->pid);
+ }
- if (tindex < (sizeof(container_types)/sizeof(char *))){
+ if (tindex < ARRAY_SIZE(container_types)){
char *findit = str->pid;
for ( ; *findit != ' '; findit++); /* walk till we find a space */
/* RAID is superfluous in the context of a RAID device */
if (memcmp(findit-4, "RAID", 4) == 0)
*(findit -= 4) = ' ';
- inqstrcpy (container_types[tindex], findit + 1);
+ if (((findit - str->pid) + strlen(container_types[tindex]))
+ < (sizeof(str->pid) + sizeof(str->prl)))
+ inqstrcpy (container_types[tindex], findit + 1);
}
inqstrcpy ("V1.0", str->prl);
}
-static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
- u8 a_sense_code, u8 incorrect_length,
- u8 bit_pointer, u16 field_pointer,
- u32 residue)
+static void get_container_serial_callback(void *context, struct fib * fibptr)
{
- sense_buf[0] = 0xF0; /* Sense data valid, err code 70h (current error) */
- sense_buf[1] = 0; /* Segment number, always zero */
+ struct aac_get_serial_resp * get_serial_reply;
+ struct scsi_cmnd * scsicmd;
- if (incorrect_length) {
- sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
- sense_buf[3] = BYTE3(residue);
- sense_buf[4] = BYTE2(residue);
- sense_buf[5] = BYTE1(residue);
- sense_buf[6] = BYTE0(residue);
- } else
- sense_buf[2] = sense_key; /* Sense key */
+ BUG_ON(fibptr == NULL);
- if (sense_key == ILLEGAL_REQUEST)
- sense_buf[7] = 10; /* Additional sense length */
- else
- sense_buf[7] = 6; /* Additional sense length */
+ scsicmd = (struct scsi_cmnd *) context;
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
+ /* Failure is irrelevant, using default value instead */
+ if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
+ char sp[13];
+ /* EVPD bit set */
+ sp[0] = INQD_PDT_DA;
+ sp[1] = scsicmd->cmnd[2];
+ sp[2] = 0;
+ sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
+ le32_to_cpu(get_serial_reply->uid));
+ scsi_sg_copy_from_buffer(scsicmd, sp, sizeof(sp));
+ }
+
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ scsicmd->scsi_done(scsicmd);
+}
+
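
The 13-byte buffer built in this callback follows the Unit Serial Number VPD page layout: byte 0 is the device type, byte 1 the page code echoed from the CDB, byte 3 the payload length, then the serial itself, here the firmware-reported 32-bit uid printed as eight hex digits. A stand-alone sketch with a made-up uid value:

/*
 * Stand-alone sketch of the Unit Serial Number VPD page (0x80) buffer
 * built in get_container_serial_callback() above. INQD_PDT_DA and the
 * uid value are stand-ins for the driver's constants and firmware data.
 */
#include <stdio.h>
#include <stdint.h>

#define INQD_PDT_DA 0x00	/* direct-access device type */

int main(void)
{
	uint32_t uid = 0xDEADBEEF;	/* would come from the firmware reply */
	char sp[13];

	sp[0] = INQD_PDT_DA;		/* peripheral device type */
	sp[1] = (char)0x80;		/* page code echoed from CDB byte 2 */
	sp[2] = 0;			/* reserved */
	sp[3] = snprintf(sp + 4, sizeof(sp) - 4, "%08X", uid); /* length = 8 */

	printf("page %02x len %d serial %.8s\n",
	       (unsigned char)sp[1], sp[3], sp + 4);
	return 0;
}
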
+/**
+ * aac_get_container_serial - get container serial, non-blocking.
+ */
+static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
+{
+ int status;
+ struct aac_get_serial *dinfo;
+ struct fib * cmd_fibcontext;
+ struct aac_dev * dev;
+
+ dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+
+ if (!(cmd_fibcontext = aac_fib_alloc(dev)))
+ return -ENOMEM;
+
+ aac_fib_init(cmd_fibcontext);
+ dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);
+
+ dinfo->command = cpu_to_le32(VM_ContainerConfig);
+ dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
+ dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
+
+ status = aac_fib_send(ContainerCommand,
+ cmd_fibcontext,
+ sizeof (struct aac_get_serial),
+ FsaNormal,
+ 0, 1,
+ (fib_callback) get_container_serial_callback,
+ (void *) scsicmd);
+
+ /*
+ * Check that the command queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+
+ printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
+ return -1;
+}
+
+/* Function: setinqserial
+ *
+ * Arguments: [1] pointer to void [1] int
+ *
+ * Purpose: Sets SCSI Unit Serial number.
+ * This is a fake. We should read a proper
+ * serial number from the container. <SuSE>But
+ * without docs it's quite hard to do it :-)
+ * So this will have to do in the meantime.</SuSE>
+ */
+
+static int setinqserial(struct aac_dev *dev, void *data, int cid)
+{
+ /*
+ * This breaks array migration.
+ */
+ return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
+ le32_to_cpu(dev->adapter_info.serial[0]), cid);
+}
+
+static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
+ u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
+{
+ u8 *sense_buf = (u8 *)sense_data;
+ /* Sense data valid, err code 70h */
+ sense_buf[0] = 0x70; /* No info field */
+ sense_buf[1] = 0; /* Segment number, always zero */
+
+ sense_buf[2] = sense_key; /* Sense key */
sense_buf[12] = sense_code; /* Additional sense code */
sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
+
if (sense_key == ILLEGAL_REQUEST) {
- sense_buf[15] = 0;
+ sense_buf[7] = 10; /* Additional sense length */
- if (sense_code == SENCODE_INVALID_PARAM_FIELD)
- sense_buf[15] = 0x80;/* Std sense key specific field */
+ sense_buf[15] = bit_pointer;
/* Illegal parameter is in the parameter block */
-
if (sense_code == SENCODE_INVALID_CDB_FIELD)
- sense_buf[15] = 0xc0;/* Std sense key specific field */
+ sense_buf[15] |= 0xc0;/* Std sense key specific field */
/* Illegal parameter is in the CDB block */
- sense_buf[15] |= bit_pointer;
sense_buf[16] = field_pointer >> 8; /* MSB */
sense_buf[17] = field_pointer; /* LSB */
+ } else
+ sense_buf[7] = 6; /* Additional sense length */
+}
+
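
set_sense() emits fixed-format sense data (response code 0x70): the sense key goes in byte 2, ASC/ASCQ in bytes 12 and 13, and, for ILLEGAL_REQUEST, a sense-key-specific field in bytes 15..17 points at the offending CDB or parameter byte. A stand-alone sketch decoding a buffer as this routine would fill it:

/*
 * Stand-alone sketch decoding the fixed-format sense data that
 * set_sense() above produces; offsets follow the SPC fixed format.
 */
#include <stdio.h>

static void dump_sense(const unsigned char *s)
{
	printf("response code 0x%02x\n", s[0] & 0x7f);
	printf("sense key     0x%02x\n", s[2] & 0x0f);
	printf("asc/ascq      0x%02x/0x%02x\n", s[12], s[13]);
	if ((s[2] & 0x0f) == 0x05 && (s[15] & 0x80))	/* ILLEGAL_REQUEST */
		printf("field pointer %u (%s)\n",
		       (unsigned)(s[16] << 8 | s[17]),
		       (s[15] & 0x40) ? "CDB" : "parameter list");
}

int main(void)
{
	/* what set_sense(..., ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
	 * ASENCODE_INVALID_CDB_FIELD, 0, 7) would leave behind */
	unsigned char buf[18] = { 0x70, 0, 0x05, 0, 0, 0, 0, 10,
				  0, 0, 0, 0, 0x24, 0x00, 0, 0xc0, 0, 7 };
	dump_sense(buf);
	return 0;
}
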
+static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
+{
+ if (lba & 0xffffffff00000000LL) {
+ int cid = scmd_id(cmd);
+ dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
+ cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ cmd->scsi_done(cmd);
+ return 1;
+ }
+ return 0;
+}
+
+static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
+{
+ return 0;
+}
+
+static void io_callback(void *context, struct fib * fibptr);
+
+static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+{
+ struct aac_dev *dev = fib->dev;
+ u16 fibsize, command;
+ long ret;
+
+ aac_fib_init(fib);
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+ struct aac_raw_io2 *readcmd2;
+ readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
+ memset(readcmd2, 0, sizeof(struct aac_raw_io2));
+ readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
+ readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+ readcmd2->byteCount = cpu_to_le32(count<<9);
+ readcmd2->cid = cpu_to_le16(scmd_id(cmd));
+ readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
+ ret = aac_build_sgraw2(cmd, readcmd2,
+ dev->scsi_host_ptr->sg_tablesize);
+ if (ret < 0)
+ return ret;
+ command = ContainerRawIo2;
+ fibsize = sizeof(struct aac_raw_io2) +
+ ((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+ } else {
+ struct aac_raw_io *readcmd;
+ readcmd = (struct aac_raw_io *) fib_data(fib);
+ readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+ readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+ readcmd->count = cpu_to_le32(count<<9);
+ readcmd->cid = cpu_to_le16(scmd_id(cmd));
+ readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
+ readcmd->bpTotal = 0;
+ readcmd->bpComplete = 0;
+ ret = aac_build_sgraw(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
+ command = ContainerRawIo;
+ fibsize = sizeof(struct aac_raw_io) +
+ ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
+ }
+
+ BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(command,
+ fib,
+ fibsize,
+ FsaNormal,
+ 0, 1,
+ (fib_callback) io_callback,
+ (void *) cmd);
+}
+
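
Both branches size the FIB payload as sizeof(struct) + (sg_count - 1) * sizeof(entry): the request structures declare a one-element scatter-gather array, so the first entry is already included in sizeof. A stand-alone sketch of that sizing idiom (struct names are illustrative, not the driver's):

/*
 * Stand-alone sketch of the FIB sizing idiom used above: the request
 * struct embeds a one-element s/g array, so a request carrying n
 * entries occupies sizeof(struct) + (n - 1) * sizeof(entry) bytes.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct sgentry { uint32_t addr, count; };

struct request {		/* stand-in for aac_raw_io and friends */
	uint32_t block, count;
	uint32_t sg_count;
	struct sgentry sg[1];	/* first entry lives inside the struct */
};

int main(void)
{
	uint32_t n = 17;	/* scatter-gather entries for this command */
	size_t fibsize = sizeof(struct request) +
			 (n - 1) * sizeof(struct sgentry);
	struct request *rq = calloc(1, fibsize);

	if (!rq)
		return 1;
	rq->sg_count = n;
	printf("%u entries -> %zu bytes\n", n, fibsize);
	free(rq);
	return 0;
}
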
+static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+{
+ u16 fibsize;
+ struct aac_read64 *readcmd;
+ long ret;
+
+ aac_fib_init(fib);
+ readcmd = (struct aac_read64 *) fib_data(fib);
+ readcmd->command = cpu_to_le32(VM_CtHostRead64);
+ readcmd->cid = cpu_to_le16(scmd_id(cmd));
+ readcmd->sector_count = cpu_to_le16(count);
+ readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
+ readcmd->pad = 0;
+ readcmd->flags = 0;
+
+ ret = aac_build_sg64(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
+ fibsize = sizeof(struct aac_read64) +
+ ((le32_to_cpu(readcmd->sg.count) - 1) *
+ sizeof (struct sgentry64));
+ BUG_ON (fibsize > (fib->dev->max_fib_size -
+ sizeof(struct aac_fibhdr)));
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(ContainerCommand64,
+ fib,
+ fibsize,
+ FsaNormal,
+ 0, 1,
+ (fib_callback) io_callback,
+ (void *) cmd);
+}
+
+static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+{
+ u16 fibsize;
+ struct aac_read *readcmd;
+ long ret;
+
+ aac_fib_init(fib);
+ readcmd = (struct aac_read *) fib_data(fib);
+ readcmd->command = cpu_to_le32(VM_CtBlockRead);
+ readcmd->cid = cpu_to_le32(scmd_id(cmd));
+ readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
+ readcmd->count = cpu_to_le32(count * 512);
+
+ ret = aac_build_sg(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
+ fibsize = sizeof(struct aac_read) +
+ ((le32_to_cpu(readcmd->sg.count) - 1) *
+ sizeof (struct sgentry));
+ BUG_ON (fibsize > (fib->dev->max_fib_size -
+ sizeof(struct aac_fibhdr)));
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(ContainerCommand,
+ fib,
+ fibsize,
+ FsaNormal,
+ 0, 1,
+ (fib_callback) io_callback,
+ (void *) cmd);
+}
+
+static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
+{
+ struct aac_dev *dev = fib->dev;
+ u16 fibsize, command;
+ long ret;
+
+ aac_fib_init(fib);
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+ struct aac_raw_io2 *writecmd2;
+ writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
+ memset(writecmd2, 0, sizeof(struct aac_raw_io2));
+ writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
+ writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+ writecmd2->byteCount = cpu_to_le32(count<<9);
+ writecmd2->cid = cpu_to_le16(scmd_id(cmd));
+ writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
+ (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
+ cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
+ cpu_to_le16(RIO2_IO_TYPE_WRITE);
+ ret = aac_build_sgraw2(cmd, writecmd2,
+ dev->scsi_host_ptr->sg_tablesize);
+ if (ret < 0)
+ return ret;
+ command = ContainerRawIo2;
+ fibsize = sizeof(struct aac_raw_io2) +
+ ((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+ } else {
+ struct aac_raw_io *writecmd;
+ writecmd = (struct aac_raw_io *) fib_data(fib);
+ writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+ writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+ writecmd->count = cpu_to_le32(count<<9);
+ writecmd->cid = cpu_to_le16(scmd_id(cmd));
+ writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
+ (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
+ cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
+ cpu_to_le16(RIO_TYPE_WRITE);
+ writecmd->bpTotal = 0;
+ writecmd->bpComplete = 0;
+ ret = aac_build_sgraw(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
+ command = ContainerRawIo;
+ fibsize = sizeof(struct aac_raw_io) +
+ ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
+ }
+
+ BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(command,
+ fib,
+ fibsize,
+ FsaNormal,
+ 0, 1,
+ (fib_callback) io_callback,
+ (void *) cmd);
+}
+
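
The flags expression gates the SUREWRITE bit on the aac_cache module parameter: bit 0 (mask value 1) disables FUA outright, and bits 0 and 2 together (mask value 5) disable it only when the controller reports battery-protected cache. A stand-alone sketch of that predicate:

/*
 * Stand-alone sketch of the FUA gating in aac_write_raw_io() above.
 * aac_cache bit 0 disables FUA unconditionally; bits 0 and 2 together
 * (mask 5 == 5) disable it only when the cache is battery protected.
 */
#include <stdio.h>

static int want_surewrite(int fua, int aac_cache, int cache_protected)
{
	return fua && ((aac_cache & 5) != 1) &&
	       (((aac_cache & 5) != 5) || !cache_protected);
}

int main(void)
{
	printf("fua, cache=0             -> %d\n", want_surewrite(1, 0, 0));
	printf("fua, cache=1 (bit 0)     -> %d\n", want_surewrite(1, 1, 0));
	printf("fua, cache=5, battery    -> %d\n", want_surewrite(1, 5, 1));
	printf("fua, cache=5, no battery -> %d\n", want_surewrite(1, 5, 0));
	return 0;
}
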
+static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
+{
+ u16 fibsize;
+ struct aac_write64 *writecmd;
+ long ret;
+
+ aac_fib_init(fib);
+ writecmd = (struct aac_write64 *) fib_data(fib);
+ writecmd->command = cpu_to_le32(VM_CtHostWrite64);
+ writecmd->cid = cpu_to_le16(scmd_id(cmd));
+ writecmd->sector_count = cpu_to_le16(count);
+ writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
+ writecmd->pad = 0;
+ writecmd->flags = 0;
+
+ ret = aac_build_sg64(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
+ fibsize = sizeof(struct aac_write64) +
+ ((le32_to_cpu(writecmd->sg.count) - 1) *
+ sizeof (struct sgentry64));
+ BUG_ON (fibsize > (fib->dev->max_fib_size -
+ sizeof(struct aac_fibhdr)));
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(ContainerCommand64,
+ fib,
+ fibsize,
+ FsaNormal,
+ 0, 1,
+ (fib_callback) io_callback,
+ (void *) cmd);
+}
+
+static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
+{
+ u16 fibsize;
+ struct aac_write *writecmd;
+ long ret;
+
+ aac_fib_init(fib);
+ writecmd = (struct aac_write *) fib_data(fib);
+ writecmd->command = cpu_to_le32(VM_CtBlockWrite);
+ writecmd->cid = cpu_to_le32(scmd_id(cmd));
+ writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
+ writecmd->count = cpu_to_le32(count * 512);
+ writecmd->sg.count = cpu_to_le32(1);
+ /* ->stable is not used - it did mean which type of write */
+
+ ret = aac_build_sg(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
+ fibsize = sizeof(struct aac_write) +
+ ((le32_to_cpu(writecmd->sg.count) - 1) *
+ sizeof (struct sgentry));
+ BUG_ON (fibsize > (fib->dev->max_fib_size -
+ sizeof(struct aac_fibhdr)));
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(ContainerCommand,
+ fib,
+ fibsize,
+ FsaNormal,
+ 0, 1,
+ (fib_callback) io_callback,
+ (void *) cmd);
+}
+
+static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
+{
+ struct aac_srb * srbcmd;
+ u32 flag;
+ u32 timeout;
+
+ aac_fib_init(fib);
+ switch(cmd->sc_data_direction){
+ case DMA_TO_DEVICE:
+ flag = SRB_DataOut;
+ break;
+ case DMA_BIDIRECTIONAL:
+ flag = SRB_DataIn | SRB_DataOut;
+ break;
+ case DMA_FROM_DEVICE:
+ flag = SRB_DataIn;
+ break;
+ case DMA_NONE:
+ default: /* shuts up some versions of gcc */
+ flag = SRB_NoDataXfer;
+ break;
}
+
+ srbcmd = (struct aac_srb*) fib_data(fib);
+ srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+ srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
+ srbcmd->id = cpu_to_le32(scmd_id(cmd));
+ srbcmd->lun = cpu_to_le32(cmd->device->lun);
+ srbcmd->flags = cpu_to_le32(flag);
+ timeout = cmd->request->timeout/HZ;
+ if (timeout == 0)
+ timeout = 1;
+ srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
+ srbcmd->retry_limit = 0; /* Obsolete parameter */
+ srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
+ return srbcmd;
+}
+
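
aac_scsi_common() maps the midlayer's DMA direction onto SRB flag bits before filling in the timeout and CDB size. A stand-alone sketch of that mapping; the enum ordering and the SRB_* values here are stand-ins, not quoted from aacraid.h:

/*
 * Stand-alone sketch of the direction-to-flag mapping performed in
 * aac_scsi_common() above; the enum and SRB_* values are stand-ins
 * for the kernel's dma_data_direction and the driver's flag bits.
 */
#include <stdio.h>

enum dma_dir { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE };

#define SRB_NoDataXfer 0x0000
#define SRB_DataIn     0x0040
#define SRB_DataOut    0x0080

static unsigned int srb_flag(enum dma_dir dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:	return SRB_DataOut;
	case DMA_BIDIRECTIONAL:	return SRB_DataIn | SRB_DataOut;
	case DMA_FROM_DEVICE:	return SRB_DataIn;
	case DMA_NONE:
	default:		return SRB_NoDataXfer;
	}
}

int main(void)
{
	printf("read  -> 0x%04x\n", srb_flag(DMA_FROM_DEVICE));
	printf("write -> 0x%04x\n", srb_flag(DMA_TO_DEVICE));
	printf("none  -> 0x%04x\n", srb_flag(DMA_NONE));
	return 0;
}
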
+static void aac_srb_callback(void *context, struct fib * fibptr);
+
+static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
+{
+ u16 fibsize;
+ struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
+ long ret;
+
+ ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
+ if (ret < 0)
+ return ret;
+ srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
+
+ memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+ memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
+ /*
+ * Build Scatter/Gather list
+ */
+ fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
+ ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
+ sizeof (struct sgentry64));
+ BUG_ON (fibsize > (fib->dev->max_fib_size -
+ sizeof(struct aac_fibhdr)));
+
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(ScsiPortCommand64, fib,
+ fibsize, FsaNormal, 0, 1,
+ (fib_callback) aac_srb_callback,
+ (void *) cmd);
+}
+
+static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
+{
+ u16 fibsize;
+ struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
+ long ret;
+
+ ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
+ if (ret < 0)
+ return ret;
+ srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
+
+ memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+ memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
+ /*
+ * Build Scatter/Gather list
+ */
+ fibsize = sizeof (struct aac_srb) +
+ (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
+ sizeof (struct sgentry));
+ BUG_ON (fibsize > (fib->dev->max_fib_size -
+ sizeof(struct aac_fibhdr)));
+
+ /*
+ * Now send the Fib to the adapter
+ */
+ return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
+ (fib_callback) aac_srb_callback, (void *) cmd);
+}
+
+static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
+{
+ if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac &&
+ (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
+ return FAILED;
+ return aac_scsi_32(fib, cmd);
}
int aac_get_adapter_info(struct aac_dev* dev)
@@ -618,55 +1338,65 @@ int aac_get_adapter_info(struct aac_dev* dev)
struct aac_bus_info *command;
struct aac_bus_info_response *bus_info;
- if (!(fibptr = fib_alloc(dev)))
+ if (!(fibptr = aac_fib_alloc(dev)))
return -ENOMEM;
- fib_init(fibptr);
+ aac_fib_init(fibptr);
info = (struct aac_adapter_info *) fib_data(fibptr);
memset(info,0,sizeof(*info));
- rcode = fib_send(RequestAdapterInfo,
- fibptr,
+ rcode = aac_fib_send(RequestAdapterInfo,
+ fibptr,
sizeof(*info),
- FsaNormal,
- 1, 1,
- NULL,
+ FsaNormal,
+ -1, 1, /* First `interrupt' command uses special wait */
+ NULL,
NULL);
if (rcode < 0) {
- fib_complete(fibptr);
- fib_free(fibptr);
+ /* FIB should be freed only after
+ * getting the response from the F/W */
+ if (rcode != -ERESTARTSYS) {
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ }
return rcode;
}
memcpy(&dev->adapter_info, info, sizeof(*info));
if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
- struct aac_supplement_adapter_info * info;
+ struct aac_supplement_adapter_info * sinfo;
- fib_init(fibptr);
+ aac_fib_init(fibptr);
- info = (struct aac_supplement_adapter_info *) fib_data(fibptr);
+ sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr);
- memset(info,0,sizeof(*info));
+ memset(sinfo,0,sizeof(*sinfo));
- rcode = fib_send(RequestSupplementAdapterInfo,
+ rcode = aac_fib_send(RequestSupplementAdapterInfo,
fibptr,
- sizeof(*info),
+ sizeof(*sinfo),
FsaNormal,
1, 1,
NULL,
NULL);
if (rcode >= 0)
- memcpy(&dev->supplement_adapter_info, info, sizeof(*info));
+ memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
+ if (rcode == -ERESTARTSYS) {
+ fibptr = aac_fib_alloc(dev);
+ if (!fibptr)
+ return -ENOMEM;
+ }
+
}
- /*
- * GetBusInfo
+ /*
+ * GetBusInfo
*/
- fib_init(fibptr);
+ aac_fib_init(fibptr);
bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
@@ -679,21 +1409,25 @@ int aac_get_adapter_info(struct aac_dev* dev)
command->MethodId = cpu_to_le32(1);
command->CtlCmd = cpu_to_le32(GetBusInfo);
- rcode = fib_send(ContainerCommand,
+ rcode = aac_fib_send(ContainerCommand,
fibptr,
sizeof (*bus_info),
FsaNormal,
1, 1,
NULL, NULL);
+ /* reasoned default */
+ dev->maximum_num_physicals = 16;
if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
}
- tmp = le32_to_cpu(dev->adapter_info.kernelrev);
- printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
- dev->name,
+ if (!dev->in_reset) {
+ char buffer[16];
+ tmp = le32_to_cpu(dev->adapter_info.kernelrev);
+ printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
+ dev->name,
dev->id,
tmp>>24,
(tmp>>16)&0xff,
@@ -701,26 +1435,42 @@ int aac_get_adapter_info(struct aac_dev* dev)
le32_to_cpu(dev->adapter_info.kernelbuild),
(int)sizeof(dev->supplement_adapter_info.BuildDate),
dev->supplement_adapter_info.BuildDate);
- tmp = le32_to_cpu(dev->adapter_info.monitorrev);
- printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
+ tmp = le32_to_cpu(dev->adapter_info.monitorrev);
+ printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
dev->name, dev->id,
tmp>>24,(tmp>>16)&0xff,tmp&0xff,
le32_to_cpu(dev->adapter_info.monitorbuild));
- tmp = le32_to_cpu(dev->adapter_info.biosrev);
- printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
+ tmp = le32_to_cpu(dev->adapter_info.biosrev);
+ printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
dev->name, dev->id,
tmp>>24,(tmp>>16)&0xff,tmp&0xff,
le32_to_cpu(dev->adapter_info.biosbuild));
- if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
- printk(KERN_INFO "%s%d: serial %x\n",
- dev->name, dev->id,
- le32_to_cpu(dev->adapter_info.serial[0]));
+ buffer[0] = '\0';
+ if (aac_get_serial_number(
+ shost_to_class(dev->scsi_host_ptr), buffer))
+ printk(KERN_INFO "%s%d: serial %s",
+ dev->name, dev->id, buffer);
+ if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) {
+ printk(KERN_INFO "%s%d: TSID %.*s\n",
+ dev->name, dev->id,
+ (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
+ dev->supplement_adapter_info.VpdInfo.Tsid);
+ }
+ if (!aac_check_reset || ((aac_check_reset == 1) &&
+ (dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_IGNORE_RESET))) {
+ printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
+ dev->name, dev->id);
+ }
+ }
+ dev->cache_protected = 0;
+ dev->jbod = ((dev->supplement_adapter_info.FeatureBits &
+ AAC_FEATURE_JBOD) != 0);
dev->nondasd_support = 0;
dev->raid_scsi_mode = 0;
- if(dev->adapter_info.options & AAC_OPT_NONDASD){
+ if(dev->adapter_info.options & AAC_OPT_NONDASD)
dev->nondasd_support = 1;
- }
/*
* If the firmware supports ROMB RAID/SCSI mode and we are currently
@@ -741,30 +1491,43 @@ int aac_get_adapter_info(struct aac_dev* dev)
if (dev->raid_scsi_mode != 0)
printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
dev->name, dev->id);
-
- if(nondasd != -1) {
+
+ if (nondasd != -1)
dev->nondasd_support = (nondasd!=0);
- }
- if(dev->nondasd_support != 0){
+ if (dev->nondasd_support && !dev->in_reset)
printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
- }
+ if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32))
+ dev->needs_dac = 1;
dev->dac_support = 0;
- if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
- printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id);
+ if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&
+ (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) {
+ if (!dev->in_reset)
+ printk(KERN_INFO "%s%d: 64bit support enabled.\n",
+ dev->name, dev->id);
dev->dac_support = 1;
}
if(dacmode != -1) {
dev->dac_support = (dacmode!=0);
}
+
+ /* avoid problems with AAC_QUIRK_SCSI_32 controllers */
+ if (dev->dac_support && (aac_get_driver_ident(dev->cardtype)->quirks
+ & AAC_QUIRK_SCSI_32)) {
+ dev->nondasd_support = 0;
+ dev->jbod = 0;
+ expose_physicals = 0;
+ }
+
if(dev->dac_support != 0) {
- if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL) &&
- !pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL)) {
- printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
- dev->name, dev->id);
- } else if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFULL) &&
- !pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFULL)) {
+ if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
+ if (!dev->in_reset)
+ printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
+ dev->name, dev->id);
+ } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32)) &&
+ !pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
dev->name, dev->id);
dev->dac_support = 0;
@@ -774,265 +1537,274 @@ int aac_get_adapter_info(struct aac_dev* dev)
rcode = -ENOMEM;
}
}
- /*
- * 57 scatter gather elements
+ /*
+ * Deal with configuring for the individualized limits of each packet
+ * interface.
*/
- dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
- sizeof(struct aac_fibhdr) -
- sizeof(struct aac_write) + sizeof(struct sgmap)) /
- sizeof(struct sgmap);
- if (dev->dac_support) {
- /*
- * 38 scatter gather elements
- */
- dev->scsi_host_ptr->sg_tablesize =
- (dev->max_fib_size -
+ dev->a_ops.adapter_scsi = (dev->dac_support)
+ ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)
+ ? aac_scsi_32_64
+ : aac_scsi_64)
+ : aac_scsi_32;
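+	/*
+	 * a_ops.adapter_scsi now points at the SRB builder matching the
+	 * DMA mode: controllers with the AAC_QUIRK_SCSI_32 quirk keep
+	 * 32-bit SCSI passthrough packets even when 64-bit container
+	 * I/O is enabled.
+	 */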
+ if (dev->raw_io_interface) {
+ dev->a_ops.adapter_bounds = (dev->raw_io_64)
+ ? aac_bounds_64
+ : aac_bounds_32;
+ dev->a_ops.adapter_read = aac_read_raw_io;
+ dev->a_ops.adapter_write = aac_write_raw_io;
+ } else {
+ dev->a_ops.adapter_bounds = aac_bounds_32;
+ dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
sizeof(struct aac_fibhdr) -
- sizeof(struct aac_write64) +
- sizeof(struct sgmap64)) /
- sizeof(struct sgmap64);
+ sizeof(struct aac_write) + sizeof(struct sgentry)) /
+ sizeof(struct sgentry);
+ if (dev->dac_support) {
+ dev->a_ops.adapter_read = aac_read_block64;
+ dev->a_ops.adapter_write = aac_write_block64;
+ /*
+ * 38 scatter gather elements
+ */
+ dev->scsi_host_ptr->sg_tablesize =
+ (dev->max_fib_size -
+ sizeof(struct aac_fibhdr) -
+ sizeof(struct aac_write64) +
+ sizeof(struct sgentry64)) /
+ sizeof(struct sgentry64);
+ } else {
+ dev->a_ops.adapter_read = aac_read_block;
+ dev->a_ops.adapter_write = aac_write_block;
+ }
+ dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
+ if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
+ /*
+ * Worst case size that could cause sg overflow when
+ * we break up SG elements that are larger than 64KB.
+ * Would be nice if we could tell the SCSI layer what
+ * the maximum SG element size can be. Worst case is
+ * (sg_tablesize-1) 4KB elements with one 64KB
+ * element.
+ * 32bit -> 468 or 238KB 64bit -> 424 or 212KB
+ */
+ dev->scsi_host_ptr->max_sectors =
+ (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
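+			/*
+			 * For example, a 34-entry table yields
+			 * 34 * 8 + 112 = 384 sectors, i.e. 192KB
+			 * per request.
+			 */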
+ }
}
- dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
- if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
- /*
- * Worst case size that could cause sg overflow when
- * we break up SG elements that are larger than 64KB.
- * Would be nice if we could tell the SCSI layer what
- * the maximum SG element size can be. Worst case is
- * (sg_tablesize-1) 4KB elements with one 64KB
- * element.
- * 32bit -> 468 or 238KB 64bit -> 424 or 212KB
- */
- dev->scsi_host_ptr->max_sectors =
- (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
+ /* FIB should be freed only after getting the response from the F/W */
+ if (rcode != -ERESTARTSYS) {
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
}
- fib_complete(fibptr);
- fib_free(fibptr);
-
return rcode;
}
-static void read_callback(void *context, struct fib * fibptr)
+static void io_callback(void *context, struct fib * fibptr)
{
struct aac_dev *dev;
struct aac_read_reply *readreply;
struct scsi_cmnd *scsicmd;
- u32 lba;
u32 cid;
scsicmd = (struct scsi_cmnd *) context;
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
-
- lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
- dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
-
- if (fibptr == NULL)
- BUG();
-
- if(scsicmd->use_sg)
- pci_unmap_sg(dev->pdev,
- (struct scatterlist *)scsicmd->buffer,
- scsicmd->use_sg,
- scsicmd->sc_data_direction);
- else if(scsicmd->request_bufflen)
- pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
- scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ dev = fibptr->dev;
+ cid = scmd_id(scsicmd);
+
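+	/*
+	 * nblank() is nonzero only when dprintk() expands to real code,
+	 * so the LBA decode below compiles away on non-debug builds.
+	 */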
+ if (nblank(dprintk(x))) {
+ u64 lba;
+ switch (scsicmd->cmnd[0]) {
+ case WRITE_6:
+ case READ_6:
+ lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
+ (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+ break;
+ case WRITE_16:
+ case READ_16:
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
+ ((u64)scsicmd->cmnd[3] << 48) |
+ ((u64)scsicmd->cmnd[4] << 40) |
+ ((u64)scsicmd->cmnd[5] << 32) |
+ ((u64)scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ break;
+ case WRITE_12:
+ case READ_12:
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ break;
+ default:
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ break;
+ }
+ printk(KERN_DEBUG
+ "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
+ smp_processor_id(), (unsigned long long)lba, jiffies);
+ }
+
+ BUG_ON(fibptr == NULL);
+
+ scsi_dma_unmap(scsicmd);
+
readreply = (struct aac_read_reply *)fib_data(fibptr);
- if (le32_to_cpu(readreply->status) == ST_OK)
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
- else {
+ switch (le32_to_cpu(readreply->status)) {
+ case ST_OK:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
+ dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
+ break;
+ case ST_NOT_READY:
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
+ SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ break;
+ default:
#ifdef AAC_DETAILED_STATUS_INFO
- printk(KERN_WARNING "read_callback: io failed, status = %d\n",
+ printk(KERN_WARNING "io_callback: io failed, status = %d\n",
le32_to_cpu(readreply->status));
#endif
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
- set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR,
- SENCODE_INTERNAL_TARGET_FAILURE,
- ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
- 0, 0);
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
- (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
- ? sizeof(scsicmd->sense_buffer)
- : sizeof(dev->fsa_dev[cid].sense_data));
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ break;
}
- fib_complete(fibptr);
- fib_free(fibptr);
-
- aac_io_done(scsicmd);
-}
-
-static void write_callback(void *context, struct fib * fibptr)
-{
- struct aac_dev *dev;
- struct aac_write_reply *writereply;
- struct scsi_cmnd *scsicmd;
- u32 lba;
- u32 cid;
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
- scsicmd = (struct scsi_cmnd *) context;
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
-
- lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
- dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
- if (fibptr == NULL)
- BUG();
-
- if(scsicmd->use_sg)
- pci_unmap_sg(dev->pdev,
- (struct scatterlist *)scsicmd->buffer,
- scsicmd->use_sg,
- scsicmd->sc_data_direction);
- else if(scsicmd->request_bufflen)
- pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
- scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
-
- writereply = (struct aac_write_reply *) fib_data(fibptr);
- if (le32_to_cpu(writereply->status) == ST_OK)
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
- else {
- printk(KERN_WARNING "write_callback: write failed, status = %d\n", writereply->status);
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
- set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR,
- SENCODE_INTERNAL_TARGET_FAILURE,
- ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
- 0, 0);
- memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
- sizeof(struct sense_data));
- }
-
- fib_complete(fibptr);
- fib_free(fibptr);
- aac_io_done(scsicmd);
+ scsicmd->scsi_done(scsicmd);
}
-static int aac_read(struct scsi_cmnd * scsicmd, int cid)
+static int aac_read(struct scsi_cmnd * scsicmd)
{
- u32 lba;
+ u64 lba;
u32 count;
int status;
-
- u16 fibsize;
struct aac_dev *dev;
struct fib * cmd_fibcontext;
+ int cid;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
/*
* Get block address and transfer length
*/
- if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */
- {
- dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));
+ switch (scsicmd->cmnd[0]) {
+ case READ_6:
+ dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));
- lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+ lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
+ (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
count = scsicmd->cmnd[4];
if (count == 0)
count = 256;
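+		/*
+		 * A transfer length of zero in a 6-byte CDB means 256
+		 * blocks, hence the fix-up above.
+		 */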
- } else {
- dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));
+ break;
+ case READ_16:
+ dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));
+
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
+ ((u64)scsicmd->cmnd[3] << 48) |
+ ((u64)scsicmd->cmnd[4] << 40) |
+ ((u64)scsicmd->cmnd[5] << 32) |
+ ((u64)scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
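+		/*
+		 * Bytes 2-9 form a big-endian 64-bit LBA; e.g. a CDB
+		 * carrying 00 00 00 01 00 00 00 00 there addresses block
+		 * 0x100000000, beyond what the 10/12-byte variants reach.
+		 */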
+ count = (scsicmd->cmnd[10] << 24) |
+ (scsicmd->cmnd[11] << 16) |
+ (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+ break;
+ case READ_12:
+ dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));
+
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ count = (scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ break;
+ default:
+ dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));
- lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ lba = ((u64)scsicmd->cmnd[2] << 24) |
+ (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+ break;
}
- dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n",
+
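+	/*
+	 * Fail requests that run past the end of the container with a
+	 * check condition rather than handing firmware an invalid range.
+	 */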
+ if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
+ cid = scmd_id(scsicmd);
+ dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+ return 1;
+ }
+
+ dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
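+	/*
+	 * The bounds handler fails LBAs the negotiated packet interface
+	 * cannot address and completes the command itself when it
+	 * returns nonzero.
+	 */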
+ if (aac_adapter_bounds(dev,scsicmd,lba))
+ return 0;
/*
	 *	Allocate and initialize a Fib
*/
- if (!(cmd_fibcontext = fib_alloc(dev))) {
+ if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+ printk(KERN_WARNING "aac_read: fib allocation failed\n");
return -1;
}
- fib_init(cmd_fibcontext);
-
- if (dev->dac_support == 1) {
- struct aac_read64 *readcmd;
- readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
- readcmd->command = cpu_to_le32(VM_CtHostRead64);
- readcmd->cid = cpu_to_le16(cid);
- readcmd->sector_count = cpu_to_le16(count);
- readcmd->block = cpu_to_le32(lba);
- readcmd->pad = 0;
- readcmd->flags = 0;
-
- aac_build_sg64(scsicmd, &readcmd->sg);
- fibsize = sizeof(struct aac_read64) +
- ((le32_to_cpu(readcmd->sg.count) - 1) *
- sizeof (struct sgentry64));
- BUG_ON (fibsize > (sizeof(struct hw_fib) -
- sizeof(struct aac_fibhdr)));
- /*
- * Now send the Fib to the adapter
- */
- status = fib_send(ContainerCommand64,
- cmd_fibcontext,
- fibsize,
- FsaNormal,
- 0, 1,
- (fib_callback) read_callback,
- (void *) scsicmd);
- } else {
- struct aac_read *readcmd;
- readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
- readcmd->command = cpu_to_le32(VM_CtBlockRead);
- readcmd->cid = cpu_to_le32(cid);
- readcmd->block = cpu_to_le32(lba);
- readcmd->count = cpu_to_le32(count * 512);
-
- aac_build_sg(scsicmd, &readcmd->sg);
- fibsize = sizeof(struct aac_read) +
- ((le32_to_cpu(readcmd->sg.count) - 1) *
- sizeof (struct sgentry));
- BUG_ON (fibsize > (dev->max_fib_size -
- sizeof(struct aac_fibhdr)));
- /*
- * Now send the Fib to the adapter
- */
- status = fib_send(ContainerCommand,
- cmd_fibcontext,
- fibsize,
- FsaNormal,
- 0, 1,
- (fib_callback) read_callback,
- (void *) scsicmd);
- }
-
-
+ status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
/*
	 *	Check that the command was queued to the controller
*/
- if (status == -EINPROGRESS)
+ if (status == -EINPROGRESS) {
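+		/*
+		 * Mark the command as owned by the adapter so completions
+		 * racing with abort or reset handling can be recognized.
+		 */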
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
return 0;
-
- printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status);
+ }
+
+ printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
/*
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
- aac_io_done(scsicmd);
- fib_complete(cmd_fibcontext);
- fib_free(cmd_fibcontext);
+ scsicmd->scsi_done(scsicmd);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
return 0;
}
-static int aac_write(struct scsi_cmnd * scsicmd, int cid)
+static int aac_write(struct scsi_cmnd * scsicmd)
{
- u32 lba;
+ u64 lba;
u32 count;
+ int fua;
int status;
- u16 fibsize;
struct aac_dev *dev;
struct fib * cmd_fibcontext;
+ int cid;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
/*
@@ -1044,94 +1816,87 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
count = scsicmd->cmnd[4];
if (count == 0)
count = 256;
+ fua = 0;
+ } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
+ dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
+
+ lba = ((u64)scsicmd->cmnd[2] << 56) |
+ ((u64)scsicmd->cmnd[3] << 48) |
+ ((u64)scsicmd->cmnd[4] << 40) |
+ ((u64)scsicmd->cmnd[5] << 32) |
+ ((u64)scsicmd->cmnd[6] << 24) |
+ (scsicmd->cmnd[7] << 16) |
+ (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
+ (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+ fua = scsicmd->cmnd[1] & 0x8;
+ } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
+ dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
+
+ lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
+ | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
+ | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+ fua = scsicmd->cmnd[1] & 0x8;
} else {
- dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
- lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
+ lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+ fua = scsicmd->cmnd[1] & 0x8;
+ }
+
+ if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
+ cid = scmd_id(scsicmd);
+ dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+ return 1;
}
- dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n",
+
+ dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
smp_processor_id(), (unsigned long long)lba, jiffies));
+ if (aac_adapter_bounds(dev,scsicmd,lba))
+ return 0;
/*
	 *	Allocate and initialize a Fib, then set up a BlockWrite command
*/
- if (!(cmd_fibcontext = fib_alloc(dev))) {
- scsicmd->result = DID_ERROR << 16;
- aac_io_done(scsicmd);
- return 0;
- }
- fib_init(cmd_fibcontext);
-
- if(dev->dac_support == 1) {
- struct aac_write64 *writecmd;
- writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
- writecmd->command = cpu_to_le32(VM_CtHostWrite64);
- writecmd->cid = cpu_to_le16(cid);
- writecmd->sector_count = cpu_to_le16(count);
- writecmd->block = cpu_to_le32(lba);
- writecmd->pad = 0;
- writecmd->flags = 0;
-
- aac_build_sg64(scsicmd, &writecmd->sg);
- fibsize = sizeof(struct aac_write64) +
- ((le32_to_cpu(writecmd->sg.count) - 1) *
- sizeof (struct sgentry64));
- BUG_ON (fibsize > (dev->max_fib_size -
- sizeof(struct aac_fibhdr)));
- /*
- * Now send the Fib to the adapter
- */
- status = fib_send(ContainerCommand64,
- cmd_fibcontext,
- fibsize,
- FsaNormal,
- 0, 1,
- (fib_callback) write_callback,
- (void *) scsicmd);
- } else {
- struct aac_write *writecmd;
- writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
- writecmd->command = cpu_to_le32(VM_CtBlockWrite);
- writecmd->cid = cpu_to_le32(cid);
- writecmd->block = cpu_to_le32(lba);
- writecmd->count = cpu_to_le32(count * 512);
- writecmd->sg.count = cpu_to_le32(1);
- /* ->stable is not used - it did mean which type of write */
-
- aac_build_sg(scsicmd, &writecmd->sg);
- fibsize = sizeof(struct aac_write) +
- ((le32_to_cpu(writecmd->sg.count) - 1) *
- sizeof (struct sgentry));
- BUG_ON (fibsize > (dev->max_fib_size -
- sizeof(struct aac_fibhdr)));
- /*
- * Now send the Fib to the adapter
+ if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+		/* FIB temporarily unavailable, not a catastrophic failure */
+
+ /* scsicmd->result = DID_ERROR << 16;
+ * scsicmd->scsi_done(scsicmd);
+ * return 0;
*/
- status = fib_send(ContainerCommand,
- cmd_fibcontext,
- fibsize,
- FsaNormal,
- 0, 1,
- (fib_callback) write_callback,
- (void *) scsicmd);
+ printk(KERN_WARNING "aac_write: fib allocation failed\n");
+ return -1;
}
+ status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
+
/*
	 *	Check that the command was queued to the controller
*/
- if (status == -EINPROGRESS)
- {
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
return 0;
}
- printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status);
+ printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
/*
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
- aac_io_done(scsicmd);
+ scsicmd->scsi_done(scsicmd);
- fib_complete(cmd_fibcontext);
- fib_free(cmd_fibcontext);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
return 0;
}
@@ -1142,40 +1907,41 @@ static void synchronize_callback(void *context, struct fib *fibptr)
cmd = context;
- dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
+ if (!aac_valid_context(cmd, fibptr))
+ return;
+
+ dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
smp_processor_id(), jiffies));
BUG_ON(fibptr == NULL);
synchronizereply = fib_data(fibptr);
if (le32_to_cpu(synchronizereply->status) == CT_OK)
- cmd->result = DID_OK << 16 |
+ cmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
else {
struct scsi_device *sdev = cmd->device;
- struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
- u32 cid = ID_LUN_TO_CONTAINER(sdev->id, sdev->lun);
- printk(KERN_WARNING
+ struct aac_dev *dev = fibptr->dev;
+ u32 cid = sdev_id(sdev);
+ printk(KERN_WARNING
"synchronize_callback: synchronize failed, status = %d\n",
le32_to_cpu(synchronizereply->status));
- cmd->result = DID_OK << 16 |
+ cmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
- set_sense((u8 *)&dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR,
- SENCODE_INTERNAL_TARGET_FAILURE,
- ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
- 0, 0);
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
- min(sizeof(dev->fsa_dev[cid].sense_data),
- sizeof(cmd->sense_buffer)));
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
}
- fib_complete(fibptr);
- fib_free(fibptr);
- aac_io_done(cmd);
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ cmd->scsi_done(cmd);
}
-static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
+static int aac_synchronize(struct scsi_cmnd *scsicmd)
{
int status;
struct fib *cmd_fibcontext;
@@ -1183,15 +1949,63 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
struct scsi_cmnd *cmd;
struct scsi_device *sdev = scsicmd->device;
int active = 0;
+ struct aac_dev *aac;
+ u64 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) |
+ (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+ u32 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
unsigned long flags;
/*
- * Wait for all commands to complete to this specific
- * target (block).
+ * Wait for all outstanding queued commands to complete to this
+ * specific target (block).
*/
spin_lock_irqsave(&sdev->list_lock, flags);
list_for_each_entry(cmd, &sdev->cmd_list, list)
- if (cmd != scsicmd && cmd->serial_number != 0) {
+ if (cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
+ u64 cmnd_lba;
+ u32 cmnd_count;
+
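+			/*
+			 * Decode the LBA and length of each in-flight WRITE
+			 * so that writes which cannot overlap the range
+			 * being flushed are not counted as active.
+			 */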
+ if (cmd->cmnd[0] == WRITE_6) {
+ cmnd_lba = ((cmd->cmnd[1] & 0x1F) << 16) |
+ (cmd->cmnd[2] << 8) |
+ cmd->cmnd[3];
+ cmnd_count = cmd->cmnd[4];
+ if (cmnd_count == 0)
+ cmnd_count = 256;
+ } else if (cmd->cmnd[0] == WRITE_16) {
+ cmnd_lba = ((u64)cmd->cmnd[2] << 56) |
+ ((u64)cmd->cmnd[3] << 48) |
+ ((u64)cmd->cmnd[4] << 40) |
+ ((u64)cmd->cmnd[5] << 32) |
+ ((u64)cmd->cmnd[6] << 24) |
+ (cmd->cmnd[7] << 16) |
+ (cmd->cmnd[8] << 8) |
+ cmd->cmnd[9];
+ cmnd_count = (cmd->cmnd[10] << 24) |
+ (cmd->cmnd[11] << 16) |
+ (cmd->cmnd[12] << 8) |
+ cmd->cmnd[13];
+ } else if (cmd->cmnd[0] == WRITE_12) {
+ cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
+ (cmd->cmnd[3] << 16) |
+ (cmd->cmnd[4] << 8) |
+ cmd->cmnd[5];
+ cmnd_count = (cmd->cmnd[6] << 24) |
+ (cmd->cmnd[7] << 16) |
+ (cmd->cmnd[8] << 8) |
+ cmd->cmnd[9];
+ } else if (cmd->cmnd[0] == WRITE_10) {
+ cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
+ (cmd->cmnd[3] << 16) |
+ (cmd->cmnd[4] << 8) |
+ cmd->cmnd[5];
+ cmnd_count = (cmd->cmnd[7] << 8) |
+ cmd->cmnd[8];
+ } else
+ continue;
+ if (((cmnd_lba + cmnd_count) < lba) ||
+ (count && ((lba + count) < cmnd_lba)))
+ continue;
++active;
break;
}
@@ -1204,26 +2018,29 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
if (active)
return SCSI_MLQUEUE_DEVICE_BUSY;
+ aac = (struct aac_dev *)sdev->host->hostdata;
+ if (aac->in_reset)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
/*
* Allocate and initialize a Fib
*/
- if (!(cmd_fibcontext =
- fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata)))
+ if (!(cmd_fibcontext = aac_fib_alloc(aac)))
return SCSI_MLQUEUE_HOST_BUSY;
- fib_init(cmd_fibcontext);
+ aac_fib_init(cmd_fibcontext);
synchronizecmd = fib_data(cmd_fibcontext);
synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
- synchronizecmd->cid = cpu_to_le32(cid);
- synchronizecmd->count =
+ synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
+ synchronizecmd->count =
cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
/*
* Now send the Fib to the adapter
*/
- status = fib_send(ContainerCommand,
+ status = aac_fib_send(ContainerCommand,
cmd_fibcontext,
sizeof(struct aac_synchronize),
FsaNormal,
@@ -1234,13 +2051,93 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
/*
	 *	Check that the command was queued to the controller
*/
- if (status == -EINPROGRESS)
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ return 0;
+ }
+
+ printk(KERN_WARNING
+ "aac_synchronize: aac_fib_send failed with status: %d.\n", status);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+static void aac_start_stop_callback(void *context, struct fib *fibptr)
+{
+ struct scsi_cmnd *scsicmd = context;
+
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ BUG_ON(fibptr == NULL);
+
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ scsicmd->scsi_done(scsicmd);
+}
+
+static int aac_start_stop(struct scsi_cmnd *scsicmd)
+{
+ int status;
+ struct fib *cmd_fibcontext;
+ struct aac_power_management *pmcmd;
+ struct scsi_device *sdev = scsicmd->device;
+ struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+
+ if (!(aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_POWER_MANAGEMENT)) {
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+
+ if (aac->in_reset)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /*
+ * Allocate and initialize a Fib
+ */
+ cmd_fibcontext = aac_fib_alloc(aac);
+ if (!cmd_fibcontext)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ aac_fib_init(cmd_fibcontext);
+
+ pmcmd = fib_data(cmd_fibcontext);
+ pmcmd->command = cpu_to_le32(VM_ContainerConfig);
+ pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT);
+ /* Eject bit ignored, not relevant */
+ pmcmd->sub = (scsicmd->cmnd[4] & 1) ?
+ cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT);
+ pmcmd->cid = cpu_to_le32(sdev_id(sdev));
+ pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
+ cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
+
+ /*
+ * Now send the Fib to the adapter
+ */
+ status = aac_fib_send(ContainerCommand,
+ cmd_fibcontext,
+ sizeof(struct aac_power_management),
+ FsaNormal,
+ 0, 1,
+ (fib_callback)aac_start_stop_callback,
+ (void *)scsicmd);
+
+ /*
+	 *	Check that the command was queued to the controller
+ */
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
return 0;
+ }
- printk(KERN_WARNING
- "aac_synchronize: fib_send failed with status: %d.\n", status);
- fib_complete(cmd_fibcontext);
- fib_free(cmd_fibcontext);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -1251,62 +2148,60 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
* Emulate a SCSI command and queue the required request for the
* aacraid firmware.
*/
-
+
int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
- u32 cid = 0;
+ u32 cid;
struct Scsi_Host *host = scsicmd->device->host;
struct aac_dev *dev = (struct aac_dev *)host->hostdata;
struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
- int cardtype = dev->cardtype;
- int ret;
-
+
+ if (fsa_dev_ptr == NULL)
+ return -1;
/*
* If the bus, id or lun is out of range, return fail
* Test does not apply to ID 16, the pseudo id for the controller
* itself.
*/
- if (scsicmd->device->id != host->this_id) {
- if ((scsicmd->device->channel == 0) ){
- if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){
+ cid = scmd_id(scsicmd);
+ if (cid != host->this_id) {
+ if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) {
+			if ((cid >= dev->maximum_num_containers) ||
+ (scsicmd->device->lun != 0)) {
scsicmd->result = DID_NO_CONNECT << 16;
scsicmd->scsi_done(scsicmd);
return 0;
}
- cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
/*
* If the target container doesn't exist, it may have
* been newly created
*/
- if ((fsa_dev_ptr[cid].valid & 1) == 0) {
+ if (((fsa_dev_ptr[cid].valid & 1) == 0) ||
+ (fsa_dev_ptr[cid].sense_data.sense_key ==
+ NOT_READY)) {
switch (scsicmd->cmnd[0]) {
+ case SERVICE_ACTION_IN:
+ if (!(dev->raw_io_interface) ||
+ !(dev->raw_io_64) ||
+ ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+ break;
case INQUIRY:
case READ_CAPACITY:
case TEST_UNIT_READY:
- spin_unlock_irq(host->host_lock);
- probe_container(dev, cid);
- spin_lock_irq(host->host_lock);
- if (fsa_dev_ptr[cid].valid == 0) {
- scsicmd->result = DID_NO_CONNECT << 16;
- scsicmd->scsi_done(scsicmd);
- return 0;
- }
+ if (dev->in_reset)
+ return -1;
+ return _aac_probe_container(scsicmd,
+ aac_probe_container_callback2);
default:
break;
}
}
- /*
- * If the target container still doesn't exist,
- * return failure
- */
- if (fsa_dev_ptr[cid].valid == 0) {
- scsicmd->result = DID_BAD_TARGET << 16;
- scsicmd->scsi_done(scsicmd);
- return 0;
- }
} else { /* check for physical non-dasd devices */
- if(dev->nondasd_support == 1){
+ if (dev->nondasd_support || expose_physicals ||
+ dev->jbod) {
+ if (dev->in_reset)
+ return -1;
return aac_send_srb_fib(scsicmd);
} else {
scsicmd->result = DID_NO_CONNECT << 16;
@@ -1319,18 +2214,16 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* else Command for the controller itself
*/
else if ((scsicmd->cmnd[0] != INQUIRY) && /* only INQUIRY & TUR cmnd supported for controller */
- (scsicmd->cmnd[0] != TEST_UNIT_READY))
+ (scsicmd->cmnd[0] != TEST_UNIT_READY))
{
dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
- set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
- ILLEGAL_REQUEST,
- SENCODE_INVALID_COMMAND,
- ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
+ ASENCODE_INVALID_COMMAND, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
- (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
- ? sizeof(scsicmd->sense_buffer)
- : sizeof(dev->fsa_dev[cid].sense_data));
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
return 0;
}
@@ -1340,44 +2233,138 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
switch (scsicmd->cmnd[0]) {
case INQUIRY:
{
- struct inquiry_data *inq_data_ptr;
-
- dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
- inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
- memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
-
- inq_data_ptr->inqd_ver = 2; /* claim compliance to SCSI-2 */
- inq_data_ptr->inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
- inq_data_ptr->inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
- inq_data_ptr->inqd_len = 31;
+ struct inquiry_data inq_data;
+
+ dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
+ memset(&inq_data, 0, sizeof (struct inquiry_data));
+
+ if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) {
+ char *arr = (char *)&inq_data;
+
+ /* EVPD bit set */
+ arr[0] = (scmd_id(scsicmd) == host->this_id) ?
+ INQD_PDT_PROC : INQD_PDT_DA;
+ if (scsicmd->cmnd[2] == 0) {
+ /* supported vital product data pages */
+ arr[3] = 2;
+ arr[4] = 0x0;
+ arr[5] = 0x80;
+ arr[1] = scsicmd->cmnd[2];
+ scsi_sg_copy_from_buffer(scsicmd, &inq_data,
+ sizeof(inq_data));
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ } else if (scsicmd->cmnd[2] == 0x80) {
+ /* unit serial number page */
+ arr[3] = setinqserial(dev, &arr[4],
+ scmd_id(scsicmd));
+ arr[1] = scsicmd->cmnd[2];
+ scsi_sg_copy_from_buffer(scsicmd, &inq_data,
+ sizeof(inq_data));
+ if (aac_wwn != 2)
+ return aac_get_container_serial(
+ scsicmd);
+ /* SLES 10 SP1 special */
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ } else {
+ /* vpd page not implemented */
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
+ ASENCODE_NO_SENSE, 7, 2);
+ memcpy(scsicmd->sense_buffer,
+ &dev->fsa_dev[cid].sense_data,
+ min_t(size_t,
+ sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ }
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+ inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
+ inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
+ inq_data.inqd_len = 31;
/*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
- inq_data_ptr->inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */
+		inq_data.inqd_pad2 = 0x32;	/* WBus16 | Sync | CmdQue */
/*
* Set the Vendor, Product, and Revision Level
* see: <vendor>.c i.e. aac.c
*/
- if (scsicmd->device->id == host->this_id) {
- setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), (sizeof(container_types)/sizeof(char *)));
- inq_data_ptr->inqd_pdt = INQD_PDT_PROC; /* Processor device */
+ if (cid == host->this_id) {
+ setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
+ inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
+ scsi_sg_copy_from_buffer(scsicmd, &inq_data,
+ sizeof(inq_data));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
return 0;
}
- setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr[cid].type);
- inq_data_ptr->inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
- return aac_get_container_name(scsicmd, cid);
+ if (dev->in_reset)
+ return -1;
+ setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
+ inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
+ scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
+ return aac_get_container_name(scsicmd);
}
+ case SERVICE_ACTION_IN:
+ if (!(dev->raw_io_interface) ||
+ !(dev->raw_io_64) ||
+ ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+ break;
+ {
+ u64 capacity;
+ char cp[13];
+ unsigned int alloc_len;
+
+ dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
+ capacity = fsa_dev_ptr[cid].size - 1;
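+		/*
+		 * READ CAPACITY(16) data: cp[0..7] is the last LBA
+		 * (big-endian), cp[8..11] the block length (0x00000200 =
+		 * 512 bytes), cp[12] reports no protection information.
+		 */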
+ cp[0] = (capacity >> 56) & 0xff;
+ cp[1] = (capacity >> 48) & 0xff;
+ cp[2] = (capacity >> 40) & 0xff;
+ cp[3] = (capacity >> 32) & 0xff;
+ cp[4] = (capacity >> 24) & 0xff;
+ cp[5] = (capacity >> 16) & 0xff;
+ cp[6] = (capacity >> 8) & 0xff;
+ cp[7] = (capacity >> 0) & 0xff;
+ cp[8] = 0;
+ cp[9] = 0;
+ cp[10] = 2;
+ cp[11] = 0;
+ cp[12] = 0;
+
+ alloc_len = ((scsicmd->cmnd[10] << 24)
+ + (scsicmd->cmnd[11] << 16)
+ + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]);
+
+ alloc_len = min_t(size_t, alloc_len, sizeof(cp));
+ scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len);
+ if (alloc_len < scsi_bufflen(scsicmd))
+ scsi_set_resid(scsicmd,
+ scsi_bufflen(scsicmd) - alloc_len);
+
+ /* Do not cache partition table for arrays */
+ scsicmd->device->removable = 1;
+
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+
+ return 0;
+ }
+
case READ_CAPACITY:
{
u32 capacity;
- char *cp;
+ char cp[8];
dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
- if (fsa_dev_ptr[cid].size <= 0x100000000LL)
+ if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
capacity = fsa_dev_ptr[cid].size - 1;
else
capacity = (u32)-1;
- cp = scsicmd->request_buffer;
+
cp[0] = (capacity >> 24) & 0xff;
cp[1] = (capacity >> 16) & 0xff;
cp[2] = (capacity >> 8) & 0xff;
@@ -1386,8 +2373,11 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
cp[5] = 0;
cp[6] = 2;
cp[7] = 0;
-
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
+ /* Do not cache partition table for arrays */
+ scsicmd->device->removable = 1;
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
return 0;
@@ -1395,15 +2385,30 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case MODE_SENSE:
{
- char *mode_buf;
+ char mode_buf[7];
+ int mode_buf_length = 4;
dprintk((KERN_DEBUG "MODE SENSE command.\n"));
- mode_buf = scsicmd->request_buffer;
mode_buf[0] = 3; /* Mode data length */
mode_buf[1] = 0; /* Medium type - default */
- mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
+ mode_buf[2] = 0; /* Device-specific param,
+ bit 8: 0/1 = write enabled/protected
+ bit 4: 0/1 = FUA enabled */
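+		/*
+		 * aac_cache module parameter (assumed layout): bit 0
+		 * disables FUA, bit 1 disables SYNCHRONIZE_CACHE, bit 2
+		 * restricts the disable to battery-protected caches.
+		 */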
+ if (dev->raw_io_interface && ((aac_cache & 5) != 1))
+ mode_buf[2] = 0x10;
mode_buf[3] = 0; /* Block descriptor length */
-
+ if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
+ ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
+ mode_buf[0] = 6;
+ mode_buf[4] = 8;
+ mode_buf[5] = 1;
+ mode_buf[6] = ((aac_cache & 6) == 2)
+ ? 0 : 0x04; /* WCE */
+ mode_buf_length = 7;
+ if (mode_buf_length > scsicmd->cmnd[4])
+ mode_buf_length = scsicmd->cmnd[4];
+ }
+ scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
@@ -1411,18 +2416,34 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
}
case MODE_SENSE_10:
{
- char *mode_buf;
+ char mode_buf[11];
+ int mode_buf_length = 8;
dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
- mode_buf = scsicmd->request_buffer;
mode_buf[0] = 0; /* Mode data length (MSB) */
mode_buf[1] = 6; /* Mode data length (LSB) */
mode_buf[2] = 0; /* Medium type - default */
- mode_buf[3] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
+ mode_buf[3] = 0; /* Device-specific param,
+ bit 8: 0/1 = write enabled/protected
+ bit 4: 0/1 = FUA enabled */
+ if (dev->raw_io_interface && ((aac_cache & 5) != 1))
+ mode_buf[3] = 0x10;
mode_buf[4] = 0; /* reserved */
mode_buf[5] = 0; /* reserved */
mode_buf[6] = 0; /* Block descriptor length (MSB) */
mode_buf[7] = 0; /* Block descriptor length (LSB) */
+ if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
+ ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
+ mode_buf[1] = 9;
+ mode_buf[8] = 8;
+ mode_buf[9] = 1;
+ mode_buf[10] = ((aac_cache & 6) == 2)
+ ? 0 : 0x04; /* WCE */
+ mode_buf_length = 11;
+ if (mode_buf_length > scsicmd->cmnd[8])
+ mode_buf_length = scsicmd->cmnd[8];
+ }
+ scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
@@ -1451,61 +2472,88 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
* These commands are all No-Ops
*/
case TEST_UNIT_READY:
+ if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ NOT_READY, SENCODE_BECOMING_READY,
+ ASENCODE_BECOMING_READY, 0, 0);
+ memcpy(scsicmd->sense_buffer,
+ &dev->fsa_dev[cid].sense_data,
+ min_t(size_t,
+ sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
+ /* FALLTHRU */
case RESERVE:
case RELEASE:
case REZERO_UNIT:
case REASSIGN_BLOCKS:
case SEEK_10:
- case START_STOP:
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
return 0;
+
+ case START_STOP:
+ return aac_start_stop(scsicmd);
}
- switch (scsicmd->cmnd[0])
+ switch (scsicmd->cmnd[0])
{
case READ_6:
case READ_10:
+ case READ_12:
+ case READ_16:
+ if (dev->in_reset)
+ return -1;
/*
* Hack to keep track of ordinal number of the device that
* corresponds to a container. Needed to convert
* containers to /dev/sd device names
*/
-
- spin_unlock_irq(host->host_lock);
- if (scsicmd->request->rq_disk)
- memcpy(fsa_dev_ptr[cid].devname,
- scsicmd->request->rq_disk->disk_name,
- 8);
-
- ret = aac_read(scsicmd, cid);
- spin_lock_irq(host->host_lock);
- return ret;
+
+ if (scsicmd->request->rq_disk)
+ strlcpy(fsa_dev_ptr[cid].devname,
+ scsicmd->request->rq_disk->disk_name,
+ min(sizeof(fsa_dev_ptr[cid].devname),
+ sizeof(scsicmd->request->rq_disk->disk_name) + 1));
+
+ return aac_read(scsicmd);
case WRITE_6:
case WRITE_10:
- spin_unlock_irq(host->host_lock);
- ret = aac_write(scsicmd, cid);
- spin_lock_irq(host->host_lock);
- return ret;
+ case WRITE_12:
+ case WRITE_16:
+ if (dev->in_reset)
+ return -1;
+ return aac_write(scsicmd);
case SYNCHRONIZE_CACHE:
+ if (((aac_cache & 6) == 6) && dev->cache_protected) {
+ scsicmd->result = DID_OK << 16 |
+ COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->scsi_done(scsicmd);
+ return 0;
+ }
		/* Issue FIB to tell Firmware to flush its cache */
- return aac_synchronize(scsicmd, cid);
-
+ if ((aac_cache & 6) != 2)
+ return aac_synchronize(scsicmd);
+ /* FALLTHRU */
default:
/*
* Unhandled commands
*/
dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
- set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
- ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
- ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
+ ASENCODE_INVALID_COMMAND, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
- (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
- ? sizeof(scsicmd->sense_buffer)
- : sizeof(dev->fsa_dev[cid].sense_data));
+ min_t(size_t,
+ sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
scsicmd->scsi_done(scsicmd);
return 0;
}
@@ -1517,11 +2565,13 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
struct fsa_dev_info *fsa_dev_ptr;
fsa_dev_ptr = dev->fsa_dev;
+ if (!fsa_dev_ptr)
+ return -EBUSY;
if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
return -EFAULT;
if (qd.cnum == -1)
- qd.cnum = ID_LUN_TO_CONTAINER(qd.id, qd.lun);
- else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
+ qd.cnum = qd.id;
+ else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
{
if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
return -EINVAL;
@@ -1532,7 +2582,7 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
}
else return -EINVAL;
- qd.valid = fsa_dev_ptr[qd.cnum].valid;
+ qd.valid = fsa_dev_ptr[qd.cnum].valid != 0;
qd.locked = fsa_dev_ptr[qd.cnum].locked;
qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
@@ -1555,6 +2605,8 @@ static int force_delete_disk(struct aac_dev *dev, void __user *arg)
struct fsa_dev_info *fsa_dev_ptr;
fsa_dev_ptr = dev->fsa_dev;
+ if (!fsa_dev_ptr)
+ return -EBUSY;
if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
return -EFAULT;
@@ -1578,6 +2630,8 @@ static int delete_disk(struct aac_dev *dev, void __user *arg)
struct fsa_dev_info *fsa_dev_ptr;
fsa_dev_ptr = dev->fsa_dev;
+ if (!fsa_dev_ptr)
+ return -EBUSY;
if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
return -EFAULT;
@@ -1632,29 +2686,36 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
struct scsi_cmnd *scsicmd;
scsicmd = (struct scsi_cmnd *) context;
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- if (fibptr == NULL)
- BUG();
+ if (!aac_valid_context(scsicmd, fibptr))
+ return;
+
+ BUG_ON(fibptr == NULL);
+
+ dev = fibptr->dev;
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
- /*
- * Calculate resid for sg
- */
-
- scsicmd->resid = scsicmd->request_bufflen -
- le32_to_cpu(srbreply->data_xfer_length);
-
- if(scsicmd->use_sg)
- pci_unmap_sg(dev->pdev,
- (struct scatterlist *)scsicmd->buffer,
- scsicmd->use_sg,
- scsicmd->sc_data_direction);
- else if(scsicmd->request_bufflen)
- pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
+
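+	/*
+	 * On a fast response the adapter completed the request without
+	 * writing status back into the FIB, so synthesize a clean
+	 * SRB_STATUS_SUCCESS/GOOD reply.
+	 */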
+ if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
+ /* fast response */
+ srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS);
+ srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD);
+ } else {
+ /*
+ * Calculate resid for sg
+ */
+ scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
+ - le32_to_cpu(srbreply->data_xfer_length));
+ }
+
+ scsi_dma_unmap(scsicmd);
+
+	/* expose physical device if expose_physicals flag is on */
+ if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
+ && expose_physicals > 0)
+ aac_expose_phy_device(scsicmd);
/*
* First check the fib status
@@ -1663,10 +2724,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
if (le32_to_cpu(srbreply->status) != ST_OK){
int len;
printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
- len = (le32_to_cpu(srbreply->sense_data_size) >
- sizeof(scsicmd->sense_buffer)) ?
- sizeof(scsicmd->sense_buffer) :
- le32_to_cpu(srbreply->sense_data_size);
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
}
@@ -1678,33 +2737,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
case SRB_STATUS_ERROR_RECOVERY:
case SRB_STATUS_PENDING:
case SRB_STATUS_SUCCESS:
- if(scsicmd->cmnd[0] == INQUIRY ){
- u8 b;
- u8 b1;
- /* We can't expose disk devices because we can't tell whether they
- * are the raw container drives or stand alone drives. If they have
- * the removable bit set then we should expose them though.
- */
- b = (*(u8*)scsicmd->buffer)&0x1f;
- b1 = ((u8*)scsicmd->buffer)[1];
- if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
- || (b==TYPE_DISK && (b1&0x80)) ){
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
- /*
- * We will allow disk devices if in RAID/SCSI mode and
- * the channel is 2
- */
- } else if ((dev->raid_scsi_mode) &&
- (scsicmd->device->channel == 2)) {
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8;
- } else {
- scsicmd->result = DID_NO_CONNECT << 16 |
- COMMAND_COMPLETE << 8;
- }
- } else {
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
- }
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
break;
case SRB_STATUS_DATA_OVERRUN:
switch(scsicmd->cmnd[0]){
@@ -1714,7 +2747,9 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
case WRITE_10:
case READ_12:
case WRITE_12:
- if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
+ case READ_16:
+ case WRITE_16:
+ if (le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow) {
printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
} else {
printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
@@ -1722,28 +2757,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
break;
case INQUIRY: {
- u8 b;
- u8 b1;
- /* We can't expose disk devices because we can't tell whether they
- * are the raw container drives or stand alone drives
- */
- b = (*(u8*)scsicmd->buffer)&0x0f;
- b1 = ((u8*)scsicmd->buffer)[1];
- if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
- || (b==TYPE_DISK && (b1&0x80)) ){
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
- /*
- * We will allow disk devices if in RAID/SCSI mode and
- * the channel is 2
- */
- } else if ((dev->raid_scsi_mode) &&
- (scsicmd->device->channel == 2)) {
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8;
- } else {
- scsicmd->result = DID_NO_CONNECT << 16 |
- COMMAND_COMPLETE << 8;
- }
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
break;
}
default:
@@ -1775,7 +2789,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
break;
case SRB_STATUS_BUSY:
- scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
break;
case SRB_STATUS_BUS_RESET:
@@ -1804,35 +2818,46 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
le32_to_cpu(srbreply->srb_status) & 0x3F,
aac_get_status_string(
- le32_to_cpu(srbreply->srb_status) & 0x3F),
- scsicmd->cmnd[0],
+ le32_to_cpu(srbreply->srb_status) & 0x3F),
+ scsicmd->cmnd[0],
le32_to_cpu(srbreply->scsi_status));
#endif
- scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
- break;
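+		/*
+		 * For ATA passthrough, CDB byte 2 bit 5 (CK_COND) requests
+		 * the completion registers via sense data, so a check
+		 * condition there is expected and treated as success.
+		 */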
+ if ((scsicmd->cmnd[0] == ATA_12)
+ || (scsicmd->cmnd[0] == ATA_16)) {
+ if (scsicmd->cmnd[2] & (0x01 << 5)) {
+ scsicmd->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ } else {
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ }
+ } else {
+ scsicmd->result = DID_ERROR << 16
+ | COMMAND_COMPLETE << 8;
+ break;
+ }
}
- if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){ // Check Condition
+ if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) {
int len;
scsicmd->result |= SAM_STAT_CHECK_CONDITION;
- len = (le32_to_cpu(srbreply->sense_data_size) >
- sizeof(scsicmd->sense_buffer)) ?
- sizeof(scsicmd->sense_buffer) :
- le32_to_cpu(srbreply->sense_data_size);
+ len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+ SCSI_SENSE_BUFFERSIZE);
#ifdef AAC_DETAILED_STATUS_INFO
- dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
- le32_to_cpu(srbreply->status), len));
+ printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
+ le32_to_cpu(srbreply->status), len);
#endif
memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
-
}
/*
* OR in the scsi status (already shifted up a bit)
*/
scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
- fib_complete(fibptr);
- fib_free(fibptr);
- aac_io_done(scsicmd);
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ scsicmd->scsi_done(scsicmd);
}
/**
@@ -1840,7 +2865,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
* aac_send_scb_fib
* @scsicmd: the scsi command block
*
- * This routine will form a FIB and fill in the aac_srb from the
+ * This routine will form a FIB and fill in the aac_srb from the
* scsicmd passed in.
*/
@@ -1849,150 +2874,70 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
struct fib* cmd_fibcontext;
struct aac_dev* dev;
int status;
- struct aac_srb *srbcmd;
- u16 fibsize;
- u32 flag;
- u32 timeout;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- if (scsicmd->device->id >= dev->maximum_num_physicals ||
+ if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
scsicmd->device->lun > 7) {
scsicmd->result = DID_NO_CONNECT << 16;
scsicmd->scsi_done(scsicmd);
return 0;
}
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- switch(scsicmd->sc_data_direction){
- case DMA_TO_DEVICE:
- flag = SRB_DataOut;
- break;
- case DMA_BIDIRECTIONAL:
- flag = SRB_DataIn | SRB_DataOut;
- break;
- case DMA_FROM_DEVICE:
- flag = SRB_DataIn;
- break;
- case DMA_NONE:
- default: /* shuts up some versions of gcc */
- flag = SRB_NoDataXfer;
- break;
- }
-
-
/*
 *	Allocate and initialize a Fib, then set up an SRB command
*/
- if (!(cmd_fibcontext = fib_alloc(dev))) {
+ if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
return -1;
}
- fib_init(cmd_fibcontext);
+ status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
- srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
- srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
- srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scsicmd->device->channel));
- srbcmd->id = cpu_to_le32(scsicmd->device->id);
- srbcmd->lun = cpu_to_le32(scsicmd->device->lun);
- srbcmd->flags = cpu_to_le32(flag);
- timeout = (scsicmd->timeout-jiffies)/HZ;
- if(timeout == 0){
- timeout = 1;
- }
- srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
- srbcmd->retry_limit = 0; /* Obsolete parameter */
- srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
-
- if( dev->dac_support == 1 ) {
- aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
- srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
-
- memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
- memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
- /*
- * Build Scatter/Gather list
- */
- fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
- ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
- sizeof (struct sgentry64));
- BUG_ON (fibsize > (dev->max_fib_size -
- sizeof(struct aac_fibhdr)));
-
- /*
- * Now send the Fib to the adapter
- */
- status = fib_send(ScsiPortCommand64, cmd_fibcontext,
- fibsize, FsaNormal, 0, 1,
- (fib_callback) aac_srb_callback,
- (void *) scsicmd);
- } else {
- aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
- srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
-
- memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
- memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
- /*
- * Build Scatter/Gather list
- */
- fibsize = sizeof (struct aac_srb) +
- (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
- sizeof (struct sgentry));
- BUG_ON (fibsize > (dev->max_fib_size -
- sizeof(struct aac_fibhdr)));
-
- /*
- * Now send the Fib to the adapter
- */
- status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
- (fib_callback) aac_srb_callback, (void *) scsicmd);
- }
/*
	 *	Check that the command was queued to the controller
*/
- if (status == -EINPROGRESS){
+ if (status == -EINPROGRESS) {
+ scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
return 0;
}
- printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
- fib_complete(cmd_fibcontext);
- fib_free(cmd_fibcontext);
+ printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
+ aac_fib_complete(cmd_fibcontext);
+ aac_fib_free(cmd_fibcontext);
return -1;
}
-static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
+static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
{
struct aac_dev *dev;
unsigned long byte_count = 0;
+ int nseg;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
// Get rid of old data
psg->count = 0;
psg->sg[0].addr = 0;
- psg->sg[0].count = 0;
- if (scsicmd->use_sg) {
+ psg->sg[0].count = 0;
+
+ nseg = scsi_dma_map(scsicmd);
+ if (nseg < 0)
+ return nseg;
+ if (nseg) {
struct scatterlist *sg;
int i;
- int sg_count;
- sg = (struct scatterlist *) scsicmd->request_buffer;
-
- sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
- scsicmd->sc_data_direction);
- psg->count = cpu_to_le32(sg_count);
- byte_count = 0;
+ psg->count = cpu_to_le32(nseg);
- for (i = 0; i < sg_count; i++) {
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
byte_count += sg_dma_len(sg);
- sg++;
}
/* hba wants the size to be exact */
- if(byte_count > scsicmd->request_bufflen){
- u32 temp = le32_to_cpu(psg->sg[i-1].count) -
- (byte_count - scsicmd->request_bufflen);
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+ (byte_count - scsi_bufflen(scsicmd));
psg->sg[i-1].count = cpu_to_le32(temp);
- byte_count = scsicmd->request_bufflen;
+ byte_count = scsi_bufflen(scsicmd);
}
/* Check for command underflow */
if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
@@ -2000,27 +2945,16 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
byte_count, scsicmd->underflow);
}
}
- else if(scsicmd->request_bufflen) {
- dma_addr_t addr;
- addr = pci_map_single(dev->pdev,
- scsicmd->request_buffer,
- scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
- psg->count = cpu_to_le32(1);
- psg->sg[0].addr = cpu_to_le32(addr);
- psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
- scsicmd->SCp.dma_handle = addr;
- byte_count = scsicmd->request_bufflen;
- }
return byte_count;
}
-static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg)
+static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
{
struct aac_dev *dev;
unsigned long byte_count = 0;
u64 addr;
+ int nseg;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
// Get rid of old data
@@ -2028,32 +2962,29 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
psg->sg[0].addr[0] = 0;
psg->sg[0].addr[1] = 0;
psg->sg[0].count = 0;
- if (scsicmd->use_sg) {
+
+ nseg = scsi_dma_map(scsicmd);
+ if (nseg < 0)
+ return nseg;
+ if (nseg) {
struct scatterlist *sg;
int i;
- int sg_count;
- sg = (struct scatterlist *) scsicmd->request_buffer;
-
- sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
- scsicmd->sc_data_direction);
- psg->count = cpu_to_le32(sg_count);
- byte_count = 0;
-
- for (i = 0; i < sg_count; i++) {
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
+ int count = sg_dma_len(sg);
addr = sg_dma_address(sg);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
- psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
- byte_count += sg_dma_len(sg);
- sg++;
+ psg->sg[i].count = cpu_to_le32(count);
+ byte_count += count;
}
+ psg->count = cpu_to_le32(nseg);
/* hba wants the size to be exact */
- if(byte_count > scsicmd->request_bufflen){
- u32 temp = le32_to_cpu(psg->sg[i-1].count) -
- (byte_count - scsicmd->request_bufflen);
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+ (byte_count - scsi_bufflen(scsicmd));
psg->sg[i-1].count = cpu_to_le32(temp);
- byte_count = scsicmd->request_bufflen;
+ byte_count = scsi_bufflen(scsicmd);
}
/* Check for command underflow */
if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
@@ -2061,22 +2992,172 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
byte_count, scsicmd->underflow);
}
}
- else if(scsicmd->request_bufflen) {
- u64 addr;
- addr = pci_map_single(dev->pdev,
- scsicmd->request_buffer,
- scsicmd->request_bufflen,
- scsicmd->sc_data_direction);
- psg->count = cpu_to_le32(1);
- psg->sg[0].addr[0] = cpu_to_le32(addr & 0xffffffff);
- psg->sg[0].addr[1] = cpu_to_le32(addr >> 32);
- psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
- scsicmd->SCp.dma_handle = addr;
- byte_count = scsicmd->request_bufflen;
+ return byte_count;
+}
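
aac_build_sg64() stores each mapped address as two little-endian 32-bit halves, addr[0] low and addr[1] high. A minimal sketch of the split, with an illustrative address and the cpu_to_le32() conversion left out:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t addr = 0x00000001fffff000ULL;  /* illustrative DMA address */
            uint32_t lo = (uint32_t)(addr & 0xffffffff);
            uint32_t hi = (uint32_t)(addr >> 32);

            printf("addr[0]=%08x addr[1]=%08x\n", lo, hi); /* fffff000, 00000001 */
            return 0;
    }
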
+
+static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
+{
+ unsigned long byte_count = 0;
+ int nseg;
+
+ // Get rid of old data
+ psg->count = 0;
+ psg->sg[0].next = 0;
+ psg->sg[0].prev = 0;
+ psg->sg[0].addr[0] = 0;
+ psg->sg[0].addr[1] = 0;
+ psg->sg[0].count = 0;
+ psg->sg[0].flags = 0;
+
+ nseg = scsi_dma_map(scsicmd);
+ if (nseg < 0)
+ return nseg;
+ if (nseg) {
+ struct scatterlist *sg;
+ int i;
+
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
+ int count = sg_dma_len(sg);
+ u64 addr = sg_dma_address(sg);
+ psg->sg[i].next = 0;
+ psg->sg[i].prev = 0;
+ psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
+ psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
+ psg->sg[i].count = cpu_to_le32(count);
+ psg->sg[i].flags = 0;
+ byte_count += count;
+ }
+ psg->count = cpu_to_le32(nseg);
+ /* hba wants the size to be exact */
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+ (byte_count - scsi_bufflen(scsicmd));
+ psg->sg[i-1].count = cpu_to_le32(temp);
+ byte_count = scsi_bufflen(scsicmd);
+ }
+ /* Check for command underflow */
+ if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+ printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
+ byte_count, scsicmd->underflow);
+ }
+ }
+ return byte_count;
+}
+
+static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
+ struct aac_raw_io2 *rio2, int sg_max)
+{
+ unsigned long byte_count = 0;
+ int nseg;
+
+ nseg = scsi_dma_map(scsicmd);
+ if (nseg < 0)
+ return nseg;
+ if (nseg) {
+ struct scatterlist *sg;
+ int i, conformable = 0;
+ u32 min_size = PAGE_SIZE, cur_size;
+
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
+ int count = sg_dma_len(sg);
+ u64 addr = sg_dma_address(sg);
+
+ BUG_ON(i >= sg_max);
+ rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
+ rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
+ cur_size = cpu_to_le32(count);
+ rio2->sge[i].length = cur_size;
+ rio2->sge[i].flags = 0;
+ if (i == 0) {
+ conformable = 1;
+ rio2->sgeFirstSize = cur_size;
+ } else if (i == 1) {
+ rio2->sgeNominalSize = cur_size;
+ min_size = cur_size;
+ } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
+ conformable = 0;
+ if (cur_size < min_size)
+ min_size = cur_size;
+ }
+ byte_count += count;
+ }
+
+ /* hba wants the size to be exact */
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
+ (byte_count - scsi_bufflen(scsicmd));
+ rio2->sge[i-1].length = cpu_to_le32(temp);
+ byte_count = scsi_bufflen(scsicmd);
+ }
+
+ rio2->sgeCnt = cpu_to_le32(nseg);
+ rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
+ /* not conformable: evaluate required sg elements */
+ if (!conformable) {
+ int j, nseg_new = nseg, err_found;
+ for (i = min_size / PAGE_SIZE; i >= 1; --i) {
+ err_found = 0;
+ nseg_new = 2;
+ for (j = 1; j < nseg - 1; ++j) {
+ if (rio2->sge[j].length % (i*PAGE_SIZE)) {
+ err_found = 1;
+ break;
+ }
+ nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
+ }
+ if (!err_found)
+ break;
+ }
+ if (i > 0 && nseg_new <= sg_max)
+ aac_convert_sgraw2(rio2, i, nseg, nseg_new);
+ } else
+ rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+
+ /* Check for command underflow */
+ if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+ printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+ byte_count, scsicmd->underflow);
+ }
}
+
return byte_count;
}
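
The "not conformable" branch in aac_build_sgraw2() searches downward for the largest chunk size, in whole pages, that evenly divides every middle scatter element; nseg_new counts the elements after splitting, with the first and last kept as-is. A stand-alone sketch of the search over an illustrative list of element lengths, assuming a 4 KiB page:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
            unsigned lengths[] = { 512, 16384, 8192, 24576, 1024 };
            int nseg = 5, i, j, nseg_new = nseg, err_found;
            unsigned min_size = 8192;               /* smallest middle element */

            for (i = min_size / PAGE_SIZE; i >= 1; --i) {
                    err_found = 0;
                    nseg_new = 2;                   /* first and last survive */
                    for (j = 1; j < nseg - 1; ++j) {
                            if (lengths[j] % (i * PAGE_SIZE)) {
                                    err_found = 1;
                                    break;
                            }
                            nseg_new += lengths[j] / (i * PAGE_SIZE);
                    }
                    if (!err_found)
                            break;
            }
            /* 16384, 8192 and 24576 are all multiples of two pages, so the
             * search stops at i == 2 with nseg_new == 2 + 2 + 1 + 3 == 8 */
            printf("chunk pages %d, new element count %d\n", i, nseg_new);
            return 0;
    }
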
+static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new)
+{
+ struct sge_ieee1212 *sge;
+ int i, j, pos;
+ u32 addr_low;
+
+ if (aac_convert_sgl == 0)
+ return 0;
+
+ sge = kmalloc(nseg_new * sizeof(struct sge_ieee1212), GFP_ATOMIC);
+ if (sge == NULL)
+ return -1;
+
+ for (i = 1, pos = 1; i < nseg-1; ++i) {
+ for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
+ addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE;
+ sge[pos].addrLow = addr_low;
+ sge[pos].addrHigh = rio2->sge[i].addrHigh;
+ if (addr_low < rio2->sge[i].addrLow)
+ sge[pos].addrHigh++;
+ sge[pos].length = pages * PAGE_SIZE;
+ sge[pos].flags = 0;
+ pos++;
+ }
+ }
+ sge[pos] = rio2->sge[nseg-1];
+ memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212));
+
+ kfree(sge);
+ rio2->sgeCnt = cpu_to_le32(nseg_new);
+ rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+ rio2->sgeNominalSize = pages * PAGE_SIZE;
+ return 0;
+}
+
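
When aac_convert_sgraw2() walks a split element forward, a new low address word that is smaller than the element's base low word means the 32-bit addition wrapped, so the high word gets a carry. A sketch of that test with illustrative values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t base_low = 0xfffff000, high = 0x1;
            uint32_t addr_low = base_low + 2 * 4096;  /* wraps past 2^32 */

            if (addr_low < base_low)  /* unsigned wrap => carry into high word */
                    high++;
            printf("%08x:%08x\n", high, addr_low);    /* 00000002:00001000 */
            return 0;
    }
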
#ifdef AAC_DETAILED_STATUS_INFO
struct aac_srb_status_info {
@@ -2090,7 +3171,7 @@ static struct aac_srb_status_info srb_status_info[] = {
{ SRB_STATUS_SUCCESS, "Success"},
{ SRB_STATUS_ABORTED, "Aborted Command"},
{ SRB_STATUS_ABORT_FAILED, "Abort Failed"},
- { SRB_STATUS_ERROR, "Error Event"},
+ { SRB_STATUS_ERROR, "Error Event"},
{ SRB_STATUS_BUSY, "Device Busy"},
{ SRB_STATUS_INVALID_REQUEST, "Invalid Request"},
{ SRB_STATUS_INVALID_PATH_ID, "Invalid Path ID"},
@@ -2109,13 +3190,13 @@ static struct aac_srb_status_info srb_status_info[] = {
{ SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
{ SRB_STATUS_REQUEST_FLUSHED, "Request Flushed"},
{ SRB_STATUS_DELAYED_RETRY, "Delayed Retry"},
- { SRB_STATUS_INVALID_LUN, "Invalid LUN"},
+ { SRB_STATUS_INVALID_LUN, "Invalid LUN"},
{ SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
{ SRB_STATUS_BAD_FUNCTION, "Bad Function"},
{ SRB_STATUS_ERROR_RECOVERY, "Error Recovery"},
{ SRB_STATUS_NOT_STARTED, "Not Started"},
{ SRB_STATUS_NOT_IN_USE, "Not In Use"},
- { SRB_STATUS_FORCE_ABORT, "Force Abort"},
+ { SRB_STATUS_FORCE_ABORT, "Force Abort"},
{ SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
{ 0xff, "Unknown Error"}
};
@@ -2124,11 +3205,9 @@ char *aac_get_status_string(u32 status)
{
int i;
- for(i=0; i < (sizeof(srb_status_info)/sizeof(struct aac_srb_status_info)); i++ ){
- if(srb_status_info[i].status == status){
+ for (i = 0; i < ARRAY_SIZE(srb_status_info); i++)
+ if (srb_status_info[i].status == status)
return srb_status_info[i].str;
- }
- }
return "Bad Status Code";
}
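
The rewritten lookup loop above uses the kernel's ARRAY_SIZE() helper in place of the open-coded sizeof division. Minus the kernel's extra compile-time type checking, the helper reduces to:

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
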
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3a11a536c0d..eaaf8705a5f 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1,37 +1,47 @@
-#if (!defined(dprintk))
+#ifndef dprintk
# define dprintk(x)
#endif
+/* eg: if (nblank(dprintk(x))) */
+#define _nblank(x) #x
+#define nblank(x) _nblank(x)[0]
+
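
The nblank() pair lets code test whether a macro such as dprintk() expands to anything: the argument is expanded through the outer macro, stringized, and its first character tested, so an empty expansion yields '\0' (false). A stand-alone sketch:

    #include <stdio.h>

    #define dprintk(x)              /* compiled out, as in the driver */
    #define _nblank(x) #x
    #define nblank(x) _nblank(x)[0]

    int main(void)
    {
            /* dprintk(hello) expands to nothing, so the string is "" */
            printf("%d\n", nblank(dprintk(hello)) ? 1 : 0);   /* prints 0 */
            return 0;
    }
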
+#include <linux/interrupt.h>
/*------------------------------------------------------------------------------
* D E F I N E S
*----------------------------------------------------------------------------*/
+#ifndef AAC_DRIVER_BUILD
+# define AAC_DRIVER_BUILD 30300
+# define AAC_DRIVER_BRANCH "-ms"
+#endif
#define MAXIMUM_NUM_CONTAINERS 32
#define AAC_NUM_MGT_FIB 8
-#define AAC_NUM_IO_FIB (512 - AAC_NUM_MGT_FIB)
+#define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB)
#define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)
#define AAC_MAX_LUN (8)
#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
-/*
- * max_sectors is an unsigned short, otherwise limit is 0x100000000 / 512
- * Linux has starvation problems if we permit larger than 4MB I/O ...
- */
-#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)8192)
+#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256)
+
+#define AAC_DEBUG_INSTRUMENT_AIF_DELETE
/*
* These macros convert from physical channels to virtual channels
*/
#define CONTAINER_CHANNEL (0)
-#define ID_LUN_TO_CONTAINER(id, lun) (id)
#define CONTAINER_TO_CHANNEL(cont) (CONTAINER_CHANNEL)
#define CONTAINER_TO_ID(cont) (cont)
#define CONTAINER_TO_LUN(cont) (0)
-#define aac_phys_to_logical(x) (x+1)
-#define aac_logical_to_phys(x) (x?x-1:0)
+#define PMC_DEVICE_S7 0x28c
+#define PMC_DEVICE_S8 0x28d
+#define PMC_DEVICE_S9 0x28f
+
+#define aac_phys_to_logical(x) ((x)+1)
+#define aac_logical_to_phys(x) ((x)?(x)-1:0)
/* #define AAC_DETAILED_STATUS_INFO */
@@ -44,49 +54,13 @@ struct diskparm
/*
- * DON'T CHANGE THE ORDER, this is set by the firmware
+ * Firmware constants
*/
-
+
#define CT_NONE 0
-#define CT_VOLUME 1
-#define CT_MIRROR 2
-#define CT_STRIPE 3
-#define CT_RAID5 4
-#define CT_SSRW 5
-#define CT_SSRO 6
-#define CT_MORPH 7
-#define CT_PASSTHRU 8
-#define CT_RAID4 9
-#define CT_RAID10 10 /* stripe of mirror */
-#define CT_RAID00 11 /* stripe of stripe */
-#define CT_VOLUME_OF_MIRRORS 12 /* volume of mirror */
-#define CT_PSEUDO_RAID 13 /* really raid4 */
-#define CT_LAST_VOLUME_TYPE 14
-#define CT_OK 218
-
-/*
- * Types of objects addressable in some fashion by the client.
- * This is a superset of those objects handled just by the filesystem
- * and includes "raw" objects that an administrator would use to
- * configure containers and filesystems.
- */
-
-#define FT_REG 1 /* regular file */
-#define FT_DIR 2 /* directory */
-#define FT_BLK 3 /* "block" device - reserved */
-#define FT_CHR 4 /* "character special" device - reserved */
-#define FT_LNK 5 /* symbolic link */
-#define FT_SOCK 6 /* socket */
-#define FT_FIFO 7 /* fifo */
+#define CT_OK 218
#define FT_FILESYS 8 /* ADAPTEC's "FSA"(tm) filesystem */
#define FT_DRIVE 9 /* physical disk - addressable in scsi by bus/id/lun */
-#define FT_SLICE 10 /* virtual disk - raw volume - slice */
-#define FT_PARTITION 11 /* FSA partition - carved out of a slice - building block for containers */
-#define FT_VOLUME 12 /* Container - Volume Set */
-#define FT_STRIPE 13 /* Container - Stripe Set */
-#define FT_MIRROR 14 /* Container - Mirror Set */
-#define FT_RAID5 15 /* Container - Raid 5 Set */
-#define FT_DATABASE 16 /* Storage object with "foreign" content manager */
/*
* Host side memory scatter gather list
@@ -114,6 +88,29 @@ struct user_sgentry64 {
u32 count; /* Length. */
};
+struct sgentryraw {
+ __le32 next; /* reserved for F/W use */
+ __le32 prev; /* reserved for F/W use */
+ __le32 addr[2];
+ __le32 count;
+ __le32 flags; /* reserved for F/W use */
+};
+
+struct user_sgentryraw {
+ u32 next; /* reserved for F/W use */
+ u32 prev; /* reserved for F/W use */
+ u32 addr[2];
+ u32 count;
+ u32 flags; /* reserved for F/W use */
+};
+
+struct sge_ieee1212 {
+ u32 addrLow;
+ u32 addrHigh;
+ u32 length;
+ u32 flags;
+};
+
/*
* SGMAP
*
@@ -123,12 +120,12 @@ struct user_sgentry64 {
struct sgmap {
__le32 count;
- struct sgentry sg[1];
+ struct sgentry sg[1];
};
struct user_sgmap {
u32 count;
- struct user_sgentry sg[1];
+ struct user_sgentry sg[1];
};
struct sgmap64 {
@@ -141,20 +138,30 @@ struct user_sgmap64 {
struct user_sgentry64 sg[1];
};
+struct sgmapraw {
+ __le32 count;
+ struct sgentryraw sg[1];
+};
+
+struct user_sgmapraw {
+ u32 count;
+ struct user_sgentryraw sg[1];
+};
+
struct creation_info
{
- u8 buildnum; /* e.g., 588 */
- u8 usec; /* e.g., 588 */
- u8 via; /* e.g., 1 = FSU,
- * 2 = API
+ u8 buildnum; /* e.g., 588 */
+ u8 usec; /* e.g., 588 */
+ u8 via; /* e.g., 1 = FSU,
+ * 2 = API
*/
- u8 year; /* e.g., 1997 = 97 */
+ u8 year; /* e.g., 1997 = 97 */
__le32 date; /*
- * unsigned Month :4; // 1 - 12
- * unsigned Day :6; // 1 - 32
- * unsigned Hour :6; // 0 - 23
- * unsigned Minute :6; // 0 - 60
- * unsigned Second :6; // 0 - 60
+ * unsigned Month :4; // 1 - 12
+ * unsigned Day :6; // 1 - 32
+ * unsigned Hour :6; // 0 - 23
+ * unsigned Minute :6; // 0 - 60
+ * unsigned Second :6; // 0 - 60
*/
__le32 serial[2]; /* e.g., 0x1DEADB0BFAFAF001 */
};
@@ -190,7 +197,7 @@ struct creation_info
/*
* Set the queues on a 16 byte alignment
*/
-
+
#define QUEUE_ALIGNMENT 16
/*
@@ -209,9 +216,9 @@ struct aac_entry {
* The adapter assumes the ProducerIndex and ConsumerIndex are grouped
* adjacently and in that order.
*/
-
+
struct aac_qhdr {
- __le64 header_addr;/* Address to hand the adapter to access
+ __le64 header_addr;/* Address to hand the adapter to access
to this queue head */
__le32 *producer; /* The producer index for this queue (host address) */
__le32 *consumer; /* The consumer index for this queue (host address) */
@@ -221,7 +228,7 @@ struct aac_qhdr {
* Define all the events which the adapter would like to notify
* the host of.
*/
-
+
#define HostNormCmdQue 1 /* Change in host normal priority command queue */
#define HostHighCmdQue 2 /* Change in host high priority command queue */
#define HostNormRespQue 3 /* Change in host normal priority response queue */
@@ -274,13 +281,24 @@ enum aac_queue_types {
*/
#define FIB_MAGIC 0x0001
+#define FIB_MAGIC2 0x0004
+#define FIB_MAGIC2_64 0x0005
/*
* Define the priority levels the FSA communication routines support.
*/
#define FsaNormal 1
-#define FsaHigh 2
+
+/* transport FIB header (PMC) */
+struct aac_fib_xporthdr {
+ u64 HostAddress; /* FIB host address w/o xport header */
+ u32 Size; /* FIB size excluding xport header */
+ u32 Handle; /* driver handle to reference the FIB */
+ u64 Reserved[2];
+};
+
+#define ALIGN32 32
/*
* Define the FIB. The FIB is the where all the requested data and
@@ -291,22 +309,20 @@ struct aac_fibhdr {
__le32 XferState; /* Current transfer state for this CCB */
__le16 Command; /* Routing information for the destination */
u8 StructType; /* Type FIB */
- u8 Flags; /* Flags for FIB */
+ u8 Unused; /* Unused */
__le16 Size; /* Size of this FIB in bytes */
- __le16 SenderSize; /* Size of the FIB in the sender
+ __le16 SenderSize; /* Size of the FIB in the sender
(for response sizing) */
__le32 SenderFibAddress; /* Host defined data in the FIB */
- __le32 ReceiverFibAddress;/* Logical address of this FIB for
- the adapter */
- u32 SenderData; /* Place holder for the sender to store data */
union {
- struct {
- __le32 _ReceiverTimeStart; /* Timestamp for
- receipt of fib */
- __le32 _ReceiverTimeDone; /* Timestamp for
- completion of fib */
- } _s;
- } _u;
+ __le32 ReceiverFibAddress;/* Logical address of this FIB for
+ the adapter (old) */
+ __le32 SenderFibAddressHigh;/* upper 32bit of phys. FIB address */
+ __le32 TimeStamp; /* otherwise timestamp for FW internal use */
+ } u;
+ u32 Handle; /* FIB handle used for MSGU communication */
+ u32 Previous; /* FW internal use */
+ u32 Next; /* FW internal use */
};
struct hw_fib {
@@ -318,7 +334,7 @@ struct hw_fib {
* FIB commands
*/
-#define TestCommandResponse 1
+#define TestCommandResponse 1
#define TestAdapterCommand 2
/*
* Lowlevel and comm commands
@@ -355,10 +371,8 @@ struct hw_fib {
*/
#define ContainerCommand 500
#define ContainerCommand64 501
-/*
- * Cluster Commands
- */
-#define ClusterCommand 550
+#define ContainerRawIo 502
+#define ContainerRawIo2 503
/*
* Scsi Port commands (scsi passthrough)
*/
@@ -381,19 +395,19 @@ struct hw_fib {
*/
enum fib_xfer_state {
- HostOwned = (1<<0),
- AdapterOwned = (1<<1),
- FibInitialized = (1<<2),
- FibEmpty = (1<<3),
- AllocatedFromPool = (1<<4),
- SentFromHost = (1<<5),
- SentFromAdapter = (1<<6),
- ResponseExpected = (1<<7),
- NoResponseExpected = (1<<8),
- AdapterProcessed = (1<<9),
- HostProcessed = (1<<10),
- HighPriority = (1<<11),
- NormalPriority = (1<<12),
+ HostOwned = (1<<0),
+ AdapterOwned = (1<<1),
+ FibInitialized = (1<<2),
+ FibEmpty = (1<<3),
+ AllocatedFromPool = (1<<4),
+ SentFromHost = (1<<5),
+ SentFromAdapter = (1<<6),
+ ResponseExpected = (1<<7),
+ NoResponseExpected = (1<<8),
+ AdapterProcessed = (1<<9),
+ HostProcessed = (1<<10),
+ HighPriority = (1<<11),
+ NormalPriority = (1<<12),
Async = (1<<13),
AsyncIo = (1<<13), // rpbfix: remove with new regime
PageFileIo = (1<<14), // rpbfix: remove with new regime
@@ -402,7 +416,9 @@ enum fib_xfer_state {
AdapterMicroFib = (1<<17),
BIOSFibPath = (1<<18),
FastResponseCapable = (1<<19),
- ApiFib = (1<<20) // Its an API Fib.
+ ApiFib = (1<<20), /* It's an API Fib */
+ /* PMC NEW COMM: There is no more AIF data pending */
+ NoMoreAifDataAvailable = (1<<21)
};
/*
@@ -412,6 +428,8 @@ enum fib_xfer_state {
#define ADAPTER_INIT_STRUCT_REVISION 3
#define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science
+#define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */
+#define ADAPTER_INIT_STRUCT_REVISION_7 7 /* Denali */
struct aac_init
{
@@ -426,7 +444,7 @@ struct aac_init
__le32 AdapterFibAlign;
__le32 printfbuf;
__le32 printfbufsiz;
- __le32 HostPhysMemPages; /* number of 4k pages of host
+ __le32 HostPhysMemPages; /* number of 4k pages of host
physical memory */
__le32 HostElapsedSeconds; /* number of seconds since 1970. */
/*
@@ -434,9 +452,19 @@ struct aac_init
*/
__le32 InitFlags; /* flags for supported features */
#define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001
+#define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010
+#define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020
+#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000040
+#define INITFLAGS_FAST_JBOD_SUPPORTED 0x00000080
+#define INITFLAGS_NEW_COMM_TYPE2_SUPPORTED 0x00000100
__le32 MaxIoCommands; /* max outstanding commands */
__le32 MaxIoSize; /* largest I/O command */
__le32 MaxFibSize; /* largest FIB to adapter */
+ /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */
+ __le32 MaxNumAif; /* max number of aif */
+ /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */
+ __le32 HostRRQ_AddrLow;
+ __le32 HostRRQ_AddrHigh; /* Host RRQ (response queue) for SRC */
};
enum aac_log_level {
@@ -455,13 +483,30 @@ enum aac_log_level {
#define FSAFS_NTC_FIB_CONTEXT 0x030c
struct aac_dev;
+struct fib;
+struct scsi_cmnd;
struct adapter_ops
{
+ /* Low level operations */
void (*adapter_interrupt)(struct aac_dev *dev);
void (*adapter_notify)(struct aac_dev *dev, u32 event);
+ void (*adapter_disable_int)(struct aac_dev *dev);
+ void (*adapter_enable_int)(struct aac_dev *dev);
int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
int (*adapter_check_health)(struct aac_dev *dev);
+ int (*adapter_restart)(struct aac_dev *dev, int bled);
+ /* Transport operations */
+ int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
+ irq_handler_t adapter_intr;
+ /* Packet operations */
+ int (*adapter_deliver)(struct fib * fib);
+ int (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba);
+ int (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
+ int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua);
+ int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
+ /* Administrative operations */
+ int (*adapter_comm)(struct aac_dev * dev, int comm);
};
/*
@@ -470,7 +515,7 @@ struct adapter_ops
struct aac_driver_ident
{
- int (*init)(struct aac_dev *dev);
+ int (*init)(struct aac_dev *dev);
char * name;
char * vname;
char * model;
@@ -478,7 +523,7 @@ struct aac_driver_ident
int quirks;
};
/*
- * Some adapter firmware needs communication memory
+ * Some adapter firmware needs communication memory
* below 2gig. This tells the init function to set the
* dma mask such that fib memory will be allocated where the
* adapter firmware can get to it.
@@ -503,36 +548,46 @@ struct aac_driver_ident
#define AAC_QUIRK_MASTER 0x0008
/*
+ * Some adapter firmware perform poorly when it must split up scatter gathers
+ * in order to deal with the limits of the underlying CHIM. This limit in this
+ * class of adapters is 17 scatter gather elements.
+ */
+#define AAC_QUIRK_17SG 0x0010
+
+/*
+ * Some adapter firmware does not support 64 bit scsi passthrough
+ * commands.
+ */
+#define AAC_QUIRK_SCSI_32 0x0020
+
+/*
* The adapter interface specs all queues to be located in the same
- * physically contigous block. The host structure that defines the
+ * physically contiguous block. The host structure that defines the
* communication queues will assume they are each a separate physically
- * contigous memory region that will support them all being one big
- * contigous block.
+ * contiguous memory region that will support them all being one big
+ * contiguous block.
* There is a command and response queue for each level and direction of
* communication. These regions are accessed by both the host and adapter.
*/
-
+
struct aac_queue {
- u64 logical; /*address we give the adapter */
+ u64 logical; /*address we give the adapter */
struct aac_entry *base; /*system virtual address */
- struct aac_qhdr headers; /*producer,consumer q headers*/
- u32 entries; /*Number of queue entries */
+ struct aac_qhdr headers; /*producer,consumer q headers*/
+ u32 entries; /*Number of queue entries */
wait_queue_head_t qfull; /*Event to wait on if q full */
wait_queue_head_t cmdready; /*Cmd ready from the adapter */
- /* This is only valid for adapter to host command queues. */
- spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */
+ /* This is only valid for adapter to host command queues. */
+ spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */
spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
- unsigned long SavedIrql; /* Previous IRQL when the spin lock is taken */
- u32 padding; /* Padding - FIXME - can remove I believe */
- struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
- /* only valid for command queues which receive entries from the adapter. */
- struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */
+ struct list_head cmdq; /* A queue of FIBs which need to be processed by the FS thread. This is */
+ /* only valid for command queues which receive entries from the adapter. */
u32 numpending; /* Number of entries on outstanding queue. */
struct aac_dev * dev; /* Back pointer to adapter structure */
};
/*
- * Message queues. The order here is important, see also the
+ * Message queues. The order here is important, see also the
* queue type ordering
*/
@@ -544,12 +599,12 @@ struct aac_queue_block
/*
* SaP1 Message Unit Registers
*/
-
+
struct sa_drawbridge_CSR {
- /* Offset | Name */
+ /* Offset | Name */
__le32 reserved[10]; /* 00h-27h | Reserved */
u8 LUT_Offset; /* 28h | Lookup Table Offset */
- u8 reserved1[3]; /* 29h-2bh | Reserved */
+ u8 reserved1[3]; /* 29h-2bh | Reserved */
__le32 LUT_Data; /* 2ch | Lookup Table Data */
__le32 reserved2[26]; /* 30h-97h | Reserved */
__le16 PRICLEARIRQ; /* 98h | Primary Clear Irq */
@@ -568,8 +623,8 @@ struct sa_drawbridge_CSR {
__le32 MAILBOX5; /* bch | Scratchpad 5 */
__le32 MAILBOX6; /* c0h | Scratchpad 6 */
__le32 MAILBOX7; /* c4h | Scratchpad 7 */
- __le32 ROM_Setup_Data; /* c8h | Rom Setup and Data */
- __le32 ROM_Control_Addr;/* cch | Rom Control and Address */
+ __le32 ROM_Setup_Data; /* c8h | Rom Setup and Data */
+ __le32 ROM_Control_Addr;/* cch | Rom Control and Address */
__le32 reserved3[12]; /* d0h-ffh | reserved */
__le32 LUT[64]; /* 100h-1ffh | Lookup Table Entries */
};
@@ -582,7 +637,7 @@ struct sa_drawbridge_CSR {
#define Mailbox5 SaDbCSR.MAILBOX5
#define Mailbox6 SaDbCSR.MAILBOX6
#define Mailbox7 SaDbCSR.MAILBOX7
-
+
#define DoorbellReg_p SaDbCSR.PRISETIRQ
#define DoorbellReg_s SaDbCSR.SECSETIRQ
#define DoorbellClrReg_p SaDbCSR.PRICLEARIRQ
@@ -596,19 +651,19 @@ struct sa_drawbridge_CSR {
#define DOORBELL_5 0x0020
#define DOORBELL_6 0x0040
-
+
#define PrintfReady DOORBELL_5
#define PrintfDone DOORBELL_5
-
+
struct sa_registers {
struct sa_drawbridge_CSR SaDbCSR; /* 98h - c4h */
};
-
+
#define Sa_MINIPORT_REVISION 1
#define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
-#define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
+#define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
#define sa_writew(AEP, CSR, value) writew(value, &((AEP)->regs.sa->CSR))
#define sa_writel(AEP, CSR, value) writel(value, &((AEP)->regs.sa->CSR))
@@ -625,31 +680,27 @@ struct rx_mu_registers {
__le32 IMRx[2]; /* 1310h | 10h | Inbound Message Registers */
__le32 OMRx[2]; /* 1318h | 18h | Outbound Message Registers */
__le32 IDR; /* 1320h | 20h | Inbound Doorbell Register */
- __le32 IISR; /* 1324h | 24h | Inbound Interrupt
+ __le32 IISR; /* 1324h | 24h | Inbound Interrupt
Status Register */
- __le32 IIMR; /* 1328h | 28h | Inbound Interrupt
- Mask Register */
+ __le32 IIMR; /* 1328h | 28h | Inbound Interrupt
+ Mask Register */
__le32 ODR; /* 132Ch | 2Ch | Outbound Doorbell Register */
- __le32 OISR; /* 1330h | 30h | Outbound Interrupt
+ __le32 OISR; /* 1330h | 30h | Outbound Interrupt
Status Register */
- __le32 OIMR; /* 1334h | 34h | Outbound Interrupt
+ __le32 OIMR; /* 1334h | 34h | Outbound Interrupt
Mask Register */
- /* * Must access through ATU Inbound
- Translation Window */
+ __le32 reserved2; /* 1338h | 38h | Reserved */
+ __le32 reserved3; /* 133Ch | 3Ch | Reserved */
+ __le32 InboundQueue;/* 1340h | 40h | Inbound Queue Port relative to firmware */
+ __le32 OutboundQueue;/*1344h | 44h | Outbound Queue Port relative to firmware */
+ /* * Must access through ATU Inbound
+ Translation Window */
};
struct rx_inbound {
__le32 Mailbox[8];
};
-#define InboundMailbox0 IndexRegs.Mailbox[0]
-#define InboundMailbox1 IndexRegs.Mailbox[1]
-#define InboundMailbox2 IndexRegs.Mailbox[2]
-#define InboundMailbox3 IndexRegs.Mailbox[3]
-#define InboundMailbox4 IndexRegs.Mailbox[4]
-#define InboundMailbox5 IndexRegs.Mailbox[5]
-#define InboundMailbox6 IndexRegs.Mailbox[6]
-
#define INBOUNDDOORBELL_0 0x00000001
#define INBOUNDDOORBELL_1 0x00000002
#define INBOUNDDOORBELL_2 0x00000004
@@ -668,8 +719,8 @@ struct rx_inbound {
#define OutboundDoorbellReg MUnit.ODR
struct rx_registers {
- struct rx_mu_registers MUnit; /* 1300h - 1334h */
- __le32 reserved1[6]; /* 1338h - 134ch */
+ struct rx_mu_registers MUnit; /* 1300h - 1347h */
+ __le32 reserved1[2]; /* 1348h - 134ch */
struct rx_inbound IndexRegs;
};
@@ -686,8 +737,8 @@ struct rx_registers {
#define rkt_inbound rx_inbound
struct rkt_registers {
- struct rkt_mu_registers MUnit; /* 1300h - 1334h */
- __le32 reserved1[1010]; /* 1338h - 22fch */
+ struct rkt_mu_registers MUnit; /* 1300h - 1347h */
+ __le32 reserved1[1006]; /* 1348h - 22fch */
struct rkt_inbound IndexRegs; /* 2300h - */
};
@@ -696,17 +747,61 @@ struct rkt_registers {
#define rkt_writeb(AEP, CSR, value) writeb(value, &((AEP)->regs.rkt->CSR))
#define rkt_writel(AEP, CSR, value) writel(value, &((AEP)->regs.rkt->CSR))
-struct fib;
+/*
+ * PMC SRC message unit registers
+ */
+
+#define src_inbound rx_inbound
+
+struct src_mu_registers {
+ /* PCI*| Name */
+ __le32 reserved0[8]; /* 00h | Reserved */
+ __le32 IDR; /* 20h | Inbound Doorbell Register */
+ __le32 IISR; /* 24h | Inbound Int. Status Register */
+ __le32 reserved1[3]; /* 28h | Reserved */
+ __le32 OIMR; /* 34h | Outbound Int. Mask Register */
+ __le32 reserved2[25]; /* 38h | Reserved */
+ __le32 ODR_R; /* 9ch | Outbound Doorbell Read */
+ __le32 ODR_C; /* a0h | Outbound Doorbell Clear */
+ __le32 reserved3[6]; /* a4h | Reserved */
+ __le32 OMR; /* bch | Outbound Message Register */
+ __le32 IQ_L; /* c0h | Inbound Queue (Low address) */
+ __le32 IQ_H; /* c4h | Inbound Queue (High address) */
+};
+
+struct src_registers {
+ struct src_mu_registers MUnit; /* 00h - c7h */
+ union {
+ struct {
+ __le32 reserved1[130790]; /* c8h - 7fc5fh */
+ struct src_inbound IndexRegs; /* 7fc60h */
+ } tupelo;
+ struct {
+ __le32 reserved1[974]; /* c8h - fffh */
+ struct src_inbound IndexRegs; /* 1000h */
+ } denali;
+ } u;
+};
+
+#define src_readb(AEP, CSR) readb(&((AEP)->regs.src.bar0->CSR))
+#define src_readl(AEP, CSR) readl(&((AEP)->regs.src.bar0->CSR))
+#define src_writeb(AEP, CSR, value) writeb(value, \
+ &((AEP)->regs.src.bar0->CSR))
+#define src_writel(AEP, CSR, value) writel(value, \
+ &((AEP)->regs.src.bar0->CSR))
+
+#define SRC_ODR_SHIFT 12
+#define SRC_IDR_SHIFT 9
typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
struct aac_fib_context {
- s16 type; // used for verification of structure
- s16 size;
+ s16 type; // used for verification of structure
+ s16 size;
u32 unique; // unique value representing this context
ulong jiffies; // used for cleanup - dmb changed to ulong
struct list_head next; // used to link context's into a linked list
- struct semaphore wait_sem; // this is used to wait for the next fib to arrive.
+ struct semaphore wait_sem; // this is used to wait for the next fib to arrive.
int wait; // Set to true when thread is in WaitForSingleObject
unsigned long count; // total number of FIBs on FibList
struct list_head fib_list; // this holds fibs and their attached hw_fibs
@@ -725,9 +820,9 @@ struct sense_data {
u8 EOM:1; /* End Of Medium - reserved for random access devices */
u8 filemark:1; /* Filemark - reserved for random access devices */
- u8 information[4]; /* for direct-access devices, contains the unsigned
- * logical block address or residue associated with
- * the sense key
+ u8 information[4]; /* for direct-access devices, contains the unsigned
+ * logical block address or residue associated with
+ * the sense key
*/
u8 add_sense_len; /* number of additional sense bytes to follow this field */
u8 cmnd_info[4]; /* not used */
@@ -737,7 +832,7 @@ struct sense_data {
u8 bit_ptr:3; /* indicates which byte of the CDB or parameter data
* was in error
*/
- u8 BPV:1; /* bit pointer valid (BPV): 1- indicates that
+ u8 BPV:1; /* bit pointer valid (BPV): 1- indicates that
* the bit_ptr field has valid value
*/
u8 reserved2:2;
@@ -752,7 +847,10 @@ struct fsa_dev_info {
u64 last;
u64 size;
u32 type;
+ u32 config_waiting_on;
+ unsigned long config_waiting_stamp;
u16 queue_depth;
+ u8 config_needed;
u8 valid;
u8 ro;
u8 locked;
@@ -768,30 +866,25 @@ struct fib {
/*
* The Adapter that this I/O is destined for.
*/
- struct aac_dev *dev;
+ struct aac_dev *dev;
/*
* This is the event the sendfib routine will wait on if the
* caller did not pass one and this is synch io.
*/
- struct semaphore event_wait;
+ struct semaphore event_wait;
spinlock_t event_lock;
u32 done; /* gets set to 1 when fib is complete */
- fib_callback callback;
- void *callback_data;
+ fib_callback callback;
+ void *callback_data;
u32 flags; // u32 dmb was ulong
/*
- * The following is used to put this fib context onto the
- * Outstanding I/O queue.
- */
- struct list_head queue;
- /*
* And for the internal issue/reply queues (we may be able
* to merge these two)
*/
struct list_head fiblink;
- void *data;
- struct hw_fib *hw_fib; /* Actual shared object */
+ void *data;
+ struct hw_fib *hw_fib_va; /* Actual shared object */
dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
};
@@ -800,7 +893,7 @@ struct fib {
*
* This is returned by the RequestAdapterInfo block
*/
-
+
struct aac_adapter_info
{
__le32 platform;
@@ -819,7 +912,7 @@ struct aac_adapter_info
__le32 biosrev;
__le32 biosbuild;
__le32 cluster;
- __le32 clusterchannelmask;
+ __le32 clusterchannelmask;
__le32 serial[2];
__le32 battery;
__le32 options;
@@ -836,12 +929,37 @@ struct aac_supplement_adapter_info
__le32 Version;
__le32 FeatureBits;
u8 SlotNumber;
- u8 ReservedPad0[0];
+ u8 ReservedPad0[3];
u8 BuildDate[12];
__le32 CurrentNumberPorts;
- __le32 ReservedGrowth[24];
-};
-#define AAC_FEATURE_FALCON 0x00000010
+ struct {
+ u8 AssemblyPn[8];
+ u8 FruPn[8];
+ u8 BatteryFruPn[8];
+ u8 EcVersionString[8];
+ u8 Tsid[12];
+ } VpdInfo;
+ __le32 FlashFirmwareRevision;
+ __le32 FlashFirmwareBuild;
+ __le32 RaidTypeMorphOptions;
+ __le32 FlashFirmwareBootRevision;
+ __le32 FlashFirmwareBootBuild;
+ u8 MfgPcbaSerialNo[12];
+ u8 MfgWWNName[8];
+ __le32 SupportedOptions2;
+ __le32 StructExpansion;
+ /* StructExpansion == 1 */
+ __le32 FeatureBits3;
+ __le32 SupportedPerformanceModes;
+ __le32 ReservedForFutureGrowth[80];
+};
+#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010)
+#define AAC_FEATURE_JBOD cpu_to_le32(0x08000000)
+/* SupportedOptions2 */
+#define AAC_OPTION_MU_RESET cpu_to_le32(0x00000001)
+#define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002)
+#define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004)
+#define AAC_OPTION_DOORBELL_RESET cpu_to_le32(0x00004000)
#define AAC_SIS_VERSION_V3 3
#define AAC_SIS_SLOT_UNKNOWN 0xFF
@@ -892,17 +1010,22 @@ struct aac_bus_info_response {
#define AAC_OPT_HOST_TIME_FIB cpu_to_le32(1<<4)
#define AAC_OPT_RAID50 cpu_to_le32(1<<5)
#define AAC_OPT_4GB_WINDOW cpu_to_le32(1<<6)
-#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7)
+#define AAC_OPT_SCSI_UPGRADEABLE cpu_to_le32(1<<7)
#define AAC_OPT_SOFT_ERR_REPORT cpu_to_le32(1<<8)
-#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9)
+#define AAC_OPT_SUPPORTED_RECONDITION cpu_to_le32(1<<9)
#define AAC_OPT_SGMAP_HOST64 cpu_to_le32(1<<10)
#define AAC_OPT_ALARM cpu_to_le32(1<<11)
#define AAC_OPT_NONDASD cpu_to_le32(1<<12)
-#define AAC_OPT_SCSI_MANAGED cpu_to_le32(1<<13)
+#define AAC_OPT_SCSI_MANAGED cpu_to_le32(1<<13)
#define AAC_OPT_RAID_SCSI_MODE cpu_to_le32(1<<14)
#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO cpu_to_le32(1<<16)
#define AAC_OPT_NEW_COMM cpu_to_le32(1<<17)
#define AAC_OPT_NEW_COMM_64 cpu_to_le32(1<<18)
+#define AAC_OPT_NEW_COMM_TYPE1 cpu_to_le32(1<<28)
+#define AAC_OPT_NEW_COMM_TYPE2 cpu_to_le32(1<<29)
+#define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30)
+#define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31)
+
struct aac_dev
{
@@ -910,16 +1033,16 @@ struct aac_dev
const char *name;
int id;
- u16 irq_mask;
/*
* negotiated FIB settings
*/
unsigned max_fib_size;
unsigned sg_tablesize;
+ unsigned max_num_aif;
/*
* Map for 128 fib objects (64k)
- */
+ */
dma_addr_t hw_fib_pa;
struct hw_fib *hw_fib_va;
struct hw_fib *aif_base_va;
@@ -929,25 +1052,37 @@ struct aac_dev
struct fib *fibs;
struct fib *free_fib;
- struct fib *timeout_fib;
spinlock_t fib_lock;
-
+
struct aac_queue_block *queues;
/*
* The user API will use an IOCTL to register itself to receive
* FIBs from the adapter. The following list is used to keep
* track of all the threads that have requested these FIBs. The
- * mutex is used to synchronize access to all data associated
+ * mutex is used to synchronize access to all data associated
* with the adapter fibs.
*/
struct list_head fib_list;
struct adapter_ops a_ops;
unsigned long fsrev; /* Main driver's revision number */
-
+
+ resource_size_t base_start; /* main IO base */
+ resource_size_t dbg_base; /* address of UART
+ * debug buffer */
+
+ resource_size_t base_size, dbg_size; /* Size of
+ * mapped in region */
+
struct aac_init *init; /* Holds initialization info to communicate with adapter */
- dma_addr_t init_pa; /* Holds physical address of the init struct */
-
+ dma_addr_t init_pa; /* Holds physical address of the init struct */
+
+ u32 *host_rrq; /* response queue
+ * if AAC_COMM_MESSAGE_TYPE1 */
+
+ dma_addr_t host_rrq_pa; /* phys. address */
+ u32 host_rrq_idx; /* index into rrq buffer */
+
struct pci_dev *pdev; /* Our PCI interface */
void * printfbuf; /* pointer to buffer used for printf's from the adapter */
void * comm_addr; /* Base address of Comm area */
@@ -959,33 +1094,63 @@ struct aac_dev
int maximum_num_physicals;
int maximum_num_channels;
struct fsa_dev_info *fsa_dev;
- pid_t thread_pid;
+ struct task_struct *thread;
int cardtype;
-
+
/*
* The following is the device specific extension.
*/
+#ifndef AAC_MIN_FOOTPRINT_SIZE
+# define AAC_MIN_FOOTPRINT_SIZE 8192
+# define AAC_MIN_SRC_BAR0_SIZE 0x400000
+# define AAC_MIN_SRC_BAR1_SIZE 0x800
+# define AAC_MIN_SRCV_BAR0_SIZE 0x100000
+# define AAC_MIN_SRCV_BAR1_SIZE 0x400
+#endif
union
{
struct sa_registers __iomem *sa;
struct rx_registers __iomem *rx;
struct rkt_registers __iomem *rkt;
+ struct {
+ struct src_registers __iomem *bar0;
+ char __iomem *bar1;
+ } src;
} regs;
+ volatile void __iomem *base, *dbg_base_mapped;
+ volatile struct rx_inbound __iomem *IndexRegs;
u32 OIMR; /* Mask Register Cache */
/*
* AIF thread states
*/
u32 aif_thread;
- struct completion aif_completion;
struct aac_adapter_info adapter_info;
struct aac_supplement_adapter_info supplement_adapter_info;
/* These are in adapter info but they are in the io flow so
* lets break them out so we don't have to do an AND to check them
*/
- u8 nondasd_support;
+ u8 nondasd_support;
+ u8 jbod;
+ u8 cache_protected;
u8 dac_support;
+ u8 needs_dac;
u8 raid_scsi_mode;
+ u8 comm_interface;
+# define AAC_COMM_PRODUCER 0
+# define AAC_COMM_MESSAGE 1
+# define AAC_COMM_MESSAGE_TYPE1 3
+# define AAC_COMM_MESSAGE_TYPE2 4
+ u8 raw_io_interface;
+ u8 raw_io_64;
u8 printf_enabled;
+ u8 in_reset;
+ u8 msi;
+ int management_fib_count;
+ spinlock_t manage_lock;
+ spinlock_t sync_lock;
+ int sync_mode;
+ struct fib *sync_fib;
+ struct list_head sync_fib_list;
};
#define aac_adapter_interrupt(dev) \
@@ -994,26 +1159,58 @@ struct aac_dev
#define aac_adapter_notify(dev, event) \
(dev)->a_ops.adapter_notify(dev, event)
+#define aac_adapter_disable_int(dev) \
+ (dev)->a_ops.adapter_disable_int(dev)
+
+#define aac_adapter_enable_int(dev) \
+ (dev)->a_ops.adapter_enable_int(dev)
+
#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
(dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
#define aac_adapter_check_health(dev) \
(dev)->a_ops.adapter_check_health(dev)
+#define aac_adapter_restart(dev,bled) \
+ (dev)->a_ops.adapter_restart(dev,bled)
+
+#define aac_adapter_ioremap(dev, size) \
+ (dev)->a_ops.adapter_ioremap(dev, size)
+
+#define aac_adapter_deliver(fib) \
+ ((fib)->dev)->a_ops.adapter_deliver(fib)
+
+#define aac_adapter_bounds(dev,cmd,lba) \
+ dev->a_ops.adapter_bounds(dev,cmd,lba)
+
+#define aac_adapter_read(fib,cmd,lba,count) \
+ ((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count)
+
+#define aac_adapter_write(fib,cmd,lba,count,fua) \
+ ((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count,fua)
+
+#define aac_adapter_scsi(fib,cmd) \
+ ((fib)->dev)->a_ops.adapter_scsi(fib,cmd)
+
+#define aac_adapter_comm(dev,comm) \
+ (dev)->a_ops.adapter_comm(dev, comm)
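
All of these macros forward through the adapter_ops table, so each controller family (rx, rkt, sa, src) plugs its transport in once at init time and the rest of the driver stays family-agnostic. A minimal stand-alone sketch of the pattern, with illustrative names rather than the driver's own types:

    #include <stdio.h>

    struct dev;
    struct adapter_ops {
            void (*adapter_notify)(struct dev *d, unsigned event);
    };
    struct dev { struct adapter_ops a_ops; };

    #define adapter_notify(d, e) ((d)->a_ops.adapter_notify(d, e))

    static void rx_notify(struct dev *d, unsigned event)
    {
            printf("rx transport: notify event %u\n", event);
    }

    int main(void)
    {
            struct dev d = { { rx_notify } };   /* chosen once, at init */

            adapter_notify(&d, 3);              /* dispatches to rx_notify */
            return 0;
    }
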
#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
+#define FIB_CONTEXT_FLAG (0x00000002)
+#define FIB_CONTEXT_FLAG_WAIT (0x00000004)
+#define FIB_CONTEXT_FLAG_FASTRESP (0x00000008)
/*
* Define the command values
*/
-
+
#define Null 0
-#define GetAttributes 1
-#define SetAttributes 2
-#define Lookup 3
-#define ReadLink 4
-#define Read 5
-#define Write 6
+#define GetAttributes 1
+#define SetAttributes 2
+#define Lookup 3
+#define ReadLink 4
+#define Read 5
+#define Write 6
#define Create 7
#define MakeDirectory 8
#define SymbolicLink 9
@@ -1077,6 +1274,7 @@ struct aac_dev
#define ST_DQUOT 69
#define ST_STALE 70
#define ST_REMOTE 71
+#define ST_NOT_READY 72
#define ST_BADHANDLE 10001
#define ST_NOT_SYNC 10002
#define ST_BAD_COOKIE 10003
@@ -1097,7 +1295,7 @@ struct aac_dev
#define CACHE_UNSTABLE 2
/*
- * Lets the client know at which level the data was commited on
+ * Lets the client know at which level the data was committed on
* a write request
*/
@@ -1107,21 +1305,37 @@ struct aac_dev
#define CMDATA_SYNCH 4
#define CMUNSTABLE 5
+#define RIO_TYPE_WRITE 0x0000
+#define RIO_TYPE_READ 0x0001
+#define RIO_SUREWRITE 0x0008
+
+#define RIO2_IO_TYPE 0x0003
+#define RIO2_IO_TYPE_WRITE 0x0000
+#define RIO2_IO_TYPE_READ 0x0001
+#define RIO2_IO_TYPE_VERIFY 0x0002
+#define RIO2_IO_ERROR 0x0004
+#define RIO2_IO_SUREWRITE 0x0008
+#define RIO2_SGL_CONFORMANT 0x0010
+#define RIO2_SG_FORMAT 0xF000
+#define RIO2_SG_FORMAT_ARC 0x0000
+#define RIO2_SG_FORMAT_SRL 0x1000
+#define RIO2_SG_FORMAT_IEEE1212 0x2000
+
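
The RIO2 masks partition the 16-bit flags word of struct aac_raw_io2 (defined just below): bits 0-1 select the I/O type, bit 3 marks sure-write, bit 4 a conformant SGL, and bits 12-15 the SGL format. A sketch of composing and decoding a read request's flags, with the cpu_to_le16() conversion omitted:

    #include <stdio.h>
    #include <stdint.h>

    #define RIO2_IO_TYPE            0x0003
    #define RIO2_IO_TYPE_READ       0x0001
    #define RIO2_SGL_CONFORMANT     0x0010
    #define RIO2_SG_FORMAT_IEEE1212 0x2000

    int main(void)
    {
            uint16_t flags = RIO2_IO_TYPE_READ | RIO2_SG_FORMAT_IEEE1212
                           | RIO2_SGL_CONFORMANT;

            if ((flags & RIO2_IO_TYPE) == RIO2_IO_TYPE_READ)
                    printf("read, IEEE1212 SGL, conformant\n");
            return 0;
    }
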
struct aac_read
{
- __le32 command;
- __le32 cid;
- __le32 block;
- __le32 count;
+ __le32 command;
+ __le32 cid;
+ __le32 block;
+ __le32 count;
struct sgmap sg; // Must be last in struct because it is variable
};
struct aac_read64
{
- __le32 command;
- __le16 cid;
- __le16 sector_count;
- __le32 block;
+ __le32 command;
+ __le16 cid;
+ __le16 sector_count;
+ __le32 block;
__le16 pad;
__le16 flags;
struct sgmap64 sg; // Must be last in struct because it is variable
@@ -1129,26 +1343,26 @@ struct aac_read64
struct aac_read_reply
{
- __le32 status;
- __le32 count;
+ __le32 status;
+ __le32 count;
};
struct aac_write
{
__le32 command;
- __le32 cid;
- __le32 block;
- __le32 count;
- __le32 stable; // Not used
+ __le32 cid;
+ __le32 block;
+ __le32 count;
+ __le32 stable; // Not used
struct sgmap sg; // Must be last in struct because it is variable
};
struct aac_write64
{
- __le32 command;
- __le16 cid;
- __le16 sector_count;
- __le32 block;
+ __le32 command;
+ __le16 cid;
+ __le16 sector_count;
+ __le32 block;
__le16 pad;
__le16 flags;
struct sgmap64 sg; // Must be last in struct because it is variable
@@ -1156,10 +1370,37 @@ struct aac_write64
struct aac_write_reply
{
__le32 status;
- __le32 count;
+ __le32 count;
__le32 committed;
};
+struct aac_raw_io
+{
+ __le32 block[2];
+ __le32 count;
+ __le16 cid;
+ __le16 flags; /* 00 W, 01 R */
+ __le16 bpTotal; /* reserved for F/W use */
+ __le16 bpComplete; /* reserved for F/W use */
+ struct sgmapraw sg;
+};
+
+struct aac_raw_io2 {
+ __le32 blockLow;
+ __le32 blockHigh;
+ __le32 byteCount;
+ __le16 cid;
+ __le16 flags; /* RIO2 flags */
+ __le32 sgeFirstSize; /* size of first sge el. */
+ __le32 sgeNominalSize; /* size of 2nd sge el. (if conformant) */
+ u8 sgeCnt; /* only 8 bits required */
+ u8 bpTotal; /* reserved for F/W use */
+ u8 bpComplete; /* reserved for F/W use */
+ u8 sgeFirstIndex; /* reserved for F/W use */
+ u8 unused[4];
+ struct sge_ieee1212 sge[1];
+};
+
#define CT_FLUSH_CACHE 129
struct aac_synchronize {
__le32 command; /* VM_ContainerConfig */
@@ -1184,6 +1425,31 @@ struct aac_synchronize_reply {
u8 data[16];
};
+#define CT_POWER_MANAGEMENT 245
+#define CT_PM_START_UNIT 2
+#define CT_PM_STOP_UNIT 3
+#define CT_PM_UNIT_IMMEDIATE 1
+struct aac_power_management {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_POWER_MANAGEMENT */
+ __le32 sub; /* CT_PM_* */
+ __le32 cid;
+ __le32 parm; /* CT_PM_sub_* */
+};
+
+#define CT_PAUSE_IO 65
+#define CT_RELEASE_IO 66
+struct aac_pause {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_PAUSE_IO */
+ __le32 timeout; /* 10ms ticks */
+ __le32 min;
+ __le32 noRescan;
+ __le32 parm3;
+ __le32 parm4;
+ __le32 count; /* sizeof(((struct aac_pause_reply *)NULL)->data) */
+};
+
struct aac_srb
{
__le32 function;
@@ -1200,7 +1466,7 @@ struct aac_srb
};
/*
- * This and assocated data structs are used by the
+ * This and associated data structs are used by the
* ioctl caller and are in cpu order.
*/
struct user_aac_srb
@@ -1235,10 +1501,10 @@ struct aac_srb_reply
#define SRB_NoDataXfer 0x0000
#define SRB_DisableDisconnect 0x0004
#define SRB_DisableSynchTransfer 0x0008
-#define SRB_BypassFrozenQueue 0x0010
+#define SRB_BypassFrozenQueue 0x0010
#define SRB_DisableAutosense 0x0020
#define SRB_DataIn 0x0040
-#define SRB_DataOut 0x0080
+#define SRB_DataOut 0x0080
/*
* SRB Functions - set in aac_srb->function
@@ -1261,7 +1527,7 @@ struct aac_srb_reply
#define SRBF_RemoveDevice 0x0016
#define SRBF_DomainValidation 0x0017
-/*
+/*
* SRB SCSI Status - set in aac_srb->scsi_status
*/
#define SRB_STATUS_PENDING 0x00
@@ -1321,8 +1587,10 @@ struct aac_srb_reply
#define VM_CtBlockVerify64 18
#define VM_CtHostRead64 19
#define VM_CtHostWrite64 20
+#define VM_DrvErrTblLog 21
+#define VM_NameServe64 22
-#define MAX_VMCOMMAND_NUM 21 /* used for sizing stats array - leave last */
+#define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */
/*
* Descriptive information (eg, vital stats)
@@ -1418,24 +1686,25 @@ struct aac_get_container_count_resp {
*/
struct aac_mntent {
- __le32 oid;
+ __le32 oid;
u8 name[16]; /* if applicable */
struct creation_info create_info; /* if applicable */
__le32 capacity;
- __le32 vol; /* substrate structure */
- __le32 obj; /* FT_FILESYS,
- FT_DATABASE, etc. */
- __le32 state; /* unready for mounting,
+ __le32 vol; /* substrate structure */
+ __le32 obj; /* FT_FILESYS, etc. */
+ __le32 state; /* unready for mounting,
readonly, etc. */
- union aac_contentinfo fileinfo; /* Info specific to content
+ union aac_contentinfo fileinfo; /* Info specific to content
manager (eg, filesystem) */
- __le32 altoid; /* != oid <==> snapshot or
+ __le32 altoid; /* != oid <==> snapshot or
broken mirror exists */
+ __le32 capacityhigh;
};
-#define FSCS_NOTCLEAN 0x0001 /* fsck is neccessary before mounting */
+#define FSCS_NOTCLEAN 0x0001 /* fsck is necessary before mounting */
#define FSCS_READONLY 0x0002 /* possible result of broken mirror */
#define FSCS_HIDDEN 0x0004 /* should be ignored - set during a clear */
+#define FSCS_NOT_READY 0x0008 /* Array spinning up to fulfil request */
struct aac_query_mount {
__le32 command;
@@ -1445,7 +1714,7 @@ struct aac_query_mount {
struct aac_mount {
__le32 status;
- __le32 type; /* should be same as that requested */
+ __le32 type; /* should be same as that requested */
__le32 count;
struct aac_mntent mnt[1];
};
@@ -1462,7 +1731,6 @@ struct aac_get_name {
__le32 count; /* sizeof(((struct aac_get_name_resp *)NULL)->data) */
};
-#define CT_OK 218
struct aac_get_name_resp {
__le32 dummy0;
__le32 dummy1;
@@ -1475,6 +1743,20 @@ struct aac_get_name_resp {
u8 data[16];
};
+#define CT_CID_TO_32BITS_UID 165
+struct aac_get_serial {
+ __le32 command; /* VM_ContainerConfig */
+ __le32 type; /* CT_CID_TO_32BITS_UID */
+ __le32 cid;
+};
+
+struct aac_get_serial_resp {
+ __le32 dummy0;
+ __le32 dummy1;
+ __le32 status; /* CT_OK */
+ __le32 uid;
+};
+
/*
* The following command is sent to shut down each container.
*/
@@ -1502,7 +1784,7 @@ struct aac_delete_disk {
u32 disknum;
u32 cnum;
};
-
+
struct fib_ioctl
{
u32 fibctx;
@@ -1513,12 +1795,13 @@ struct fib_ioctl
struct revision
{
u32 compat;
- u32 version;
- u32 build;
+ __le32 version;
+ __le32 build;
};
-
+
+
/*
- * Ugly - non Linux like ioctl coding for back compat.
+ * Ugly - non-Linux-like ioctl coding for back compat.
*/
#define CTL_CODE(function, method) ( \
@@ -1526,7 +1809,7 @@ struct revision
)
/*
- * Define the method codes for how buffers are passed for I/O and FS
+ * Define the method codes for how buffers are passed for I/O and FS
* controls
*/
@@ -1537,15 +1820,15 @@ struct revision
* Filesystem ioctls
*/
-#define FSACTL_SENDFIB CTL_CODE(2050, METHOD_BUFFERED)
-#define FSACTL_SEND_RAW_SRB CTL_CODE(2067, METHOD_BUFFERED)
+#define FSACTL_SENDFIB CTL_CODE(2050, METHOD_BUFFERED)
+#define FSACTL_SEND_RAW_SRB CTL_CODE(2067, METHOD_BUFFERED)
#define FSACTL_DELETE_DISK 0x163
#define FSACTL_QUERY_DISK 0x173
#define FSACTL_OPEN_GET_ADAPTER_FIB CTL_CODE(2100, METHOD_BUFFERED)
#define FSACTL_GET_NEXT_ADAPTER_FIB CTL_CODE(2101, METHOD_BUFFERED)
#define FSACTL_CLOSE_GET_ADAPTER_FIB CTL_CODE(2102, METHOD_BUFFERED)
#define FSACTL_MINIPORT_REV_CHECK CTL_CODE(2107, METHOD_BUFFERED)
-#define FSACTL_GET_PCI_INFO CTL_CODE(2119, METHOD_BUFFERED)
+#define FSACTL_GET_PCI_INFO CTL_CODE(2119, METHOD_BUFFERED)
#define FSACTL_FORCE_DELETE_DISK CTL_CODE(2120, METHOD_NEITHER)
#define FSACTL_GET_CONTAINERS 2131
#define FSACTL_SEND_LARGE_FIB CTL_CODE(2138, METHOD_BUFFERED)
@@ -1554,7 +1837,7 @@ struct revision
struct aac_common
{
/*
- * If this value is set to 1 then interrupt moderation will occur
+ * If this value is set to 1 then interrupt moderation will occur
* in the base communication support.
*/
u32 irq_mod;
@@ -1583,11 +1866,11 @@ extern struct aac_common aac_config;
* The following macro is used when sending and receiving FIBs. It is
* only used for debugging.
*/
-
+
#ifdef DBG
#define FIB_COUNTER_INCREMENT(counter) (counter)++
#else
-#define FIB_COUNTER_INCREMENT(counter)
+#define FIB_COUNTER_INCREMENT(counter)
#endif
/*
@@ -1607,6 +1890,7 @@ extern struct aac_common aac_config;
#define RCV_TEMP_READINGS 0x00000025
#define GET_COMM_PREFERRED_SETTINGS 0x00000026
#define IOP_RESET 0x00001000
+#define IOP_RESET_ALWAYS 0x00001001
#define RE_INIT_ADAPTER 0x000000ee
/*
@@ -1618,22 +1902,26 @@ extern struct aac_common aac_config;
*
* The adapter reports its present state through the phase. Only
* a single phase should ever be set. Each phase can have multiple
- * phase status bits to provide more detailed information about the
- * state of the board. Care should be taken to ensure that any phase
+ * phase status bits to provide more detailed information about the
+ * state of the board. Care should be taken to ensure that any phase
* status bits that are set when changing the phase are also valid
* for the new phase or be cleared out. Adapter software (monitor,
- * iflash, kernel) is responsible for properly maintining the phase
+ * iflash, kernel) is responsible for properly maintaining the phase
* status mailbox when it is running.
- *
- * MONKER_API Phases
*
- * Phases are bit oriented. It is NOT valid to have multiple bits set
- */
+ * MONKER_API Phases
+ *
+ * Phases are bit oriented. It is NOT valid to have multiple bits set
+ */
#define SELF_TEST_FAILED 0x00000004
#define MONITOR_PANIC 0x00000020
#define KERNEL_UP_AND_RUNNING 0x00000080
#define KERNEL_PANIC 0x00000100
+#define FLASH_UPD_PENDING 0x00002000
+#define FLASH_UPD_SUCCESS 0x00004000
+#define FLASH_UPD_FAILED 0x00008000
+#define FWUPD_TIMEOUT (5 * 60)
/*
* Doorbell bit defines
@@ -1646,41 +1934,55 @@ extern struct aac_common aac_config;
#define DoorBellAdapterNormCmdNotFull (1<<3) /* Adapter -> Host */
#define DoorBellAdapterNormRespNotFull (1<<4) /* Adapter -> Host */
#define DoorBellPrintfReady (1<<5) /* Adapter -> Host */
+#define DoorBellAifPending (1<<6) /* Adapter -> Host */
+
+/* PMC specific outbound doorbell bits */
+#define PmDoorBellResponseSent (1<<1) /* Adapter -> Host */
/*
* For FIB communication, we need all of the following things
* to send back to the user.
*/
-
-#define AifCmdEventNotify 1 /* Notify of event */
+
+#define AifCmdEventNotify 1 /* Notify of event */
#define AifEnConfigChange 3 /* Adapter configuration change */
#define AifEnContainerChange 4 /* Container configuration change */
#define AifEnDeviceFailure 5 /* SCSI device failed */
+#define AifEnEnclosureManagement 13 /* EM_DRIVE_* */
+#define EM_DRIVE_INSERTION 31
+#define EM_DRIVE_REMOVAL 32
+#define AifEnBatteryEvent 14 /* Change in Battery State */
#define AifEnAddContainer 15 /* A new array was created */
#define AifEnDeleteContainer 16 /* A container was deleted */
#define AifEnExpEvent 23 /* Firmware Event Log */
#define AifExeFirmwarePanic 3 /* Firmware Event Panic */
#define AifHighPriority 3 /* Highest Priority Event */
+#define AifEnAddJBOD 30 /* JBOD created */
+#define AifEnDeleteJBOD 31 /* JBOD deleted */
#define AifCmdJobProgress 2 /* Progress report */
#define AifJobCtrZero 101 /* Array Zero progress */
#define AifJobStsSuccess 1 /* Job completes */
+#define AifJobStsRunning 102 /* Job running */
#define AifCmdAPIReport 3 /* Report from other user of API */
#define AifCmdDriverNotify 4 /* Notify host driver of event */
#define AifDenMorphComplete 200 /* A morph operation completed */
#define AifDenVolumeExtendComplete 201 /* A volume extend completed */
#define AifReqJobList 100 /* Gets back complete job list */
#define AifReqJobsForCtr 101 /* Gets back jobs for specific container */
-#define AifReqJobsForScsi 102 /* Gets back jobs for specific SCSI device */
-#define AifReqJobReport 103 /* Gets back a specific job report or list of them */
+#define AifReqJobsForScsi 102 /* Gets back jobs for specific SCSI device */
+#define AifReqJobReport 103 /* Gets back a specific job report or list of them */
#define AifReqTerminateJob 104 /* Terminates job */
#define AifReqSuspendJob 105 /* Suspends a job */
-#define AifReqResumeJob 106 /* Resumes a job */
+#define AifReqResumeJob 106 /* Resumes a job */
#define AifReqSendAPIReport 107 /* API generic report requests */
#define AifReqAPIJobStart 108 /* Start a job from the API */
#define AifReqAPIJobUpdate 109 /* Update a job report from the API */
#define AifReqAPIJobFinish 110 /* Finish a job from the API */
+/* PMC NEW COMM: Request the event data */
+#define AifReqEvent 200
+
/*
* Adapter Initiated FIB command structures. Start with the adapter
* initiated FIBs that really come from the adapter, and get responded
@@ -1694,46 +1996,78 @@ struct aac_aifcmd {
};
/**
- * Convert capacity to cylinders
- * accounting for the fact capacity could be a 64 bit value
+ * Convert capacity to cylinders
+ * accounting for the fact capacity could be a 64 bit value
*
*/
-static inline u32 cap_to_cyls(sector_t capacity, u32 divisor)
+static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
{
sector_div(capacity, divisor);
- return (u32)capacity;
+ return capacity;
}
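
cap_to_cyls() divides in place with sector_div() so the 64-bit capacity is handled safely on 32-bit hosts. A user-space sketch of the same calculation, with plain division standing in for sector_div() and an illustrative 255-head, 63-sector geometry:

    #include <stdio.h>
    #include <stdint.h>

    static unsigned int cap_to_cyls(uint64_t capacity, unsigned divisor)
    {
            return (unsigned int)(capacity / divisor);
    }

    int main(void)
    {
            /* 2 TiB worth of 512-byte sectors, 255 * 63 sectors per cylinder */
            printf("%u cylinders\n", cap_to_cyls(4294967296ULL, 255 * 63));
            return 0;
    }
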
-struct scsi_cmnd;
+/* SCp.phase values */
+#define AAC_OWNER_MIDLEVEL 0x101
+#define AAC_OWNER_LOWLEVEL 0x102
+#define AAC_OWNER_ERROR_HANDLER 0x103
+#define AAC_OWNER_FIRMWARE 0x106
const char *aac_driverinfo(struct Scsi_Host *);
-struct fib *fib_alloc(struct aac_dev *dev);
-int fib_setup(struct aac_dev *dev);
-void fib_map_free(struct aac_dev *dev);
-void fib_free(struct fib * context);
-void fib_init(struct fib * context);
+struct fib *aac_fib_alloc(struct aac_dev *dev);
+int aac_fib_setup(struct aac_dev *dev);
+void aac_fib_map_free(struct aac_dev *dev);
+void aac_fib_free(struct fib * context);
+void aac_fib_init(struct fib * context);
void aac_printf(struct aac_dev *dev, u32 val);
-int fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
+int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
-int fib_complete(struct fib * context);
-#define fib_data(fibctx) ((void *)(fibctx)->hw_fib->data)
+int aac_fib_complete(struct fib * context);
+#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
struct aac_dev *aac_init_adapter(struct aac_dev *dev);
-int aac_get_config_status(struct aac_dev *dev);
+int aac_get_config_status(struct aac_dev *dev, int commit_flag);
int aac_get_containers(struct aac_dev *dev);
int aac_scsi_cmd(struct scsi_cmnd *cmd);
int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
+#ifndef shost_to_class
+#define shost_to_class(shost) &shost->shost_dev
+#endif
+ssize_t aac_get_serial_number(struct device *dev, char *buf);
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
int aac_rx_init(struct aac_dev *dev);
int aac_rkt_init(struct aac_dev *dev);
+int aac_nark_init(struct aac_dev *dev);
int aac_sa_init(struct aac_dev *dev);
+int aac_src_init(struct aac_dev *dev);
+int aac_srcv_init(struct aac_dev *dev);
+int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
unsigned int aac_response_normal(struct aac_queue * q);
unsigned int aac_command_normal(struct aac_queue * q);
-int aac_command_thread(struct aac_dev * dev);
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index,
+ int isAif, int isFastResponse,
+ struct hw_fib *aif_fib);
+int aac_reset_adapter(struct aac_dev * dev, int forced);
+int aac_check_health(struct aac_dev * dev);
+int aac_command_thread(void *data);
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
-int fib_adapter_complete(struct fib * fibptr, unsigned short size);
+int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size);
struct aac_driver_ident* aac_get_driver_ident(int devtype);
int aac_get_adapter_info(struct aac_dev* dev);
int aac_send_shutdown(struct aac_dev *dev);
+int aac_probe_container(struct aac_dev *dev, int cid);
+int _aac_rx_init(struct aac_dev *dev);
+int aac_rx_select_comm(struct aac_dev *dev, int comm);
+int aac_rx_deliver_producer(struct fib * fib);
+char * get_container_type(unsigned type);
extern int numacb;
extern int acbsize;
+extern char aac_driver_version[];
+extern int startup_timeout;
+extern int aif_timeout;
+extern int expose_physicals;
+extern int aac_reset_devices;
+extern int aac_msi;
+extern int aac_commit;
+extern int update_interval;
+extern int check_interval;
+extern int aac_check_reset;
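
The header changes above rename the whole FIB API from fib_*() to aac_fib_*(). As a reader's aid, here is a minimal sketch of the call order a caller follows after this patch; it mirrors the aac_send_shutdown() hunk later in this patch, with the command payload elided and the names taken from this header:

/* Illustrative sketch only; mirrors aac_send_shutdown() below. */
static int example_fib_roundtrip(struct aac_dev *dev)
{
	struct fib *fibptr;
	int status;

	fibptr = aac_fib_alloc(dev);	/* take a fib off the free list */
	if (!fibptr)
		return -ENOMEM;
	aac_fib_init(fibptr);		/* reset the hw_fib header */

	/* ... fill in fib_data(fibptr) with a command payload ... */

	status = aac_fib_send(ContainerCommand, fibptr,
			      sizeof(struct aac_close), FsaNormal,
			      1, 1, NULL, NULL);	/* synchronous send */
	if (status >= 0)
		aac_fib_complete(fibptr);	/* hand back adapter state */
	if (status != -ERESTARTSYS)
		aac_fib_free(fibptr);		/* return to the free list */
	return status;
}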
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 85387099aab..fbcd48d0bfc 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1,11 +1,12 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,15 +32,17 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
-#include <asm/semaphore.h>
+#include <linux/delay.h> /* ssleep prototype */
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
#include <asm/uaccess.h>
+#include <scsi/scsi_host.h>
#include "aacraid.h"
@@ -47,13 +50,13 @@
* ioctl_send_fib - send a FIB from userspace
* @dev: adapter is being processed
* @arg: arguments to the ioctl call
- *
+ *
* This routine sends a fib to the adapter on behalf of a user level
* program.
*/
# define AAC_DEBUG_PREAMBLE KERN_INFO
# define AAC_DEBUG_POSTAMBLE
-
+
static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
{
struct hw_fib * kfib;
@@ -63,17 +66,20 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
unsigned size;
int retval;
- fibptr = fib_alloc(dev);
+ if (dev->in_reset) {
+ return -EBUSY;
+ }
+ fibptr = aac_fib_alloc(dev);
if(fibptr == NULL) {
return -ENOMEM;
}
-
- kfib = fibptr->hw_fib;
+
+ kfib = fibptr->hw_fib_va;
/*
* First copy in the header so that we can check the size field.
*/
if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
- fib_free(fibptr);
+ aac_fib_free(fibptr);
return -EFAULT;
}
/*
@@ -85,10 +91,24 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
if (size < le16_to_cpu(kfib->header.SenderSize))
size = le16_to_cpu(kfib->header.SenderSize);
if (size > dev->max_fib_size) {
+ dma_addr_t daddr;
+
+ if (size > 2048) {
+ retval = -EINVAL;
+ goto cleanup;
+ }
+
+ kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
+ if (!kfib) {
+ retval = -ENOMEM;
+ goto cleanup;
+ }
+
/* Highjack the hw_fib */
- hw_fib = fibptr->hw_fib;
+ hw_fib = fibptr->hw_fib_va;
hw_fib_pa = fibptr->hw_fib_pa;
- fibptr->hw_fib = kfib = pci_alloc_consistent(dev->pdev, size, &fibptr->hw_fib_pa);
+ fibptr->hw_fib_va = kfib;
+ fibptr->hw_fib_pa = daddr;
memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
memcpy(kfib, hw_fib, dev->max_fib_size);
}
@@ -101,18 +121,18 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
aac_adapter_interrupt(dev);
/*
- * Since we didn't really send a fib, zero out the state to allow
+ * Since we didn't really send a fib, zero out the state to allow
* cleanup code not to assert.
*/
kfib->header.XferState = 0;
} else {
- retval = fib_send(le16_to_cpu(kfib->header.Command), fibptr,
+ retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
le16_to_cpu(kfib->header.Size) , FsaNormal,
1, 1, NULL, NULL);
if (retval) {
goto cleanup;
}
- if (fib_complete(fibptr) != 0) {
+ if (aac_fib_complete(fibptr) != 0) {
retval = -EINVAL;
goto cleanup;
}
@@ -132,9 +152,10 @@ cleanup:
if (hw_fib) {
pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
fibptr->hw_fib_pa = hw_fib_pa;
- fibptr->hw_fib = hw_fib;
+ fibptr->hw_fib_va = hw_fib;
}
- fib_free(fibptr);
+ if (retval != -ERESTARTSYS)
+ aac_fib_free(fibptr);
return retval;
}
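
The oversized-FIB branch above temporarily swaps a larger coherent buffer into the fib and restores the pool buffer in cleanup. A condensed sketch of that hijack/restore pattern, with the intervening copy and send elided:

/* Sketch of ioctl_send_fib's buffer swap; error paths elided. */
struct hw_fib *pool_va = fibptr->hw_fib_va;
dma_addr_t pool_pa = fibptr->hw_fib_pa;
dma_addr_t daddr;
struct hw_fib *big = pci_alloc_consistent(dev->pdev, size, &daddr);

if (big) {
	fibptr->hw_fib_va = big;	/* hijack: use the larger buffer */
	fibptr->hw_fib_pa = daddr;
	/* ... copy in from user space, send, read the reply ... */
	pci_free_consistent(dev->pdev, size, big, daddr);
	fibptr->hw_fib_va = pool_va;	/* restore the pool buffer */
	fibptr->hw_fib_pa = pool_pa;
}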
@@ -160,7 +181,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
fibctx->size = sizeof(struct aac_fib_context);
- /*
+ /*
* Yes yes, I know this could be an index, but we have a
* better guarantee of uniqueness for the locked loop below.
* Without the aid of a persistent history, this also helps
@@ -170,7 +191,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
/*
* Initialize the mutex used to wait for the next AIF.
*/
- init_MUTEX_LOCKED(&fibctx->wait_sem);
+ sema_init(&fibctx->wait_sem, 0);
fibctx->wait = 0;
/*
* Initialize the fibs and set the count of fibs on
@@ -180,7 +201,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
INIT_LIST_HEAD(&fibctx->fib_list);
fibctx->jiffies = jiffies/HZ;
/*
- * Now add this context onto the adapter's
+ * Now add this context onto the adapter's
* AdapterFibContext list.
*/
spin_lock_irqsave(&dev->fib_lock, flags);
@@ -198,12 +219,12 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
}
list_add_tail(&fibctx->next, &dev->fib_list);
spin_unlock_irqrestore(&dev->fib_lock, flags);
- if (copy_to_user(arg, &fibctx->unique,
+ if (copy_to_user(arg, &fibctx->unique,
sizeof(fibctx->unique))) {
status = -EFAULT;
} else {
status = 0;
- }
+ }
}
return status;
}
@@ -212,8 +233,8 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
* next_getadapter_fib - get the next fib
* @dev: adapter to use
* @arg: ioctl argument
- *
- * This routine will get the next Fib, if available, from the AdapterFibContext
+ *
+ * This routine will get the next Fib, if available, from the AdapterFibContext
* passed in from the user.
*/
@@ -225,7 +246,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
int status;
struct list_head * entry;
unsigned long flags;
-
+
if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
return -EFAULT;
/*
@@ -234,6 +255,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
* Search the list of AdapterFibContext addresses on the adapter
* to be sure this is a valid address
*/
+ spin_lock_irqsave(&dev->fib_lock, flags);
entry = dev->fib_list.next;
fibctx = NULL;
@@ -242,57 +264,67 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
/*
* Extract the AdapterFibContext from the Input parameters.
*/
- if (fibctx->unique == f.fibctx) { /* We found a winner */
+ if (fibctx->unique == f.fibctx) { /* We found a winner */
break;
}
entry = entry->next;
fibctx = NULL;
}
if (!fibctx) {
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
dprintk ((KERN_INFO "Fib Context not found\n"));
return -EINVAL;
}
if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
(fibctx->size != sizeof(struct aac_fib_context))) {
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
dprintk ((KERN_INFO "Fib Context corrupt?\n"));
return -EINVAL;
}
status = 0;
- spin_lock_irqsave(&dev->fib_lock, flags);
/*
* If there are no fibs to send back, then either wait or return
* -EAGAIN
*/
return_fib:
if (!list_empty(&fibctx->fib_list)) {
- struct list_head * entry;
/*
* Pull the next fib from the fibs
*/
entry = fibctx->fib_list.next;
list_del(entry);
-
+
fib = list_entry(entry, struct fib, fiblink);
fibctx->count--;
spin_unlock_irqrestore(&dev->fib_lock, flags);
- if (copy_to_user(f.fib, fib->hw_fib, sizeof(struct hw_fib))) {
- kfree(fib->hw_fib);
+ if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
+ kfree(fib->hw_fib_va);
kfree(fib);
return -EFAULT;
- }
+ }
/*
* Free the space occupied by this copy of the fib.
*/
- kfree(fib->hw_fib);
+ kfree(fib->hw_fib_va);
kfree(fib);
status = 0;
- fibctx->jiffies = jiffies/HZ;
} else {
spin_unlock_irqrestore(&dev->fib_lock, flags);
+ /* If someone killed the AIF aacraid thread, restart it */
+ status = !dev->aif_thread;
+ if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
+ /* Be paranoid, be very paranoid! */
+ kthread_stop(dev->thread);
+ ssleep(1);
+ dev->aif_thread = 0;
+ dev->thread = kthread_run(aac_command_thread, dev,
+ "%s", dev->name);
+ ssleep(1);
+ }
if (f.wait) {
if(down_interruptible(&fibctx->wait_sem) < 0) {
- status = -EINTR;
+ status = -ERESTARTSYS;
} else {
/* Lock again and retry */
spin_lock_irqsave(&dev->fib_lock, flags);
@@ -300,8 +332,9 @@ return_fib:
}
} else {
status = -EAGAIN;
- }
+ }
}
+ fibctx->jiffies = jiffies/HZ;
return status;
}
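
Note the change of errno above: an interrupted wait now reports -ERESTARTSYS rather than -EINTR, so the signal layer can transparently re-issue the ioctl instead of surfacing a spurious failure to user space. Reduced to its core:

/* Why -ERESTARTSYS: if a signal wakes the sleeper, the syscall is
 * restarted automatically after the handler runs (with SA_RESTART),
 * whereas -EINTR would propagate to the caller. */
if (down_interruptible(&fibctx->wait_sem) < 0)
	return -ERESTARTSYS;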
@@ -324,7 +357,7 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
/*
* Free the space occupied by this copy of the fib.
*/
- kfree(fib->hw_fib);
+ kfree(fib->hw_fib_va);
kfree(fib);
}
/*
@@ -349,7 +382,7 @@ int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
*
* This routine will close down the fibctx passed in from the user.
*/
-
+
static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
struct aac_fib_context *fibctx;
@@ -372,10 +405,8 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
/*
* Extract the fibctx from the input parameters
*/
- if (fibctx->unique == (u32)(unsigned long)arg) {
- /* We found a winner */
+ if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
break;
- }
entry = entry->next;
fibctx = NULL;
}
@@ -398,17 +429,27 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
* @arg: ioctl arguments
*
* This routine returns the driver version.
- * Under Linux, there have been no version incompatibilities, so this is
- * simple!
+ * Under Linux, there have been no version incompatibilities, so this is
+ * simple!
*/
static int check_revision(struct aac_dev *dev, void __user *arg)
{
struct revision response;
+ char *driver_version = aac_driver_version;
+ u32 version;
response.compat = 1;
- response.version = le32_to_cpu(dev->adapter_info.kernelrev);
- response.build = le32_to_cpu(dev->adapter_info.kernelbuild);
+ version = (simple_strtol(driver_version,
+ &driver_version, 10) << 24) | 0x00000400;
+ version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
+ version += simple_strtol(driver_version + 1, NULL, 10);
+ response.version = cpu_to_le32(version);
+# ifdef AAC_DRIVER_BUILD
+ response.build = cpu_to_le32(AAC_DRIVER_BUILD);
+# else
+ response.build = cpu_to_le32(9999);
+# endif
if (copy_to_user(arg, &response, sizeof(response)))
return -EFAULT;
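
check_revision() now derives its reply from the driver's own version string instead of adapter firmware fields. A worked example of the packing, assuming a hypothetical aac_driver_version of "1.1.5":

/* Hypothetical input "1.1.5":
 *   (1 << 24) | 0x00000400 = 0x01000400   major in the top byte
 *            + (1 << 16)   = 0x01010400   minor in the next byte
 *            + 5           = 0x01010405   patch in the low bytes
 * so response.version = cpu_to_le32(0x01010405). */
u32 version = (1 << 24) | 0x00000400;
version += 1 << 16;
version += 5;		/* version == 0x01010405 */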
@@ -437,34 +478,41 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
u32 data_dir;
void __user *sg_user[32];
void *sg_list[32];
- u32 sg_indx = 0;
+ u32 sg_indx = 0;
u32 byte_count = 0;
- u32 actual_fibsize = 0;
+ u32 actual_fibsize64, actual_fibsize = 0;
int i;
+ if (dev->in_reset) {
+ dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
+ return -EBUSY;
+ }
if (!capable(CAP_SYS_ADMIN)){
- dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
+ dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
return -EPERM;
}
/*
- * Allocate and initialize a Fib then setup a BlockWrite command
+ * Allocate and initialize a Fib then setup a SRB command
*/
- if (!(srbfib = fib_alloc(dev))) {
+ if (!(srbfib = aac_fib_alloc(dev))) {
return -ENOMEM;
}
- fib_init(srbfib);
+ aac_fib_init(srbfib);
+ /* raw_srb FIB is not FastResponseCapable */
+ srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable);
srbcmd = (struct aac_srb*) fib_data(srbfib);
memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
- dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
+ dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
rcode = -EFAULT;
goto cleanup;
}
- if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) {
+ if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
+ (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
rcode = -EINVAL;
goto cleanup;
}
@@ -476,7 +524,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
if(copy_from_user(user_srbcmd, user_srb,fibsize)){
- dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
+ dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
rcode = -EFAULT;
goto cleanup;
}
@@ -487,15 +535,15 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
// Fix up srb for endian and force some values
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
- srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
+ srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
srbcmd->id = cpu_to_le32(user_srbcmd->id);
- srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
- srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
- srbcmd->flags = cpu_to_le32(flags);
+ srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
+ srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
+ srbcmd->flags = cpu_to_le32(flags);
srbcmd->retry_limit = 0; // Obsolete parameter
srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
-
+
switch (flags & (SRB_DataIn | SRB_DataOut)) {
case SRB_DataOut:
data_dir = DMA_TO_DEVICE;
@@ -509,148 +557,246 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
default:
data_dir = DMA_NONE;
}
- if (user_srbcmd->sg.count > (sizeof(sg_list)/sizeof(sg_list[0]))) {
+ if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
le32_to_cpu(srbcmd->sg.count)));
rcode = -EINVAL;
goto cleanup;
}
- if (dev->dac_support == 1) {
+ actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+ ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
+ actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
+ (sizeof(struct sgentry64) - sizeof(struct sgentry));
+ /* User made a mistake - should not continue */
+ if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
+ dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
+ "Raw SRB command calculated fibsize=%lu;%lu "
+ "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
+ "issued fibsize=%d\n",
+ actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
+ sizeof(struct aac_srb), sizeof(struct sgentry),
+ sizeof(struct sgentry64), fibsize));
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
+ dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
+ rcode = -EINVAL;
+ goto cleanup;
+ }
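
The two expected sizes computed above encode the fact that struct aac_srb ends in one inline sgentry; comparing the caller's fibsize against both tells the driver which scatter-gather format was passed. A worked example with illustrative struct sizes (the real values are ABI-dependent):

/* Illustrative sizes: sizeof(struct aac_srb) = 60,
 * sizeof(struct sgentry) = 8, sizeof(struct sgentry64) = 12,
 * user_srbcmd->sg.count = 3:
 *
 *   actual_fibsize   = 60 - 8 + 3 * 8    = 76   (32-bit sg map)
 *   actual_fibsize64 = 76 + 3 * (12 - 8) = 88   (64-bit sg map)
 *
 * fibsize must equal one of the two or the ioctl is rejected. */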
+ byte_count = 0;
+ if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
- struct user_sgmap* usg;
- byte_count = 0;
/*
* This should also catch if user used the 32 bit sgmap
*/
- actual_fibsize = sizeof(struct aac_srb) -
- sizeof(struct sgentry) +
- ((upsg->count & 0xff) *
- sizeof(struct sgentry));
- if(actual_fibsize != fibsize){ // User made a mistake - should not continue
- dprintk((KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n"));
- rcode = -EINVAL;
- goto cleanup;
- }
- usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
- + sizeof(struct sgmap), GFP_KERNEL);
- if (!usg) {
- dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
- rcode = -ENOMEM;
- goto cleanup;
- }
- memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
- + sizeof(struct sgmap));
- actual_fibsize = sizeof(struct aac_srb) -
- sizeof(struct sgentry) + ((usg->count & 0xff) *
- sizeof(struct sgentry64));
- if ((data_dir == DMA_NONE) && upsg->count) {
- kfree (usg);
- dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
- rcode = -EINVAL;
- goto cleanup;
- }
+ if (actual_fibsize64 == fibsize) {
+ actual_fibsize = actual_fibsize64;
+ for (i = 0; i < upsg->count; i++) {
+ u64 addr;
+ void* p;
+ if (upsg->sg[i].count >
+ ((dev->adapter_info.options &
+ AAC_OPT_NEW_COMM) ?
+ (dev->scsi_host_ptr->max_sectors << 9) :
+ 65536)) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ /* Does this really need to be GFP_DMA? */
+ p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
+ if(!p) {
+ dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ upsg->sg[i].count,i,upsg->count));
+ rcode = -ENOMEM;
+ goto cleanup;
+ }
+ addr = (u64)upsg->sg[i].addr[0];
+ addr += ((u64)upsg->sg[i].addr[1]) << 32;
+ sg_user[i] = (void __user *)(uintptr_t)addr;
+ sg_list[i] = p; // save so we can clean up later
+ sg_indx = i;
+
+ if (flags & SRB_DataOut) {
+ if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
+ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ }
+ addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);
- for (i = 0; i < usg->count; i++) {
- u64 addr;
- void* p;
- /* Does this really need to be GFP_DMA? */
- p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
- if(p == 0) {
- kfree (usg);
- dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
- usg->sg[i].count,i,usg->count));
+ psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
+ psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
+ byte_count += upsg->sg[i].count;
+ psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
+ }
+ } else {
+ struct user_sgmap* usg;
+ usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
+ + sizeof(struct sgmap), GFP_KERNEL);
+ if (!usg) {
+ dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
rcode = -ENOMEM;
goto cleanup;
}
- sg_user[i] = (void __user *)usg->sg[i].addr;
- sg_list[i] = p; // save so we can clean up later
- sg_indx = i;
-
- if( flags & SRB_DataOut ){
- if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
- kfree (usg);
- dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
- rcode = -EFAULT;
+ memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
+ + sizeof(struct sgmap));
+ actual_fibsize = actual_fibsize64;
+
+ for (i = 0; i < usg->count; i++) {
+ u64 addr;
+ void* p;
+ if (usg->sg[i].count >
+ ((dev->adapter_info.options &
+ AAC_OPT_NEW_COMM) ?
+ (dev->scsi_host_ptr->max_sectors << 9) :
+ 65536)) {
+ kfree(usg);
+ rcode = -EINVAL;
goto cleanup;
}
- }
- addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
+ /* Does this really need to be GFP_DMA? */
+ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
+ if(!p) {
+ dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ usg->sg[i].count,i,usg->count));
+ kfree(usg);
+ rcode = -ENOMEM;
+ goto cleanup;
+ }
+ sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
+ sg_list[i] = p; // save so we can clean up later
+ sg_indx = i;
+
+ if (flags & SRB_DataOut) {
+ if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
+ kfree (usg);
+ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ }
+ addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
- psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
- psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
- psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
- byte_count += usg->sg[i].count;
+ psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
+ psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
+ byte_count += usg->sg[i].count;
+ psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
+ }
+ kfree (usg);
}
- kfree (usg);
-
srbcmd->count = cpu_to_le32(byte_count);
psg->count = cpu_to_le32(sg_indx+1);
- status = fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
+ status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
} else {
struct user_sgmap* upsg = &user_srbcmd->sg;
struct sgmap* psg = &srbcmd->sg;
- byte_count = 0;
- actual_fibsize = sizeof (struct aac_srb) + (((user_srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
- if(actual_fibsize != fibsize){ // User made a mistake - should not continue
- dprintk((KERN_DEBUG"aacraid: Bad Size specified in Raw SRB command\n"));
- rcode = -EINVAL;
- goto cleanup;
- }
- if ((data_dir == DMA_NONE) && upsg->count) {
- dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
- rcode = -EINVAL;
- goto cleanup;
- }
- for (i = 0; i < upsg->count; i++) {
- dma_addr_t addr;
- void* p;
- p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
- if(p == 0) {
- dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
- upsg->sg[i].count, i, upsg->count));
- rcode = -ENOMEM;
- goto cleanup;
- }
- sg_user[i] = (void __user *)upsg->sg[i].addr;
- sg_list[i] = p; // save so we can clean up later
- sg_indx = i;
-
- if( flags & SRB_DataOut ){
- if(copy_from_user(p, sg_user[i],
- upsg->sg[i].count)) {
- dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
- rcode = -EFAULT;
+ if (actual_fibsize64 == fibsize) {
+ struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
+ for (i = 0; i < upsg->count; i++) {
+ uintptr_t addr;
+ void* p;
+ if (usg->sg[i].count >
+ ((dev->adapter_info.options &
+ AAC_OPT_NEW_COMM) ?
+ (dev->scsi_host_ptr->max_sectors << 9) :
+ 65536)) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ /* Does this really need to be GFP_DMA? */
+ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
+ if(!p) {
+ dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ usg->sg[i].count,i,usg->count));
+ rcode = -ENOMEM;
goto cleanup;
}
+ addr = (u64)usg->sg[i].addr[0];
+ addr += ((u64)usg->sg[i].addr[1]) << 32;
+ sg_user[i] = (void __user *)addr;
+ sg_list[i] = p; // save so we can clean up later
+ sg_indx = i;
+
+ if (flags & SRB_DataOut) {
+ if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
+ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ }
+ addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
+
+ psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
+ byte_count += usg->sg[i].count;
+ psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
}
- addr = pci_map_single(dev->pdev, p,
- upsg->sg[i].count, data_dir);
+ } else {
+ for (i = 0; i < upsg->count; i++) {
+ dma_addr_t addr;
+ void* p;
+ if (upsg->sg[i].count >
+ ((dev->adapter_info.options &
+ AAC_OPT_NEW_COMM) ?
+ (dev->scsi_host_ptr->max_sectors << 9) :
+ 65536)) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
+ if (!p) {
+ dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ upsg->sg[i].count, i, upsg->count));
+ rcode = -ENOMEM;
+ goto cleanup;
+ }
+ sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
+ sg_list[i] = p; // save so we can clean up later
+ sg_indx = i;
+
+ if (flags & SRB_DataOut) {
+ if(copy_from_user(p, sg_user[i],
+ upsg->sg[i].count)) {
+ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ }
+ addr = pci_map_single(dev->pdev, p,
+ upsg->sg[i].count, data_dir);
- psg->sg[i].addr = cpu_to_le32(addr);
- psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
- byte_count += upsg->sg[i].count;
+ psg->sg[i].addr = cpu_to_le32(addr);
+ byte_count += upsg->sg[i].count;
+ psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
+ }
}
srbcmd->count = cpu_to_le32(byte_count);
psg->count = cpu_to_le32(sg_indx+1);
- status = fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
+ status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
+ }
+ if (status == -ERESTARTSYS) {
+ rcode = -ERESTARTSYS;
+ goto cleanup;
}
if (status != 0){
- dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
+ dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
rcode = -ENXIO;
goto cleanup;
}
- if( flags & SRB_DataIn ) {
+ if (flags & SRB_DataIn) {
for(i = 0 ; i <= sg_indx; i++){
- byte_count = le32_to_cpu((dev->dac_support == 1)
+ byte_count = le32_to_cpu(
+ (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
: srbcmd->sg.sg[i].count);
if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
- dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
+ dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
rcode = -EFAULT;
goto cleanup;
@@ -660,7 +806,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
reply = (struct aac_srb_reply *) fib_data(srbfib);
if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
- dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
+ dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
rcode = -EFAULT;
goto cleanup;
}
@@ -670,43 +816,45 @@ cleanup:
for(i=0; i <= sg_indx; i++){
kfree(sg_list[i]);
}
- fib_complete(srbfib);
- fib_free(srbfib);
+ if (rcode != -ERESTARTSYS) {
+ aac_fib_complete(srbfib);
+ aac_fib_free(srbfib);
+ }
return rcode;
}
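
Throughout aac_send_raw_srb(), 64-bit DMA and user addresses travel as two 32-bit halves. A minimal sketch of the reassembly and split used above (the upsg_addr0/upsg_addr1 names are placeholders for the user sg entry fields):

/* Reassemble a 64-bit user address from its halves ... */
u64 addr = (u64)upsg_addr0 | ((u64)upsg_addr1 << 32);

/* ... and split a mapped dma_addr_t back into a sgentry64. */
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);	/* low 32 */
psg->sg[i].addr[1] = cpu_to_le32(addr >> 32);		/* high 32 */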
struct aac_pci_info {
- u32 bus;
- u32 slot;
+ u32 bus;
+ u32 slot;
};
static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
{
- struct aac_pci_info pci_info;
+ struct aac_pci_info pci_info;
pci_info.bus = dev->pdev->bus->number;
pci_info.slot = PCI_SLOT(dev->pdev->devfn);
- if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
- dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
- return -EFAULT;
+ if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
+ dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
+ return -EFAULT;
}
- return 0;
+ return 0;
}
-
+
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
int status;
-
+
/*
* HBA gets first crack
*/
-
+
status = aac_dev_ioctl(dev, cmd, arg);
- if(status != -ENOTTY)
+ if (status != -ENOTTY)
return status;
switch (cmd) {
@@ -734,7 +882,7 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
break;
default:
status = -ENOTTY;
- break;
+ break;
}
return status;
}
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 43557bf661f..177b094c779 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -1,11 +1,12 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -32,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -40,11 +40,12 @@
#include <linux/completion.h>
#include <linux/mm.h>
#include <scsi/scsi_host.h>
-#include <asm/semaphore.h>
#include "aacraid.h"
-struct aac_common aac_config;
+struct aac_common aac_config = {
+ .irq_mod = 1
+};
static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
{
@@ -52,11 +53,17 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
unsigned long size, align;
const unsigned long fibsize = 4096;
const unsigned long printfbufsiz = 256;
+ unsigned long host_rrq_size = 0;
struct aac_init *init;
dma_addr_t phys;
-
- size = fibsize + sizeof(struct aac_init) + commsize + commalign + printfbufsiz;
-
+ unsigned long aac_max_hostphysmempages;
+
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE2)
+ host_rrq_size = (dev->scsi_host_ptr->can_queue
+ + AAC_NUM_MGT_FIB) * sizeof(u32);
+ size = fibsize + sizeof(struct aac_init) + commsize +
+ commalign + printfbufsiz + host_rrq_size;
base = pci_alloc_consistent(dev->pdev, size, &phys);
@@ -69,8 +76,15 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
dev->comm_phys = phys;
dev->comm_size = size;
- dev->init = (struct aac_init *)(base + fibsize);
- dev->init_pa = phys + fibsize;
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ dev->host_rrq = (u32 *)(base + fibsize);
+ dev->host_rrq_pa = phys + fibsize;
+ memset(dev->host_rrq, 0, host_rrq_size);
+ }
+
+ dev->init = (struct aac_init *)(base + fibsize + host_rrq_size);
+ dev->init_pa = phys + fibsize + host_rrq_size;
init = dev->init;
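
With the host RRQ added, the single coherent allocation in aac_alloc_comm() is carved up in a fixed order; the offsets below follow directly from the code above (host_rrq_size is zero unless the TYPE1/TYPE2 message interface is in use):

/* Layout of the one pci_alloc_consistent() region:
 *
 *   phys + 0                       adapter fibs     (fibsize = 4096)
 *   phys + 4096                    host_rrq         (TYPE1/TYPE2 only)
 *   phys + 4096 + host_rrq_size    struct aac_init
 *   ... rounded up to commalign    queue headers    (commsize)
 *   tail                           printf buffer    (printfbufsiz = 256)
 */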
@@ -90,43 +104,57 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
init->AdapterFibsSize = cpu_to_le32(fibsize);
init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
- /*
+ /*
* number of 4k pages of host physical memory. The aacraid fw needs
- * this number to be less than 4gb worth of pages. num_physpages is in
- * system page units. New firmware doesn't have any issues with the
- * mapping system, but older Firmware did, and had *troubles* dealing
- * with the math overloading past 32 bits, thus we must limit this
- * field.
- *
- * This assumes the memory is mapped zero->n, which isnt
- * always true on real computers. It also has some slight problems
- * with the GART on x86-64. I've btw never tried DMA from PCI space
- * on this platform but don't be suprised if its problematic.
+ * this number to be less than 4gb worth of pages. New firmware doesn't
+ * have any issues with the mapping system, but older Firmware did, and
+ * had *troubles* dealing with the math overloading past 32 bits, thus
+ * we must limit this field.
*/
-#ifndef CONFIG_GART_IOMMU
- if ((num_physpages << (PAGE_SHIFT - 12)) <= AAC_MAX_HOSTPHYSMEMPAGES) {
- init->HostPhysMemPages =
- cpu_to_le32(num_physpages << (PAGE_SHIFT-12));
- } else
-#endif
- {
+ aac_max_hostphysmempages = dma_get_required_mask(&dev->pdev->dev) >> 12;
+ if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
+ init->HostPhysMemPages = cpu_to_le32(aac_max_hostphysmempages);
+ else
init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
- }
- init->InitFlags = 0;
+ init->InitFlags = cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
+ INITFLAGS_DRIVER_SUPPORTS_PM);
init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
+ init->MaxNumAif = cpu_to_le32(dev->max_num_aif);
+
+ if (dev->comm_interface == AAC_COMM_MESSAGE) {
+ init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
+ dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
+ } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+ init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
+ init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+ INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
+ init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
+ init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
+ dprintk((KERN_WARNING"aacraid: New Comm Interface type1 enabled\n"));
+ } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
+ init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+ INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
+ init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
+ init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
+ init->MiniPortRevision = cpu_to_le32(0L); /* number of MSI-X */
+ dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n"));
+ }
/*
* Increment the base address by the amount already used
*/
- base = base + fibsize + sizeof(struct aac_init);
- phys = (dma_addr_t)((ulong)phys + fibsize + sizeof(struct aac_init));
+ base = base + fibsize + host_rrq_size + sizeof(struct aac_init);
+ phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
+ sizeof(struct aac_init));
+
/*
* Align the beginning of Headers to commalign
*/
- align = (commalign - ((unsigned long)(base) & (commalign - 1)));
+ align = (commalign - ((uintptr_t)(base) & (commalign - 1)));
base = base + align;
phys = phys + align;
/*
@@ -153,7 +181,6 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
{
q->numpending = 0;
q->dev = dev;
- INIT_LIST_HEAD(&q->pendingq);
init_waitqueue_head(&q->cmdready);
INIT_LIST_HEAD(&q->cmdq);
init_waitqueue_head(&q->qfull);
@@ -179,26 +206,28 @@ int aac_send_shutdown(struct aac_dev * dev)
struct aac_close *cmd;
int status;
- fibctx = fib_alloc(dev);
+ fibctx = aac_fib_alloc(dev);
if (!fibctx)
return -ENOMEM;
- fib_init(fibctx);
+ aac_fib_init(fibctx);
cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);
- cmd->cid = cpu_to_le32(0xffffffff);
+ cmd->cid = cpu_to_le32(0xfffffffe);
- status = fib_send(ContainerCommand,
+ status = aac_fib_send(ContainerCommand,
fibctx,
sizeof(struct aac_close),
FsaNormal,
- 1, 1,
+ -2 /* Timeout silently */, 1,
NULL, NULL);
- if (status == 0)
- fib_complete(fibctx);
- fib_free(fibctx);
+ if (status >= 0)
+ aac_fib_complete(fibctx);
+ /* FIB should be freed only after getting the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibctx);
return status;
}
@@ -230,7 +259,7 @@ static int aac_comm_init(struct aac_dev * dev)
spin_lock_init(&dev->fib_lock);
/*
- * Allocate the physically contigous space for the commuication
+	 * Allocate the physically contiguous space for the communication
* queue headers.
*/
@@ -304,15 +333,61 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
u32 status[5];
struct Scsi_Host * host = dev->scsi_host_ptr;
+ extern int aac_sync_mode;
/*
* Check the preferred comm settings, defaults from template.
*/
+ dev->management_fib_count = 0;
+ spin_lock_init(&dev->manage_lock);
+ spin_lock_init(&dev->sync_lock);
dev->max_fib_size = sizeof(struct hw_fib);
dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
- sizeof(struct aac_fibhdr)
- - sizeof(struct aac_write) + sizeof(struct sgmap))
- / sizeof(struct sgmap);
+ - sizeof(struct aac_write) + sizeof(struct sgentry))
+ / sizeof(struct sgentry);
+ dev->comm_interface = AAC_COMM_PRODUCER;
+ dev->raw_io_interface = dev->raw_io_64 = 0;
+
+ if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
+ 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
+ (status[0] == 0x00000001)) {
+ if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
+ dev->raw_io_64 = 1;
+ dev->sync_mode = aac_sync_mode;
+ if (dev->a_ops.adapter_comm &&
+ (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) {
+ dev->comm_interface = AAC_COMM_MESSAGE;
+ dev->raw_io_interface = 1;
+ if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) {
+ /* driver supports TYPE1 (Tupelo) */
+ dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+ } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
+ /* driver supports TYPE2 (Denali) */
+ dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
+ } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) ||
+ (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3))) {
+				/* driver doesn't support TYPE3 and TYPE4 */
+ /* switch to sync. mode */
+ dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
+ dev->sync_mode = 1;
+ }
+ }
+ if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
+ (status[2] > dev->base_size)) {
+ aac_adapter_ioremap(dev, 0);
+ dev->base_size = status[2];
+ if (aac_adapter_ioremap(dev, status[2])) {
+ /* remap failed, go back ... */
+ dev->comm_interface = AAC_COMM_PRODUCER;
+ if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
+ printk(KERN_WARNING
+ "aacraid: unable to map adapter.\n");
+ return NULL;
+ }
+ }
+ }
+ }
if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
0, 0, 0, 0, 0, 0,
status+0, status+1, status+2, status+3, status+4))
@@ -325,10 +400,18 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
* status[3] & 0xFFFF maximum number FIBs outstanding
*/
host->max_sectors = (status[1] >> 16) << 1;
- dev->max_fib_size = status[1] & 0xFFFF;
+ /* Multiple of 32 for PMC */
+ dev->max_fib_size = status[1] & 0xFFE0;
host->sg_tablesize = status[2] >> 16;
dev->sg_tablesize = status[2] & 0xFFFF;
- host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
+ if (dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9)
+ host->can_queue = ((status[3] >> 16) ? (status[3] >> 16) :
+ (status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB;
+ else
+ host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
+ dev->max_num_aif = status[4] & 0xFFFF;
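
The GET_COMM_PREFERRED_SETTINGS reply packs two values per status word; a decode with hypothetical reply words shows how the fields above fall out:

/* Hypothetical reply, decoded as above:
 *   status[1] = 0x00400800 -> max_sectors  = 0x0040 << 1     = 128
 *                             max_fib_size = 0x0800 & 0xFFE0 = 2048
 *   status[2] = 0x00220040 -> host sg_tablesize = 0x22, dev = 0x40
 *   status[3] = 0x00000200 -> can_queue = 512 - AAC_NUM_MGT_FIB
 *   status[4] = 0x00000004 -> max_num_aif = 4
 */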
/*
* NOTE:
* All these overrides are based on a fixed internal
@@ -340,8 +423,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->max_fib_size = 512;
dev->sg_tablesize = host->sg_tablesize
= (512 - sizeof(struct aac_fibhdr)
- - sizeof(struct aac_write) + sizeof(struct sgmap))
- / sizeof(struct sgmap);
+ - sizeof(struct aac_write) + sizeof(struct sgentry))
+ / sizeof(struct sgentry);
host->can_queue = AAC_NUM_IO_FIB;
} else if (acbsize == 2048) {
host->max_sectors = 512;
@@ -375,16 +458,18 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
}
}
+ if (host->can_queue > AAC_NUM_IO_FIB)
+ host->can_queue = AAC_NUM_IO_FIB;
+
/*
* Ok now init the communication subsystem
*/
- dev->queues = (struct aac_queue_block *) kmalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
+ dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
if (dev->queues == NULL) {
printk(KERN_ERR "Error could not allocate comm region.\n");
return NULL;
}
- memset(dev->queues, 0, sizeof(struct aac_queue_block));
if (aac_comm_init(dev)<0){
kfree(dev->queues);
@@ -393,13 +478,13 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
/*
* Initialize the list of fibs
*/
- if(fib_setup(dev)<0){
+ if (aac_fib_setup(dev) < 0) {
kfree(dev->queues);
return NULL;
}
INIT_LIST_HEAD(&dev->fib_list);
- init_completion(&dev->aif_completion);
+ INIT_LIST_HEAD(&dev->sync_fib_list);
return dev;
}
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 5322865942e..cab190af634 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1,11 +1,12 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -38,8 +39,14 @@
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
-#include <asm/semaphore.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
#include "aacraid.h"
@@ -50,45 +57,51 @@
* Allocate and map the shared PCI space for the FIB blocks used to
* talk to the Adaptec firmware.
*/
-
+
static int fib_map_alloc(struct aac_dev *dev)
{
dprintk((KERN_INFO
"allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
- if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
- * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
- &dev->hw_fib_pa))==NULL)
+ dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
+ (dev->max_fib_size + sizeof(struct aac_fib_xporthdr))
+ * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
+ &dev->hw_fib_pa);
+ if (dev->hw_fib_va == NULL)
return -ENOMEM;
return 0;
}
/**
- * fib_map_free - free the fib objects
+ * aac_fib_map_free - free the fib objects
* @dev: Adapter to free
*
* Free the PCI mappings and the memory allocated for FIB blocks
* on this adapter.
*/
-void fib_map_free(struct aac_dev *dev)
+void aac_fib_map_free(struct aac_dev *dev)
{
- pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
+ pci_free_consistent(dev->pdev,
+ dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
+ dev->hw_fib_va, dev->hw_fib_pa);
+ dev->hw_fib_va = NULL;
+ dev->hw_fib_pa = 0;
}
/**
- * fib_setup - setup the fibs
+ * aac_fib_setup - setup the fibs
* @dev: Adapter to set up
*
- * Allocate the PCI space for the fibs, map it and then intialise the
+ * Allocate the PCI space for the fibs, map it and then initialise the
* fib area, the unmapped fib data and also the free list
*/
-int fib_setup(struct aac_dev * dev)
+int aac_fib_setup(struct aac_dev * dev)
{
struct fib *fibptr;
- struct hw_fib *hw_fib_va;
+ struct hw_fib *hw_fib;
dma_addr_t hw_fib_pa;
int i;
@@ -99,26 +112,44 @@ int fib_setup(struct aac_dev * dev)
}
if (i<0)
return -ENOMEM;
-
- hw_fib_va = dev->hw_fib_va;
+
+ /* 32 byte alignment for PMC */
+ hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
+ dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+ (hw_fib_pa - dev->hw_fib_pa));
+ dev->hw_fib_pa = hw_fib_pa;
+ memset(dev->hw_fib_va, 0,
+ (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) *
+ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
+
+ /* add Xport header */
+ dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+ sizeof(struct aac_fib_xporthdr));
+ dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr);
+
+ hw_fib = dev->hw_fib_va;
hw_fib_pa = dev->hw_fib_pa;
- memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
/*
* Initialise the fibs
*/
- for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
+ for (i = 0, fibptr = &dev->fibs[i];
+ i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+ i++, fibptr++)
{
+ fibptr->flags = 0;
fibptr->dev = dev;
- fibptr->hw_fib = hw_fib_va;
- fibptr->data = (void *) fibptr->hw_fib->data;
+ fibptr->hw_fib_va = hw_fib;
+ fibptr->data = (void *) fibptr->hw_fib_va->data;
fibptr->next = fibptr+1; /* Forward chain the fibs */
- init_MUTEX_LOCKED(&fibptr->event_wait);
+ sema_init(&fibptr->event_wait, 0);
spin_lock_init(&fibptr->event_lock);
- hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
- hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
+ hw_fib->header.XferState = cpu_to_le32(0xffffffff);
+ hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
fibptr->hw_fib_pa = hw_fib_pa;
- hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
- hw_fib_pa = hw_fib_pa + dev->max_fib_size;
+ hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
+ dev->max_fib_size + sizeof(struct aac_fib_xporthdr));
+ hw_fib_pa = hw_fib_pa +
+ dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
}
/*
* Add the fib chain to the free list
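
The PMC parts require hardware FIBs on 32-byte boundaries, hence the round-up at the top of the hunk above. The idiom with a worked value, assuming ALIGN32 is 32 as the name suggests:

/* Align-up idiom: pa = 0x1234 -> (0x1234 + 31) & ~31 = 0x1240.
 * The virtual pointer is advanced by the same delta so va and pa
 * continue to describe the same byte. */
hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);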
@@ -132,19 +163,19 @@ int fib_setup(struct aac_dev * dev)
}
/**
- * fib_alloc - allocate a fib
+ * aac_fib_alloc - allocate a fib
* @dev: Adapter to allocate the fib for
*
* Allocate a fib from the adapter fib pool. If the pool is empty we
* return NULL.
*/
-
-struct fib * fib_alloc(struct aac_dev *dev)
+
+struct fib *aac_fib_alloc(struct aac_dev *dev)
{
struct fib * fibptr;
unsigned long flags;
spin_lock_irqsave(&dev->fib_lock, flags);
- fibptr = dev->free_fib;
+ fibptr = dev->free_fib;
if(!fibptr){
spin_unlock_irqrestore(&dev->fib_lock, flags);
return fibptr;
@@ -160,7 +191,8 @@ struct fib * fib_alloc(struct aac_dev *dev)
* Null out fields that depend on being zero at the start of
* each I/O
*/
- fibptr->hw_fib->header.XferState = 0;
+ fibptr->hw_fib_va->header.XferState = 0;
+ fibptr->flags = 0;
fibptr->callback = NULL;
fibptr->callback_data = NULL;
@@ -168,50 +200,52 @@ struct fib * fib_alloc(struct aac_dev *dev)
}
/**
- * fib_free - free a fib
+ * aac_fib_free - free a fib
* @fibptr: fib to free up
*
* Frees up a fib and places it on the appropriate queue
- * (either free or timed out)
*/
-
-void fib_free(struct fib * fibptr)
+
+void aac_fib_free(struct fib *fibptr)
{
- unsigned long flags;
+ unsigned long flags, flagsv;
+
+ spin_lock_irqsave(&fibptr->event_lock, flagsv);
+ if (fibptr->done == 2) {
+ spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
+ return;
+ }
+ spin_unlock_irqrestore(&fibptr->event_lock, flagsv);
spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
- if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
+ if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
aac_config.fib_timeouts++;
- fibptr->next = fibptr->dev->timeout_fib;
- fibptr->dev->timeout_fib = fibptr;
- } else {
- if (fibptr->hw_fib->header.XferState != 0) {
- printk(KERN_WARNING "fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
- (void*)fibptr,
- le32_to_cpu(fibptr->hw_fib->header.XferState));
- }
- fibptr->next = fibptr->dev->free_fib;
- fibptr->dev->free_fib = fibptr;
- }
+ if (fibptr->hw_fib_va->header.XferState != 0) {
+ printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
+ (void*)fibptr,
+ le32_to_cpu(fibptr->hw_fib_va->header.XferState));
+ }
+ fibptr->next = fibptr->dev->free_fib;
+ fibptr->dev->free_fib = fibptr;
spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}
/**
- * fib_init - initialise a fib
+ * aac_fib_init - initialise a fib
* @fibptr: The fib to initialize
- *
+ *
* Set up the generic fib fields ready for use
*/
-
-void fib_init(struct fib *fibptr)
+
+void aac_fib_init(struct fib *fibptr)
{
- struct hw_fib *hw_fib = fibptr->hw_fib;
+ struct hw_fib *hw_fib = fibptr->hw_fib_va;
+ memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
hw_fib->header.StructType = FIB_MAGIC;
hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
- hw_fib->header.SenderFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
- hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
+ hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
@@ -222,13 +256,11 @@ void fib_init(struct fib *fibptr)
* Will deallocate and return to the free pool the FIB pointed to by the
* caller.
*/
-
+
static void fib_dealloc(struct fib * fibptr)
{
- struct hw_fib *hw_fib = fibptr->hw_fib;
- if(hw_fib->header.StructType != FIB_MAGIC)
- BUG();
- hw_fib->header.XferState = 0;
+ struct hw_fib *hw_fib = fibptr->hw_fib_va;
+ hw_fib->header.XferState = 0;
}
/*
@@ -237,7 +269,7 @@ static void fib_dealloc(struct fib * fibptr)
* these routines and are the only routines which have a knowledge of the
* how these queues are implemented.
*/
-
+
/**
* aac_get_entry - get a queue entry
* @dev: Adapter
@@ -250,10 +282,11 @@ static void fib_dealloc(struct fib * fibptr)
 * is full (no free entries) then no entry is returned and the function returns 0, otherwise 1 is
* returned.
*/
-
+
static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
struct aac_queue * q;
+ unsigned long idx;
/*
* All of the queues wrap when they reach the end, so we check
@@ -263,42 +296,38 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
*/
q = &dev->queues->queue[qid];
-
- *index = le32_to_cpu(*(q->headers.producer));
- if ((*index - 2) == le32_to_cpu(*(q->headers.consumer)))
- *nonotify = 1;
-
- if (qid == AdapHighCmdQueue) {
- if (*index >= ADAP_HIGH_CMD_ENTRIES)
- *index = 0;
- } else if (qid == AdapNormCmdQueue) {
- if (*index >= ADAP_NORM_CMD_ENTRIES)
- *index = 0; /* Wrap to front of the Producer Queue. */
- }
- else if (qid == AdapHighRespQueue)
- {
- if (*index >= ADAP_HIGH_RESP_ENTRIES)
- *index = 0;
+
+ idx = *index = le32_to_cpu(*(q->headers.producer));
+ /* Interrupt Moderation, only interrupt for first two entries */
+ if (idx != le32_to_cpu(*(q->headers.consumer))) {
+ if (--idx == 0) {
+ if (qid == AdapNormCmdQueue)
+ idx = ADAP_NORM_CMD_ENTRIES;
+ else
+ idx = ADAP_NORM_RESP_ENTRIES;
+ }
+ if (idx != le32_to_cpu(*(q->headers.consumer)))
+ *nonotify = 1;
}
- else if (qid == AdapNormRespQueue)
- {
- if (*index >= ADAP_NORM_RESP_ENTRIES)
+
+ if (qid == AdapNormCmdQueue) {
+ if (*index >= ADAP_NORM_CMD_ENTRIES)
+ *index = 0; /* Wrap to front of the Producer Queue. */
+ } else {
+ if (*index >= ADAP_NORM_RESP_ENTRIES)
*index = 0; /* Wrap to front of the Producer Queue. */
- }
- else {
- printk("aacraid: invalid qid\n");
- BUG();
}
- if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
+ /* Queue is full */
+ if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
qid, q->numpending);
return 0;
} else {
- *entry = q->base + *index;
+ *entry = q->base + *index;
return 1;
}
-}
+}
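
The rewritten aac_get_entry() folds in a simple interrupt-moderation rule: the adapter doorbell is suppressed (*nonotify = 1) whenever two or more entries are already outstanding, which it detects by stepping the producer index backwards with wrap-around. A condensed restatement with placeholder names:

/* producer/consumer/ring_entries stand in for the queue header
 * fields read above. */
idx = producer;
if (idx != consumer) {			/* at least one entry outstanding */
	if (--idx == 0)
		idx = ring_entries;	/* wrap backwards past slot 0 */
	if (idx != consumer)		/* two or more outstanding */
		*nonotify = 1;		/* skip the adapter notification */
}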
/**
* aac_queue_get - get the next free QE
@@ -316,41 +345,33 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
* success.
*/
-static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
+int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
struct aac_entry * entry = NULL;
int map = 0;
- struct aac_queue * q = &dev->queues->queue[qid];
-
- spin_lock_irqsave(q->lock, q->SavedIrql);
-
- if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue)
- {
+
+ if (qid == AdapNormCmdQueue) {
/* if no entries wait for some if caller wants to */
- while (!aac_get_entry(dev, qid, &entry, index, nonotify))
- {
+ while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
printk(KERN_ERR "GetEntries failed\n");
}
- /*
- * Setup queue entry with a command, status and fib mapped
- */
- entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
- map = 1;
- }
- else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
- {
- while(!aac_get_entry(dev, qid, &entry, index, nonotify))
- {
+ /*
+ * Setup queue entry with a command, status and fib mapped
+ */
+ entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
+ map = 1;
+ } else {
+ while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
/* if no entries wait for some if caller wants to */
}
- /*
- * Setup queue entry with command, status and fib mapped
- */
- entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
- entry->addr = hw_fib->header.SenderFibAddress;
- /* Restore adapters pointer to the FIB */
- hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter now where to find its data */
- map = 0;
+ /*
+ * Setup queue entry with command, status and fib mapped
+ */
+ entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
+ entry->addr = hw_fib->header.SenderFibAddress;
+ /* Restore adapters pointer to the FIB */
+		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;  /* Let the adapter know where to find its data */
+ map = 0;
}
/*
* If MapFib is true than we need to map the Fib and put pointers
@@ -361,52 +382,16 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
return 0;
}
-
-/**
- * aac_insert_entry - insert a queue entry
- * @dev: Adapter
- * @index: Index of entry to insert
- * @qid: Queue number
- * @nonotify: Suppress adapter notification
- *
- * Gets the next free QE off the requested priorty adapter command
- * queue and associates the Fib with the QE. The QE represented by
- * index is ready to insert on the queue when this routine returns
- * success.
- */
-
-static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
-{
- struct aac_queue * q = &dev->queues->queue[qid];
-
- if(q == NULL)
- BUG();
- *(q->headers.producer) = cpu_to_le32(index + 1);
- spin_unlock_irqrestore(q->lock, q->SavedIrql);
-
- if (qid == AdapHighCmdQueue ||
- qid == AdapNormCmdQueue ||
- qid == AdapHighRespQueue ||
- qid == AdapNormRespQueue)
- {
- if (!nonotify)
- aac_adapter_notify(dev, qid);
- }
- else
- printk("Suprise insert!\n");
- return 0;
-}
-
/*
- * Define the highest level of host to adapter communication routines.
- * These routines will support host to adapter FS commuication. These
+ * Define the highest level of host to adapter communication routines.
+ * These routines will support host to adapter FS communication. These
 * routines have no knowledge of the communication method used. This level
* sends and receives FIBs. This level has no knowledge of how these FIBs
* get passed back and forth.
*/
/**
- * fib_send - send a fib to the adapter
+ * aac_fib_send - send a fib to the adapter
* @command: Command to send
* @fibptr: The fib
* @size: Size of fib data area
@@ -421,29 +406,33 @@ static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned l
* an event to wait on must be supplied. This event will be set when a
* response FIB is received from the adapter.
*/
-
-int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
+
+int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
+ int priority, int wait, int reply, fib_callback callback,
+ void *callback_data)
{
- u32 index;
- u32 qid;
struct aac_dev * dev = fibptr->dev;
- unsigned long nointr = 0;
- struct hw_fib * hw_fib = fibptr->hw_fib;
- struct aac_queue * q;
+ struct hw_fib * hw_fib = fibptr->hw_fib_va;
unsigned long flags = 0;
+ unsigned long qflags;
+ unsigned long mflags = 0;
+ unsigned long sflags = 0;
+
+
if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
return -EBUSY;
/*
- * There are 5 cases with the wait and reponse requested flags.
+ * There are 5 cases with the wait and response requested flags.
* The only invalid cases are if the caller requests to wait and
* does not request a response and if the caller does not want a
* response and the Fib is not allocated from pool. If a response
* is not requesed the Fib will just be deallocaed by the DPC
* routine when the response comes back from the adapter. No
- * further processing will be done besides deleting the Fib. We
+ * further processing will be done besides deleting the Fib. We
* will have a debug mode where the adapter can notify the host
* it had a problem and the host can log that fact.
*/
+ fibptr->flags = 0;
if (wait && !reply) {
return -EINVAL;
} else if (!wait && reply) {
@@ -455,13 +444,13 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
} else if (wait && reply) {
hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
FIB_COUNTER_INCREMENT(aac_config.NormalSent);
- }
+ }
/*
* Map the fib into 32bits by using the fib number
*/
- hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr-dev->fibs)) << 1);
- hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
+ hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
+ hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1;
/*
* Set FIB state to indicate where it came from and if we want a
* response from the adapter. Also load the command from the
@@ -471,38 +460,19 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
*/
hw_fib->header.Command = cpu_to_le16(command);
hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
- fibptr->hw_fib->header.Flags = 0; /* 0 the flags field - internal only*/
/*
* Set the size of the Fib we want to send to the adapter
*/
hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
return -EMSGSIZE;
- }
+ }
/*
 * Get a queue entry, connect the FIB to it and notify
 * the adapter that a command is ready.
*/
- if (priority == FsaHigh) {
- hw_fib->header.XferState |= cpu_to_le32(HighPriority);
- qid = AdapHighCmdQueue;
- } else {
- hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
- qid = AdapNormCmdQueue;
- }
- q = &dev->queues->queue[qid];
+ hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
- if(wait)
- spin_lock_irqsave(&fibptr->event_lock, flags);
- if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
- return -EWOULDBLOCK;
- dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
- dprintk((KERN_DEBUG "Fib contents:.\n"));
- dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
- dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
- dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
- dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
- dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
/*
* Fill in the Callback and CallbackContext if we are not
* going to wait.
@@ -510,31 +480,135 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
if (!wait) {
fibptr->callback = callback;
fibptr->callback_data = callback_data;
+ fibptr->flags = FIB_CONTEXT_FLAG;
}
- FIB_COUNTER_INCREMENT(aac_config.FibsSent);
- list_add_tail(&fibptr->queue, &q->pendingq);
- q->numpending++;
fibptr->done = 0;
- fibptr->flags = 0;
- if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0)
- return -EWOULDBLOCK;
+ FIB_COUNTER_INCREMENT(aac_config.FibsSent);
+
+	dprintk((KERN_DEBUG "Fib contents:\n"));
+ dprintk((KERN_DEBUG " Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
+ dprintk((KERN_DEBUG " SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
+ dprintk((KERN_DEBUG " XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
+ dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib_va));
+ dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
+ dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
+
+ if (!dev->queues)
+ return -EBUSY;
+
+ if (wait) {
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
+ printk(KERN_INFO "No management Fibs Available:%d\n",
+ dev->management_fib_count);
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+ return -EBUSY;
+ }
+ dev->management_fib_count++;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+ spin_lock_irqsave(&fibptr->event_lock, flags);
+ }
+
+ if (dev->sync_mode) {
+ if (wait)
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ spin_lock_irqsave(&dev->sync_lock, sflags);
+ if (dev->sync_fib) {
+ list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
+ spin_unlock_irqrestore(&dev->sync_lock, sflags);
+ } else {
+ dev->sync_fib = fibptr;
+ spin_unlock_irqrestore(&dev->sync_lock, sflags);
+ aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+ (u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+ }
+ if (wait) {
+ fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
+ if (down_interruptible(&fibptr->event_wait)) {
+ fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
+ return -EFAULT;
+ }
+ return 0;
+ }
+ return -EINPROGRESS;
+ }
+
+ if (aac_adapter_deliver(fibptr) != 0) {
+ printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
+ if (wait) {
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+ }
+ return -EBUSY;
+ }
+
/*
- * If the caller wanted us to wait for response wait now.
+	 *	If the caller wanted us to wait for a response, wait now.
*/
-
+
if (wait) {
spin_unlock_irqrestore(&fibptr->event_lock, flags);
- down(&fibptr->event_wait);
- if(fibptr->done == 0)
- BUG();
-
- if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
- return -ETIMEDOUT;
- } else {
- return 0;
+		/* Only set for first known interruptible command */
+ if (wait < 0) {
+ /*
+ * *VERY* Dangerous to time out a command, the
+ * assumption is made that we have no hope of
+ * functioning because an interrupt routing or other
+ * hardware failure has occurred.
+ */
+ unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
+ while (down_trylock(&fibptr->event_wait)) {
+ int blink;
+ if (time_is_before_eq_jiffies(timeout)) {
+ struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
+ spin_lock_irqsave(q->lock, qflags);
+ q->numpending--;
+ spin_unlock_irqrestore(q->lock, qflags);
+ if (wait == -1) {
+ printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
+ "Usually a result of a PCI interrupt routing problem;\n"
+				  "update motherboard BIOS or consider utilizing one of\n"
+ "the SAFE mode kernel options (acpi, apic etc)\n");
+ }
+ return -ETIMEDOUT;
+ }
+ if ((blink = aac_adapter_check_health(dev)) > 0) {
+ if (wait == -1) {
+ printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
+ "Usually a result of a serious unrecoverable hardware problem\n",
+ blink);
+ }
+ return -EFAULT;
+ }
+ /* We used to udelay() here but that absorbed
+			 * a CPU when a timeout occurred. Not very
+ * useful. */
+ cpu_relax();
+ }
+ } else if (down_interruptible(&fibptr->event_wait)) {
+ /* Do nothing ... satisfy
+ * down_interruptible must_check */
+ }
+
+ spin_lock_irqsave(&fibptr->event_lock, flags);
+ if (fibptr->done == 0) {
+ fibptr->done = 2; /* Tell interrupt we aborted */
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ return -ERESTARTSYS;
}
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ BUG_ON(fibptr->done == 0);
+
+ if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
+ return -ETIMEDOUT;
+ return 0;
}
/*
* If the user does not want a response than return success otherwise
@@ -546,15 +620,15 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
return 0;
}
-/**
+/**
* aac_consumer_get - get the top of the queue
* @dev: Adapter
* @q: Queue
* @entry: Return entry
*
* Will return a pointer to the entry on the top of the queue requested that
- * we are a consumer of, and return the address of the queue entry. It does
- * not change the state of the queue.
+ * we are a consumer of, and return the address of the queue entry. It does
+ * not change the state of the queue.
*/
int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
@@ -569,10 +643,10 @@ int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entr
* the end of the queue, else we just use the entry
* pointed to by the header index
*/
- if (le32_to_cpu(*q->headers.consumer) >= q->entries)
- index = 0;
+ if (le32_to_cpu(*q->headers.consumer) >= q->entries)
+ index = 0;
else
- index = le32_to_cpu(*q->headers.consumer);
+ index = le32_to_cpu(*q->headers.consumer);
*entry = q->base + index;
status = 1;
}
@@ -596,37 +670,31 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
wasfull = 1;
-
+
if (le32_to_cpu(*q->headers.consumer) >= q->entries)
*q->headers.consumer = cpu_to_le32(1);
else
- *q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
-
+ le32_add_cpu(q->headers.consumer, 1);
+
if (wasfull) {
switch (qid) {
case HostNormCmdQueue:
notify = HostNormCmdNotFull;
break;
- case HostHighCmdQueue:
- notify = HostHighCmdNotFull;
- break;
case HostNormRespQueue:
notify = HostNormRespNotFull;
break;
- case HostHighRespQueue:
- notify = HostHighRespNotFull;
- break;
default:
BUG();
return;
}
aac_adapter_notify(dev, notify);
}
-}
+}
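
[Editor's note] The consumer-side ring arithmetic is easier to see in isolation. A small userspace model — all names illustrative — of aac_consumer_get()'s wrap-to-slot-0 read and aac_consumer_free()'s 1-based counter increment, where le32_add_cpu() in the driver is simply the endian-safe form of the increment:

#include <stdint.h>

struct ring {
	uint32_t producer;	/* models *q->headers.producer */
	uint32_t consumer;	/* models *q->headers.consumer */
	uint32_t entries;
};

/* aac_consumer_get(): a counter at or past 'entries' reads slot 0. */
static uint32_t consumer_index(const struct ring *q)
{
	return (q->consumer >= q->entries) ? 0 : q->consumer;
}

/* aac_consumer_free(): returns 1 when the queue just left the full
 * state, i.e. when the driver would send a ...NotFull notification. */
static int consumer_free(struct ring *q)
{
	int wasfull = (q->producer + 1) == q->consumer;

	if (q->consumer >= q->entries)
		q->consumer = 1;	/* counter is 1-based, wrap to 1 */
	else
		q->consumer++;
	return wasfull;
}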
/**
- * fib_adapter_complete - complete adapter issued fib
+ * aac_fib_adapter_complete - complete adapter issued fib
* @fibptr: fib to complete
* @size: size of fib
*
@@ -634,96 +702,109 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
* the adapter.
*/
-int fib_adapter_complete(struct fib * fibptr, unsigned short size)
+int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
- struct hw_fib * hw_fib = fibptr->hw_fib;
+ struct hw_fib * hw_fib = fibptr->hw_fib_va;
struct aac_dev * dev = fibptr->dev;
+ struct aac_queue * q;
unsigned long nointr = 0;
- if (hw_fib->header.XferState == 0)
- return 0;
+ unsigned long qflags;
+
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ kfree(hw_fib);
+ return 0;
+ }
+
+ if (hw_fib->header.XferState == 0) {
+ if (dev->comm_interface == AAC_COMM_MESSAGE)
+ kfree(hw_fib);
+ return 0;
+ }
/*
* If we plan to do anything check the structure type first.
- */
- if ( hw_fib->header.StructType != FIB_MAGIC ) {
- return -EINVAL;
+ */
+ if (hw_fib->header.StructType != FIB_MAGIC &&
+ hw_fib->header.StructType != FIB_MAGIC2 &&
+ hw_fib->header.StructType != FIB_MAGIC2_64) {
+ if (dev->comm_interface == AAC_COMM_MESSAGE)
+ kfree(hw_fib);
+ return -EINVAL;
}
/*
* This block handles the case where the adapter had sent us a
* command and we have finished processing the command. We
- * call completeFib when we are done processing the command
- * and want to send a response back to the adapter. This will
+ * call completeFib when we are done processing the command
+ * and want to send a response back to the adapter. This will
* send the completed cdb to the adapter.
*/
if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
- hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
- if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) {
- u32 index;
- if (size)
- {
- size += sizeof(struct aac_fibhdr);
- if (size > le16_to_cpu(hw_fib->header.SenderSize))
- return -EMSGSIZE;
- hw_fib->header.Size = cpu_to_le16(size);
- }
- if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
- return -EWOULDBLOCK;
- }
- if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
- }
- } else if (hw_fib->header.XferState &
- cpu_to_le32(NormalPriority)) {
+ if (dev->comm_interface == AAC_COMM_MESSAGE) {
+ kfree (hw_fib);
+ } else {
u32 index;
-
+ hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
if (size) {
size += sizeof(struct aac_fibhdr);
- if (size > le16_to_cpu(hw_fib->header.SenderSize))
+ if (size > le16_to_cpu(hw_fib->header.SenderSize))
return -EMSGSIZE;
hw_fib->header.Size = cpu_to_le16(size);
}
- if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
- return -EWOULDBLOCK;
- if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
- {
- }
+ q = &dev->queues->queue[AdapNormRespQueue];
+ spin_lock_irqsave(q->lock, qflags);
+ aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
+ *(q->headers.producer) = cpu_to_le32(index + 1);
+ spin_unlock_irqrestore(q->lock, qflags);
+ if (!(nointr & (int)aac_config.irq_mod))
+ aac_adapter_notify(dev, AdapNormRespQueue);
}
+ } else {
+ printk(KERN_WARNING "aac_fib_adapter_complete: "
+ "Unknown xferstate detected.\n");
+ BUG();
}
- else
- {
- printk(KERN_WARNING "fib_adapter_complete: Unknown xferstate detected.\n");
- BUG();
- }
return 0;
}
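
[Editor's note] The magic check is now a three-way test because newer adapter families stamp different header types. A sketch of the widened gate; the numeric values below are placeholders, not the driver's constants:

#include <stdbool.h>
#include <stdint.h>

enum { FIB_MAGIC = 1, FIB_MAGIC2 = 2, FIB_MAGIC2_64 = 3 };	/* placeholders */

/* Accept any of the known header generations, as the code above does. */
static bool fib_magic_ok(uint8_t struct_type)
{
	return struct_type == FIB_MAGIC ||
	       struct_type == FIB_MAGIC2 ||
	       struct_type == FIB_MAGIC2_64;
}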
/**
- * fib_complete - fib completion handler
+ * aac_fib_complete - fib completion handler
* @fib: FIB to complete
*
* Will do all necessary work to complete a FIB.
*/
-
-int fib_complete(struct fib * fibptr)
+
+int aac_fib_complete(struct fib *fibptr)
{
- struct hw_fib * hw_fib = fibptr->hw_fib;
+ unsigned long flags;
+ struct hw_fib * hw_fib = fibptr->hw_fib_va;
/*
* Check for a fib which has already been completed
*/
if (hw_fib->header.XferState == 0)
- return 0;
+ return 0;
/*
* If we plan to do anything check the structure type first.
- */
+ */
- if (hw_fib->header.StructType != FIB_MAGIC)
- return -EINVAL;
+ if (hw_fib->header.StructType != FIB_MAGIC &&
+ hw_fib->header.StructType != FIB_MAGIC2 &&
+ hw_fib->header.StructType != FIB_MAGIC2_64)
+ return -EINVAL;
/*
- * This block completes a cdb which orginated on the host and we
+ *	This block completes a cdb which originated on the host and we
* just need to deallocate the cdb or reinit it. At this point the
* command is complete that we had sent to the adapter and this
* cdb could be reused.
*/
+ spin_lock_irqsave(&fibptr->event_lock, flags);
+ if (fibptr->done == 2) {
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&fibptr->event_lock, flags);
+
if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
{
@@ -740,7 +821,7 @@ int fib_complete(struct fib * fibptr)
fib_dealloc(fibptr);
} else {
BUG();
- }
+ }
return 0;
}
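
[Editor's note] fibptr->done now carries a small three-state handshake between the waiter in aac_fib_send() and the completion paths, which is why aac_fib_complete() above bails out early on the value 2. A hedged restatement; the enum and helper names are illustrative, not driver symbols:

enum fib_done { FIB_PENDING = 0, FIB_COMPLETED = 1, FIB_ABANDONED = 2 };

/* Waiter side: interrupted before completion arrived, so mark the fib
 * abandoned and let the interrupt path finish the teardown. */
static enum fib_done waiter_giveup(enum fib_done d)
{
	return (d == FIB_PENDING) ? FIB_ABANDONED : d;
}

/* aac_fib_complete() skips deallocation in this state because the
 * response has not arrived yet and the fib must stay live. */
static int complete_should_skip(enum fib_done d)
{
	return d == FIB_ABANDONED;
}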
@@ -760,7 +841,7 @@ void aac_printf(struct aac_dev *dev, u32 val)
{
int length = val & 0xffff;
int level = (val >> 16) & 0xffff;
-
+
/*
* The size of the printfbuf is set in port.c
* There is no variable or define for it
@@ -770,13 +851,771 @@ void aac_printf(struct aac_dev *dev, u32 val)
if (cp[length] != 0)
cp[length] = 0;
if (level == LOG_AAC_HIGH_ERROR)
- printk(KERN_WARNING "aacraid:%s", cp);
+ printk(KERN_WARNING "%s:%s", dev->name, cp);
else
- printk(KERN_INFO "aacraid:%s", cp);
+ printk(KERN_INFO "%s:%s", dev->name, cp);
}
- memset(cp, 0, 256);
+ memset(cp, 0, 256);
}
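
[Editor's note] The val argument to aac_printf() packs two fields, which the first two lines of the function unpack. A minimal standalone decode (the level value chosen here is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = (3u << 16) | 17;		/* level 3, 17-byte message */
	printf("length=%u level=%u\n", val & 0xffff, (val >> 16) & 0xffff);
	return 0;
}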
+
+/**
+ * aac_handle_aif - Handle a message from the firmware
+ * @dev: Which adapter this fib is from
+ * @fibptr:	Pointer to the fib received from the adapter
+ *
+ * This routine handles a driver notify fib from the adapter and
+ * dispatches it to the appropriate routine for handling.
+ */
+
+#define AIF_SNIFF_TIMEOUT (30*HZ)
+static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
+{
+ struct hw_fib * hw_fib = fibptr->hw_fib_va;
+ struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
+ u32 channel, id, lun, container;
+ struct scsi_device *device;
+ enum {
+ NOTHING,
+ DELETE,
+ ADD,
+ CHANGE
+ } device_config_needed = NOTHING;
+
+ /* Sniff for container changes */
+
+ if (!dev || !dev->fsa_dev)
+ return;
+ container = channel = id = lun = (u32)-1;
+
+ /*
+ * We have set this up to try and minimize the number of
+ * re-configures that take place. As a result of this when
+ * certain AIF's come in we will set a flag waiting for another
+ * type of AIF before setting the re-config flag.
+ */
+ switch (le32_to_cpu(aifcmd->command)) {
+ case AifCmdDriverNotify:
+ switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
+ /*
+ * Morph or Expand complete
+ */
+ case AifDenMorphComplete:
+ case AifDenVolumeExtendComplete:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+
+ /*
+ * Find the scsi_device associated with the SCSI
+ * address. Make sure we have the right array, and if
+ * so set the flag to initiate a new re-config once we
+ * see an AifEnConfigChange AIF come through.
+ */
+
+ if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
+ device = scsi_device_lookup(dev->scsi_host_ptr,
+ CONTAINER_TO_CHANNEL(container),
+ CONTAINER_TO_ID(container),
+ CONTAINER_TO_LUN(container));
+ if (device) {
+ dev->fsa_dev[container].config_needed = CHANGE;
+ dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
+ dev->fsa_dev[container].config_waiting_stamp = jiffies;
+ scsi_device_put(device);
+ }
+ }
+ }
+
+ /*
+ * If we are waiting on something and this happens to be
+ * that thing then set the re-configure flag.
+ */
+ if (container != (u32)-1) {
+ if (container >= dev->maximum_num_containers)
+ break;
+ if ((dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(__le32 *)aifcmd->data)) &&
+ time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ } else for (container = 0;
+ container < dev->maximum_num_containers; ++container) {
+ if ((dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(__le32 *)aifcmd->data)) &&
+ time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ }
+ break;
+
+ case AifCmdEventNotify:
+ switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
+ case AifEnBatteryEvent:
+ dev->cache_protected =
+ (((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
+ break;
+ /*
+ * Add an Array.
+ */
+ case AifEnAddContainer:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+ dev->fsa_dev[container].config_needed = ADD;
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnConfigChange;
+ dev->fsa_dev[container].config_waiting_stamp = jiffies;
+ break;
+
+ /*
+ * Delete an Array.
+ */
+ case AifEnDeleteContainer:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+ dev->fsa_dev[container].config_needed = DELETE;
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnConfigChange;
+ dev->fsa_dev[container].config_waiting_stamp = jiffies;
+ break;
+
+ /*
+ * Container change detected. If we currently are not
+ * waiting on something else, setup to wait on a Config Change.
+ */
+ case AifEnContainerChange:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if (container >= dev->maximum_num_containers)
+ break;
+ if (dev->fsa_dev[container].config_waiting_on &&
+ time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+ break;
+ dev->fsa_dev[container].config_needed = CHANGE;
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnConfigChange;
+ dev->fsa_dev[container].config_waiting_stamp = jiffies;
+ break;
+
+ case AifEnConfigChange:
+ break;
+
+ case AifEnAddJBOD:
+ case AifEnDeleteJBOD:
+ container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+ if ((container >> 28)) {
+ container = (u32)-1;
+ break;
+ }
+ channel = (container >> 24) & 0xF;
+ if (channel >= dev->maximum_num_channels) {
+ container = (u32)-1;
+ break;
+ }
+ id = container & 0xFFFF;
+ if (id >= dev->maximum_num_physicals) {
+ container = (u32)-1;
+ break;
+ }
+ lun = (container >> 16) & 0xFF;
+ container = (u32)-1;
+ channel = aac_phys_to_logical(channel);
+ device_config_needed =
+ (((__le32 *)aifcmd->data)[0] ==
+ cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
+ if (device_config_needed == ADD) {
+ device = scsi_device_lookup(dev->scsi_host_ptr,
+ channel,
+ id,
+ lun);
+ if (device) {
+ scsi_remove_device(device);
+ scsi_device_put(device);
+ }
+ }
+ break;
+
+ case AifEnEnclosureManagement:
+ /*
+ * If in JBOD mode, automatic exposure of new
+ * physical target to be suppressed until configured.
+ */
+ if (dev->jbod)
+ break;
+ switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
+ case EM_DRIVE_INSERTION:
+ case EM_DRIVE_REMOVAL:
+ container = le32_to_cpu(
+ ((__le32 *)aifcmd->data)[2]);
+ if ((container >> 28)) {
+ container = (u32)-1;
+ break;
+ }
+ channel = (container >> 24) & 0xF;
+ if (channel >= dev->maximum_num_channels) {
+ container = (u32)-1;
+ break;
+ }
+ id = container & 0xFFFF;
+ lun = (container >> 16) & 0xFF;
+ container = (u32)-1;
+ if (id >= dev->maximum_num_physicals) {
+ /* legacy dev_t ? */
+ if ((0x2000 <= id) || lun || channel ||
+ ((channel = (id >> 7) & 0x3F) >=
+ dev->maximum_num_channels))
+ break;
+ lun = (id >> 4) & 7;
+ id &= 0xF;
+ }
+ channel = aac_phys_to_logical(channel);
+ device_config_needed =
+ (((__le32 *)aifcmd->data)[3]
+ == cpu_to_le32(EM_DRIVE_INSERTION)) ?
+ ADD : DELETE;
+ break;
+ }
+ break;
+ }
+
+ /*
+ * If we are waiting on something and this happens to be
+ * that thing then set the re-configure flag.
+ */
+ if (container != (u32)-1) {
+ if (container >= dev->maximum_num_containers)
+ break;
+ if ((dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(__le32 *)aifcmd->data)) &&
+ time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ } else for (container = 0;
+ container < dev->maximum_num_containers; ++container) {
+ if ((dev->fsa_dev[container].config_waiting_on ==
+ le32_to_cpu(*(__le32 *)aifcmd->data)) &&
+ time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+ dev->fsa_dev[container].config_waiting_on = 0;
+ }
+ break;
+
+ case AifCmdJobProgress:
+ /*
+ * These are job progress AIF's. When a Clear is being
+ * done on a container it is initially created then hidden from
+ * the OS. When the clear completes we don't get a config
+ * change so we monitor the job status complete on a clear then
+ * wait for a container change.
+ */
+
+ if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
+ (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
+ ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
+ for (container = 0;
+ container < dev->maximum_num_containers;
+ ++container) {
+ /*
+ * Stomp on all config sequencing for all
+ * containers?
+ */
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnContainerChange;
+ dev->fsa_dev[container].config_needed = ADD;
+ dev->fsa_dev[container].config_waiting_stamp =
+ jiffies;
+ }
+ }
+ if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
+ ((__le32 *)aifcmd->data)[6] == 0 &&
+ ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
+ for (container = 0;
+ container < dev->maximum_num_containers;
+ ++container) {
+ /*
+ * Stomp on all config sequencing for all
+ * containers?
+ */
+ dev->fsa_dev[container].config_waiting_on =
+ AifEnContainerChange;
+ dev->fsa_dev[container].config_needed = DELETE;
+ dev->fsa_dev[container].config_waiting_stamp =
+ jiffies;
+ }
+ }
+ break;
+ }
+
+ container = 0;
+retry_next:
+ if (device_config_needed == NOTHING)
+ for (; container < dev->maximum_num_containers; ++container) {
+ if ((dev->fsa_dev[container].config_waiting_on == 0) &&
+ (dev->fsa_dev[container].config_needed != NOTHING) &&
+ time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
+ device_config_needed =
+ dev->fsa_dev[container].config_needed;
+ dev->fsa_dev[container].config_needed = NOTHING;
+ channel = CONTAINER_TO_CHANNEL(container);
+ id = CONTAINER_TO_ID(container);
+ lun = CONTAINER_TO_LUN(container);
+ break;
+ }
+ }
+ if (device_config_needed == NOTHING)
+ return;
+
+ /*
+ * If we decided that a re-configuration needs to be done,
+ * schedule it here on the way out the door, please close the door
+ * behind you.
+ */
+
+ /*
+ * Find the scsi_device associated with the SCSI address,
+ * and mark it as changed, invalidating the cache. This deals
+ * with changes to existing device IDs.
+ */
+
+ if (!dev || !dev->scsi_host_ptr)
+ return;
+ /*
+ * force reload of disk info via aac_probe_container
+ */
+ if ((channel == CONTAINER_CHANNEL) &&
+ (device_config_needed != NOTHING)) {
+ if (dev->fsa_dev[container].valid == 1)
+ dev->fsa_dev[container].valid = 2;
+ aac_probe_container(dev, container);
+ }
+ device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
+ if (device) {
+ switch (device_config_needed) {
+ case DELETE:
+#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
+ scsi_remove_device(device);
+#else
+ if (scsi_device_online(device)) {
+ scsi_device_set_state(device, SDEV_OFFLINE);
+ sdev_printk(KERN_INFO, device,
+ "Device offlined - %s\n",
+ (channel == CONTAINER_CHANNEL) ?
+ "array deleted" :
+ "enclosure services event");
+ }
+#endif
+ break;
+ case ADD:
+ if (!scsi_device_online(device)) {
+ sdev_printk(KERN_INFO, device,
+ "Device online - %s\n",
+ (channel == CONTAINER_CHANNEL) ?
+ "array created" :
+ "enclosure services event");
+ scsi_device_set_state(device, SDEV_RUNNING);
+ }
+ /* FALLTHRU */
+ case CHANGE:
+ if ((channel == CONTAINER_CHANNEL)
+ && (!dev->fsa_dev[container].valid)) {
+#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
+ scsi_remove_device(device);
+#else
+ if (!scsi_device_online(device))
+ break;
+ scsi_device_set_state(device, SDEV_OFFLINE);
+ sdev_printk(KERN_INFO, device,
+ "Device offlined - %s\n",
+ "array failed");
+#endif
+ break;
+ }
+ scsi_rescan_device(&device->sdev_gendev);
+
+ default:
+ break;
+ }
+ scsi_device_put(device);
+ device_config_needed = NOTHING;
+ }
+ if (device_config_needed == ADD)
+ scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
+ if (channel == CONTAINER_CHANNEL) {
+ container++;
+ device_config_needed = NOTHING;
+ goto retry_next;
+ }
+}
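
[Editor's note] Several of the AIF cases above decode the same packed 32-bit SCSI address: bits 31:28 must be clear, bits 27:24 hold the channel, bits 23:16 the lun, and bits 15:0 the id. A standalone decoder mirroring that layout; the struct and function names are illustrative:

#include <stdint.h>

struct scsi_addr { uint32_t channel, id, lun; };

static int decode_jbod_addr(uint32_t v, struct scsi_addr *a)
{
	if (v >> 28)
		return -1;			/* reserved bits set: reject */
	a->channel = (v >> 24) & 0xF;
	a->lun     = (v >> 16) & 0xFF;
	a->id      = v & 0xFFFF;
	return 0;
}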
+
+static int _aac_reset_adapter(struct aac_dev *aac, int forced)
+{
+ int index, quirks;
+ int retval;
+ struct Scsi_Host *host;
+ struct scsi_device *dev;
+ struct scsi_cmnd *command;
+ struct scsi_cmnd *command_list;
+ int jafo = 0;
+
+ /*
+ * Assumptions:
+ * - host is locked, unless called by the aacraid thread.
+ * (a matter of convenience, due to legacy issues surrounding
+ * eh_host_adapter_reset).
+ * - in_reset is asserted, so no new i/o is getting to the
+ * card.
+ * - The card is dead, or will be very shortly ;-/ so no new
+ * commands are completing in the interrupt service.
+ */
+ host = aac->scsi_host_ptr;
+ scsi_block_requests(host);
+ aac_adapter_disable_int(aac);
+ if (aac->thread->pid != current->pid) {
+ spin_unlock_irq(host->host_lock);
+ kthread_stop(aac->thread);
+ jafo = 1;
+ }
+
+ /*
+	 * A positive health status means the adapter is in a known
+	 * DEAD PANIC state and could be reset to `try again'.
+ */
+ retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));
+
+ if (retval)
+ goto out;
+
+ /*
+ * Loop through the fibs, close the synchronous FIBS
+ */
+ for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
+ struct fib *fib = &aac->fibs[index];
+ if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
+ (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
+ unsigned long flagv;
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ up(&fib->event_wait);
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+ schedule();
+ retval = 0;
+ }
+ }
+ /* Give some extra time for ioctls to complete. */
+ if (retval == 0)
+ ssleep(2);
+ index = aac->cardtype;
+
+ /*
+ * Re-initialize the adapter, first free resources, then carefully
+ * apply the initialization sequence to come back again. Only risk
+	 * is a change in Firmware dropping cache; it is assumed the caller
+	 * will ensure that i/o is quiesced and the card is flushed in that
+ * case.
+ */
+ aac_fib_map_free(aac);
+ pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
+ aac->comm_addr = NULL;
+ aac->comm_phys = 0;
+ kfree(aac->queues);
+ aac->queues = NULL;
+ free_irq(aac->pdev->irq, aac);
+ if (aac->msi)
+ pci_disable_msi(aac->pdev);
+ kfree(aac->fsa_dev);
+ aac->fsa_dev = NULL;
+ quirks = aac_get_driver_ident(index)->quirks;
+ if (quirks & AAC_QUIRK_31BIT) {
+ if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) ||
+ ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31)))))
+ goto out;
+ } else {
+ if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) ||
+ ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32)))))
+ goto out;
+ }
+ if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
+ goto out;
+ if (quirks & AAC_QUIRK_31BIT)
+ if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32))))
+ goto out;
+ if (jafo) {
+ aac->thread = kthread_run(aac_command_thread, aac, "%s",
+ aac->name);
+ if (IS_ERR(aac->thread)) {
+ retval = PTR_ERR(aac->thread);
+ goto out;
+ }
+ }
+ (void)aac_get_adapter_info(aac);
+ if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
+ host->sg_tablesize = 34;
+ host->max_sectors = (host->sg_tablesize * 8) + 112;
+ }
+ if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
+ host->sg_tablesize = 17;
+ host->max_sectors = (host->sg_tablesize * 8) + 112;
+ }
+ aac_get_config_status(aac, 1);
+ aac_get_containers(aac);
+ /*
+ * This is where the assumption that the Adapter is quiesced
+ * is important.
+ */
+ command_list = NULL;
+ __shost_for_each_device(dev, host) {
+ unsigned long flags;
+ spin_lock_irqsave(&dev->list_lock, flags);
+ list_for_each_entry(command, &dev->cmd_list, list)
+ if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+ command->SCp.buffer = (struct scatterlist *)command_list;
+ command_list = command;
+ }
+ spin_unlock_irqrestore(&dev->list_lock, flags);
+ }
+ while ((command = command_list)) {
+ command_list = (struct scsi_cmnd *)command->SCp.buffer;
+ command->SCp.buffer = NULL;
+ command->result = DID_OK << 16
+ | COMMAND_COMPLETE << 8
+ | SAM_STAT_TASK_SET_FULL;
+ command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ command->scsi_done(command);
+ }
+ retval = 0;
+
+out:
+ aac->in_reset = 0;
+ scsi_unblock_requests(host);
+ if (jafo) {
+ spin_lock_irq(host->host_lock);
+ }
+ return retval;
+}
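
[Editor's note] The requeue loop near the end of _aac_reset_adapter() borrows each command's SCp.buffer scratch pointer to chain firmware-owned commands into an intrusive singly linked list under the device list lock, then completes them after the lock is dropped. A generic model of the pattern; the types and names are illustrative:

#include <stddef.h>

struct cmd {
	struct cmd *scratch;	/* stands in for SCp.buffer */
	int result;
};

static struct cmd *chain_push(struct cmd *head, struct cmd *c)
{
	c->scratch = head;	/* done while holding the list lock */
	return c;
}

static struct cmd *chain_pop(struct cmd **head)
{
	struct cmd *c = *head;	/* done after the lock is dropped */
	if (c) {
		*head = c->scratch;
		c->scratch = NULL;
	}
	return c;
}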
+
+int aac_reset_adapter(struct aac_dev * aac, int forced)
+{
+ unsigned long flagv = 0;
+ int retval;
+ struct Scsi_Host * host;
+
+ if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
+ return -EBUSY;
+
+ if (aac->in_reset) {
+ spin_unlock_irqrestore(&aac->fib_lock, flagv);
+ return -EBUSY;
+ }
+ aac->in_reset = 1;
+ spin_unlock_irqrestore(&aac->fib_lock, flagv);
+
+ /*
+ * Wait for all commands to complete to this specific
+ * target (block maximum 60 seconds). Although not necessary,
+ * it does make us a good storage citizen.
+ */
+ host = aac->scsi_host_ptr;
+ scsi_block_requests(host);
+ if (forced < 2) for (retval = 60; retval; --retval) {
+ struct scsi_device * dev;
+ struct scsi_cmnd * command;
+ int active = 0;
+
+ __shost_for_each_device(dev, host) {
+ spin_lock_irqsave(&dev->list_lock, flagv);
+ list_for_each_entry(command, &dev->cmd_list, list) {
+ if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+ active++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->list_lock, flagv);
+ if (active)
+ break;
+
+ }
+ /*
+		 * We can exit if all the commands are complete
+ */
+ if (active == 0)
+ break;
+ ssleep(1);
+ }
+
+ /* Quiesce build, flush cache, write through mode */
+ if (forced < 2)
+ aac_send_shutdown(aac);
+ spin_lock_irqsave(host->host_lock, flagv);
+ retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
+ spin_unlock_irqrestore(host->host_lock, flagv);
+
+ if ((forced < 2) && (retval == -ENODEV)) {
+ /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
+ struct fib * fibctx = aac_fib_alloc(aac);
+ if (fibctx) {
+ struct aac_pause *cmd;
+ int status;
+
+ aac_fib_init(fibctx);
+
+ cmd = (struct aac_pause *) fib_data(fibctx);
+
+ cmd->command = cpu_to_le32(VM_ContainerConfig);
+ cmd->type = cpu_to_le32(CT_PAUSE_IO);
+ cmd->timeout = cpu_to_le32(1);
+ cmd->min = cpu_to_le32(1);
+ cmd->noRescan = cpu_to_le32(1);
+ cmd->count = cpu_to_le32(0);
+
+ status = aac_fib_send(ContainerCommand,
+ fibctx,
+ sizeof(struct aac_pause),
+ FsaNormal,
+ -2 /* Timeout silently */, 1,
+ NULL, NULL);
+
+ if (status >= 0)
+ aac_fib_complete(fibctx);
+ /* FIB should be freed only after getting
+ * the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibctx);
+ }
+ }
+
+ return retval;
+}
+
+int aac_check_health(struct aac_dev * aac)
+{
+ int BlinkLED;
+ unsigned long time_now, flagv = 0;
+ struct list_head * entry;
+ struct Scsi_Host * host;
+
+ /* Extending the scope of fib_lock slightly to protect aac->in_reset */
+ if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
+ return 0;
+
+ if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
+ spin_unlock_irqrestore(&aac->fib_lock, flagv);
+ return 0; /* OK */
+ }
+
+ aac->in_reset = 1;
+
+ /* Fake up an AIF:
+ * aac_aifcmd.command = AifCmdEventNotify = 1
+ * aac_aifcmd.seqnum = 0xFFFFFFFF
+ * aac_aifcmd.data[0] = AifEnExpEvent = 23
+ * aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
+	 * aac_aifcmd.data[2] = AifHighPriority = 3
+	 * aac_aifcmd.data[3] = BlinkLED
+ */
+
+ time_now = jiffies/HZ;
+ entry = aac->fib_list.next;
+
+ /*
+ * For each Context that is on the
+ * fibctxList, make a copy of the
+ * fib, and then set the event to wake up the
+ * thread that is waiting for it.
+ */
+ while (entry != &aac->fib_list) {
+ /*
+ * Extract the fibctx
+ */
+ struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
+ struct hw_fib * hw_fib;
+ struct fib * fib;
+ /*
+ * Check if the queue is getting
+ * backlogged
+ */
+ if (fibctx->count > 20) {
+ /*
+ * It's *not* jiffies folks,
+ * but jiffies / HZ, so do not
+ * panic ...
+ */
+ u32 time_last = fibctx->jiffies;
+ /*
+ * Has it been > 2 minutes
+ * since the last read off
+ * the queue?
+ */
+ if ((time_now - time_last) > aif_timeout) {
+ entry = entry->next;
+ aac_close_fib_context(aac, fibctx);
+ continue;
+ }
+ }
+ /*
+ * Warning: no sleep allowed while
+ * holding spinlock
+ */
+ hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
+ fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
+ if (fib && hw_fib) {
+ struct aac_aifcmd * aif;
+
+ fib->hw_fib_va = hw_fib;
+ fib->dev = aac;
+ aac_fib_init(fib);
+ fib->type = FSAFS_NTC_FIB_CONTEXT;
+ fib->size = sizeof (struct fib);
+ fib->data = hw_fib->data;
+ aif = (struct aac_aifcmd *)hw_fib->data;
+ aif->command = cpu_to_le32(AifCmdEventNotify);
+ aif->seqnum = cpu_to_le32(0xFFFFFFFF);
+ ((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
+ ((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
+ ((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
+ ((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
+
+ /*
+ * Put the FIB onto the
+ * fibctx's fibs
+ */
+ list_add_tail(&fib->fiblink, &fibctx->fib_list);
+ fibctx->count++;
+ /*
+ * Set the event to wake up the
+ * thread that will waiting.
+			 * thread that is waiting.
+ up(&fibctx->wait_sem);
+ } else {
+ printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
+ kfree(fib);
+ kfree(hw_fib);
+ }
+ entry = entry->next;
+ }
+
+ spin_unlock_irqrestore(&aac->fib_lock, flagv);
+
+ if (BlinkLED < 0) {
+ printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
+ goto out;
+ }
+
+ printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
+
+ if (!aac_check_reset || ((aac_check_reset == 1) &&
+ (aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_IGNORE_RESET)))
+ goto out;
+ host = aac->scsi_host_ptr;
+ if (aac->thread->pid != current->pid)
+ spin_lock_irqsave(host->host_lock, flagv);
+ BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
+ if (aac->thread->pid != current->pid)
+ spin_unlock_irqrestore(host->host_lock, flagv);
+ return BlinkLED;
+
+out:
+ aac->in_reset = 0;
+ return BlinkLED;
+}
+
/**
* aac_command_thread - command processing thread
 * @data: Adapter to monitor (a struct aac_dev pointer)
@@ -786,58 +1625,56 @@ void aac_printf(struct aac_dev *dev, u32 val)
* until the queue is empty. When the queue is empty it will wait for
* more FIBs.
*/
-
-int aac_command_thread(struct aac_dev * dev)
+
+int aac_command_thread(void *data)
{
+ struct aac_dev *dev = data;
struct hw_fib *hw_fib, *hw_newfib;
struct fib *fib, *newfib;
- struct aac_queue_block *queues = dev->queues;
struct aac_fib_context *fibctx;
unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
+ unsigned long next_jiffies = jiffies + HZ;
+ unsigned long next_check_jiffies = next_jiffies;
+ long difference = HZ;
/*
* We can only have one thread per adapter for AIF's.
*/
if (dev->aif_thread)
return -EINVAL;
- /*
- * Set up the name that will appear in 'ps'
- * stored in task_struct.comm[16].
- */
- daemonize("aacraid");
- allow_signal(SIGKILL);
+
/*
* Let the DPC know it has a place to send the AIF's to.
*/
dev->aif_thread = 1;
- add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+ add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- while(1)
- {
- spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
- while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) {
+ dprintk ((KERN_INFO "aac_command_thread start\n"));
+ while (1) {
+ spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
+ while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
struct list_head *entry;
struct aac_aifcmd * aifcmd;
set_current_state(TASK_RUNNING);
-
- entry = queues->queue[HostNormCmdQueue].cmdq.next;
+
+ entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
list_del(entry);
-
- spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
+
+ spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
fib = list_entry(entry, struct fib, fiblink);
/*
- * We will process the FIB here or pass it to a
- * worker thread that is TBD. We Really can't
+ * We will process the FIB here or pass it to a
+			 * worker thread that is TBD. We really can't
* do anything at this point since we don't have
* anything defined for this thread to do.
*/
- hw_fib = fib->hw_fib;
+ hw_fib = fib->hw_fib_va;
memset(fib, 0, sizeof(struct fib));
fib->type = FSAFS_NTC_FIB_CONTEXT;
- fib->size = sizeof( struct fib );
- fib->hw_fib = hw_fib;
+ fib->size = sizeof(struct fib);
+ fib->hw_fib_va = hw_fib;
fib->data = hw_fib->data;
fib->dev = dev;
/*
@@ -846,26 +1683,81 @@ int aac_command_thread(struct aac_dev * dev)
aifcmd = (struct aac_aifcmd *) hw_fib->data;
if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
/* Handle Driver Notify Events */
+ aac_handle_aif(dev, fib);
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
- fib_adapter_complete(fib, (u16)sizeof(u32));
+ aac_fib_adapter_complete(fib, (u16)sizeof(u32));
} else {
- struct list_head *entry;
/* The u32 here is important and intended. We are using
32bit wrapping time to fit the adapter field */
-
+
u32 time_now, time_last;
unsigned long flagv;
-
+ unsigned num;
+ struct hw_fib ** hw_fib_pool, ** hw_fib_p;
+ struct fib ** fib_pool, ** fib_p;
+
+ /* Sniff events */
+ if ((aifcmd->command ==
+ cpu_to_le32(AifCmdEventNotify)) ||
+ (aifcmd->command ==
+ cpu_to_le32(AifCmdJobProgress))) {
+ aac_handle_aif(dev, fib);
+ }
+
time_now = jiffies/HZ;
+ /*
+ * Warning: no sleep allowed while
+ * holding spinlock. We take the estimate
+ * and pre-allocate a set of fibs outside the
+ * lock.
+ */
+ num = le32_to_cpu(dev->init->AdapterFibsSize)
+ / sizeof(struct hw_fib); /* some extra */
+ spin_lock_irqsave(&dev->fib_lock, flagv);
+ entry = dev->fib_list.next;
+ while (entry != &dev->fib_list) {
+ entry = entry->next;
+ ++num;
+ }
+ spin_unlock_irqrestore(&dev->fib_lock, flagv);
+ hw_fib_pool = NULL;
+ fib_pool = NULL;
+ if (num
+ && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
+ && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
+ while (hw_fib_p < &hw_fib_pool[num]) {
+ if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
+ --hw_fib_p;
+ break;
+ }
+ if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
+ kfree(*(--hw_fib_p));
+ break;
+ }
+ }
+ if ((num = hw_fib_p - hw_fib_pool) == 0) {
+ kfree(fib_pool);
+ fib_pool = NULL;
+ kfree(hw_fib_pool);
+ hw_fib_pool = NULL;
+ }
+ } else {
+ kfree(hw_fib_pool);
+ hw_fib_pool = NULL;
+ }
spin_lock_irqsave(&dev->fib_lock, flagv);
entry = dev->fib_list.next;
/*
- * For each Context that is on the
+ * For each Context that is on the
* fibctxList, make a copy of the
* fib, and then set the event to wake up the
* thread that is waiting for it.
*/
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
while (entry != &dev->fib_list) {
/*
* Extract the fibctx
@@ -884,11 +1776,11 @@ int aac_command_thread(struct aac_dev * dev)
*/
time_last = fibctx->jiffies;
/*
- * Has it been > 2 minutes
+ * Has it been > 2 minutes
* since the last read off
* the queue?
*/
- if ((time_now - time_last) > 120) {
+ if ((time_now - time_last) > aif_timeout) {
entry = entry->next;
aac_close_fib_context(dev, fibctx);
continue;
@@ -898,32 +1790,30 @@ int aac_command_thread(struct aac_dev * dev)
* Warning: no sleep allowed while
* holding spinlock
*/
- hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
- newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
- if (newfib && hw_newfib) {
+ if (hw_fib_p < &hw_fib_pool[num]) {
+ hw_newfib = *hw_fib_p;
+ *(hw_fib_p++) = NULL;
+ newfib = *fib_p;
+ *(fib_p++) = NULL;
/*
* Make the copy of the FIB
*/
memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
memcpy(newfib, fib, sizeof(struct fib));
- newfib->hw_fib = hw_newfib;
+ newfib->hw_fib_va = hw_newfib;
/*
* Put the FIB onto the
* fibctx's fibs
*/
list_add_tail(&newfib->fiblink, &fibctx->fib_list);
fibctx->count++;
- /*
+ /*
* Set the event to wake up the
- * thread that will waiting.
+ * thread that is waiting.
*/
up(&fibctx->wait_sem);
} else {
printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
- if(newfib)
- kfree(newfib);
- if(hw_newfib)
- kfree(hw_newfib);
}
entry = entry->next;
}
@@ -931,23 +1821,110 @@ int aac_command_thread(struct aac_dev * dev)
* Set the status of this FIB
*/
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
- fib_adapter_complete(fib, sizeof(u32));
+ aac_fib_adapter_complete(fib, sizeof(u32));
spin_unlock_irqrestore(&dev->fib_lock, flagv);
+ /* Free up the remaining resources */
+ hw_fib_p = hw_fib_pool;
+ fib_p = fib_pool;
+ while (hw_fib_p < &hw_fib_pool[num]) {
+ kfree(*hw_fib_p);
+ kfree(*fib_p);
+ ++fib_p;
+ ++hw_fib_p;
+ }
+ kfree(hw_fib_pool);
+ kfree(fib_pool);
}
- spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
kfree(fib);
+ spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
}
/*
* There are no more AIF's
*/
- spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags);
- schedule();
+ spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
- if(signal_pending(current))
- break;
+ /*
+ * Background activity
+ */
+ if ((time_before(next_check_jiffies,next_jiffies))
+ && ((difference = next_check_jiffies - jiffies) <= 0)) {
+ next_check_jiffies = next_jiffies;
+ if (aac_check_health(dev) == 0) {
+ difference = ((long)(unsigned)check_interval)
+ * HZ;
+ next_check_jiffies = jiffies + difference;
+ } else if (!dev->queues)
+ break;
+ }
+ if (!time_before(next_check_jiffies,next_jiffies)
+ && ((difference = next_jiffies - jiffies) <= 0)) {
+ struct timeval now;
+ int ret;
+
+			/* Don't even try to talk to adapter if it's sick */
+ ret = aac_check_health(dev);
+ if (!ret && !dev->queues)
+ break;
+ next_check_jiffies = jiffies
+ + ((long)(unsigned)check_interval)
+ * HZ;
+ do_gettimeofday(&now);
+
+ /* Synchronize our watches */
+ if (((1000000 - (1000000 / HZ)) > now.tv_usec)
+ && (now.tv_usec > (1000000 / HZ)))
+ difference = (((1000000 - now.tv_usec) * HZ)
+ + 500000) / 1000000;
+ else if (ret == 0) {
+ struct fib *fibptr;
+
+ if ((fibptr = aac_fib_alloc(dev))) {
+ int status;
+ __le32 *info;
+
+ aac_fib_init(fibptr);
+
+ info = (__le32 *) fib_data(fibptr);
+ if (now.tv_usec > 500000)
+ ++now.tv_sec;
+
+ *info = cpu_to_le32(now.tv_sec);
+
+ status = aac_fib_send(SendHostTime,
+ fibptr,
+ sizeof(*info),
+ FsaNormal,
+ 1, 1,
+ NULL,
+ NULL);
+ /* Do not set XferState to zero unless
+ * receives a response from F/W */
+ if (status >= 0)
+ aac_fib_complete(fibptr);
+ /* FIB should be freed only after
+ * getting the response from the F/W */
+ if (status != -ERESTARTSYS)
+ aac_fib_free(fibptr);
+ }
+ difference = (long)(unsigned)update_interval*HZ;
+ } else {
+ /* retry shortly */
+ difference = 10 * HZ;
+ }
+ next_jiffies = jiffies + difference;
+ if (time_before(next_check_jiffies,next_jiffies))
+ difference = next_check_jiffies - jiffies;
+ }
+ if (difference <= 0)
+ difference = 1;
set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(difference);
+
+ if (kthread_should_stop())
+ break;
}
- remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait);
+ if (dev->queues)
+ remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
dev->aif_thread = 0;
- complete_and_exit(&dev->aif_completion, 0);
+ return 0;
}
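
[Editor's note] The "synchronize our watches" arithmetic in aac_command_thread() rounds the sleep length so that the next host-time update lands on (approximately) a whole second, and only resyncs when the current offset is at least one tick away from either second boundary. A userspace model of the computation; the HZ value is illustrative:

#include <stdio.h>

#define HZ 250

static long ticks_to_next_second(long tv_usec)
{
	/* same rounding as the driver: nearest tick */
	return (((1000000 - tv_usec) * HZ) + 500000) / 1000000;
}

int main(void)
{
	long usec = 300000;			/* 0.3 s into the second */
	if (((1000000 - (1000000 / HZ)) > usec) && (usec > (1000000 / HZ)))
		printf("sleep %ld ticks\n", ticks_to_next_second(usec));
	return 0;
}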
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index be2e98de9fa..d81b2810f0f 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -1,11 +1,12 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -32,13 +33,11 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
-#include <asm/semaphore.h>
+#include <linux/semaphore.h>
#include "aacraid.h"
@@ -59,9 +58,9 @@ unsigned int aac_response_normal(struct aac_queue * q)
struct hw_fib * hwfib;
struct fib * fib;
int consumed = 0;
- unsigned long flags;
+ unsigned long flags, mflags;
- spin_lock_irqsave(q->lock, flags);
+ spin_lock_irqsave(q->lock, flags);
/*
* Keep pulling response QEs off the response queue and waking
* up the waiters until there are no more QEs. We then return
@@ -73,8 +72,8 @@ unsigned int aac_response_normal(struct aac_queue * q)
int fast;
u32 index = le32_to_cpu(entry->addr);
fast = index & 0x01;
- fib = &dev->fibs[index >> 1];
- hwfib = fib->hw_fib;
+ fib = &dev->fibs[index >> 2];
+ hwfib = fib->hw_fib_va;
aac_consumer_free(dev, q, HostNormRespQueue);
/*
@@ -85,12 +84,13 @@ unsigned int aac_response_normal(struct aac_queue * q)
* continue. The caller has already been notified that
* the fib timed out.
*/
- if (!(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
- list_del(&fib->queue);
- dev->queues->queue[AdapNormCmdQueue].numpending--;
- } else {
- printk(KERN_WARNING "aacraid: FIB timeout (%x).\n", fib->flags);
- printk(KERN_DEBUG"aacraid: hwfib=%p fib index=%i fib=%p\n",hwfib, hwfib->header.SenderData,fib);
+ dev->queues->queue[AdapNormCmdQueue].numpending--;
+
+ if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
+ spin_unlock_irqrestore(q->lock, flags);
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ spin_lock_irqsave(q->lock, flags);
continue;
}
spin_unlock_irqrestore(q->lock, flags);
@@ -101,6 +101,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
*/
*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
+ fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
}
FIB_COUNTER_INCREMENT(aac_config.FibRecved);
@@ -121,14 +122,29 @@ unsigned int aac_response_normal(struct aac_queue * q)
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
+ fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
fib->callback(fib->callback_data, fib);
} else {
unsigned long flagv;
spin_lock_irqsave(&fib->event_lock, flagv);
- fib->done = 1;
- up(&fib->event_wait);
+ if (!fib->done) {
+ fib->done = 1;
+ up(&fib->event_wait);
+ }
spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+
FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+ if (fib->done == 2) {
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ fib->done = 0;
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ }
}
consumed++;
spin_lock_irqsave(q->lock, flags);
@@ -190,7 +206,7 @@ unsigned int aac_command_normal(struct aac_queue *q)
INIT_LIST_HEAD(&fib->fiblink);
fib->type = FSAFS_NTC_FIB_CONTEXT;
fib->size = sizeof(struct fib);
- fib->hw_fib = hw_fib;
+ fib->hw_fib_va = hw_fib;
fib->data = hw_fib->data;
fib->dev = dev;
@@ -206,10 +222,199 @@ unsigned int aac_command_normal(struct aac_queue *q)
* Set the status of this FIB
*/
*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
- fib_adapter_complete(fib, sizeof(u32));
+ aac_fib_adapter_complete(fib, sizeof(u32));
spin_lock_irqsave(q->lock, flags);
}
}
spin_unlock_irqrestore(q->lock, flags);
return 0;
}
+
+/*
+ *
+ * aac_aif_callback
+ * @context: the context set in the fib - here it is the request fib itself
+ * @fibptr: pointer to the fib
+ *
+ * Handles the AIFs - new method (SRC)
+ *
+ */
+
+static void aac_aif_callback(void *context, struct fib * fibptr)
+{
+ struct fib *fibctx;
+ struct aac_dev *dev;
+ struct aac_aifcmd *cmd;
+ int status;
+
+ fibctx = (struct fib *)context;
+ BUG_ON(fibptr == NULL);
+ dev = fibptr->dev;
+
+ if (fibptr->hw_fib_va->header.XferState &
+ cpu_to_le32(NoMoreAifDataAvailable)) {
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return;
+ }
+
+ aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);
+
+ aac_fib_init(fibctx);
+ cmd = (struct aac_aifcmd *) fib_data(fibctx);
+ cmd->command = cpu_to_le32(AifReqEvent);
+
+ status = aac_fib_send(AifRequest,
+ fibctx,
+ sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+ FsaNormal,
+ 0, 1,
+ (fib_callback)aac_aif_callback, fibctx);
+}
+
+/**
+ * aac_intr_normal - Handle command replies
+ * @dev: Device
+ * @index: completion reference
+ * @isAif: AIF type (0 = command response, 1 = AIF, 2 = AIF via new SRC method)
+ * @isFastResponse: nonzero when the adapter signalled a fast response
+ * @aif_fib: hw_fib already copied out by the caller, or NULL
+ *
+ * This DPC routine will be run when the adapter interrupts us to let us
+ * know there is a response on our normal priority queue. We will pull off
+ * all the QEs there are and wake up all the waiters before exiting.
+ */
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
+ int isAif, int isFastResponse, struct hw_fib *aif_fib)
+{
+ unsigned long mflags;
+ dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
+ if (isAif == 1) { /* AIF - common */
+ struct hw_fib * hw_fib;
+ struct fib * fib;
+ struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
+ unsigned long flags;
+
+ /*
+ * Allocate a FIB. For non queued stuff we can just use
+ * the stack so we are happy. We need a fib object in order to
+ * manage the linked lists.
+ */
+ if ((!dev->aif_thread)
+ || (!(fib = kzalloc(sizeof(struct fib),GFP_ATOMIC))))
+ return 1;
+ if (!(hw_fib = kzalloc(sizeof(struct hw_fib),GFP_ATOMIC))) {
+ kfree (fib);
+ return 1;
+ }
+ if (aif_fib != NULL) {
+ memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
+ } else {
+ memcpy(hw_fib,
+ (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
+ index), sizeof(struct hw_fib));
+ }
+ INIT_LIST_HEAD(&fib->fiblink);
+ fib->type = FSAFS_NTC_FIB_CONTEXT;
+ fib->size = sizeof(struct fib);
+ fib->hw_fib_va = hw_fib;
+ fib->data = hw_fib->data;
+ fib->dev = dev;
+
+ spin_lock_irqsave(q->lock, flags);
+ list_add_tail(&fib->fiblink, &q->cmdq);
+ wake_up_interruptible(&q->cmdready);
+ spin_unlock_irqrestore(q->lock, flags);
+ return 1;
+ } else if (isAif == 2) { /* AIF - new (SRC) */
+ struct fib *fibctx;
+ struct aac_aifcmd *cmd;
+
+ fibctx = aac_fib_alloc(dev);
+ if (!fibctx)
+ return 1;
+ aac_fib_init(fibctx);
+
+ cmd = (struct aac_aifcmd *) fib_data(fibctx);
+ cmd->command = cpu_to_le32(AifReqEvent);
+
+ return aac_fib_send(AifRequest,
+ fibctx,
+ sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+ FsaNormal,
+ 0, 1,
+ (fib_callback)aac_aif_callback, fibctx);
+ } else {
+ struct fib *fib = &dev->fibs[index];
+ struct hw_fib * hwfib = fib->hw_fib_va;
+
+ /*
+ * Remove this fib from the Outstanding I/O queue.
+ * But only if it has not already been timed out.
+ *
+ * If the fib has been timed out already, then just
+ * continue. The caller has already been notified that
+ * the fib timed out.
+ */
+ dev->queues->queue[AdapNormCmdQueue].numpending--;
+
+ if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ return 0;
+ }
+
+ if (isFastResponse) {
+ /*
+ * Doctor the fib
+ */
+ *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
+ hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
+ fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
+ }
+
+ FIB_COUNTER_INCREMENT(aac_config.FibRecved);
+
+ if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
+ {
+ __le32 *pstatus = (__le32 *)hwfib->data;
+ if (*pstatus & cpu_to_le32(0xffff0000))
+ *pstatus = cpu_to_le32(ST_OK);
+ }
+ if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async))
+ {
+ if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
+ FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
+ else
+ FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
+ /*
+ * NOTE: we cannot touch the fib after this
+ * call, because it may have been deallocated.
+ */
+ fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
+ fib->callback(fib->callback_data, fib);
+ } else {
+ unsigned long flagv;
+ dprintk((KERN_INFO "event_wait up\n"));
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ if (!fib->done) {
+ fib->done = 1;
+ up(&fib->event_wait);
+ }
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+ spin_lock_irqsave(&dev->manage_lock, mflags);
+ dev->management_fib_count--;
+ spin_unlock_irqrestore(&dev->manage_lock, mflags);
+
+ FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+ if (fib->done == 2) {
+ spin_lock_irqsave(&fib->event_lock, flagv);
+ fib->done = 0;
+ spin_unlock_irqrestore(&fib->event_lock, flagv);
+ aac_fib_complete(fib);
+ aac_fib_free(fib);
+ }
+
+ }
+ return 0;
+ }
+}
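
[Editor's note] The fast/index packing seen in aac_response_normal() earlier in this file (fast = index & 0x01, fib = &dev->fibs[index >> 2]) matches the << 2 encoding of SenderFibAddress in aac_fib_send(): the low two bits are left free for flags, with bit 0 carrying the adapter's fast-response indication. A standalone round-trip of that encoding:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t index = 37;
	uint32_t ref = (index << 2) | 0x01;	/* fast-response flag set */

	printf("fast=%u index=%u\n", ref & 0x01, ref >> 2);
	return 0;
}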
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index c1a4f978fcb..4921ed19a02 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1,11 +1,12 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -27,9 +28,6 @@
* Abstract: Linux Driver entry module for Adaptec RAID Array Controller
*/
-#define AAC_DRIVER_VERSION "1.1.2-lk2"
-#define AAC_DRIVER_BUILD_DATE __DATE__
-#define AAC_DRIVERNAME "aacraid"
#include <linux/compat.h>
#include <linux/blkdev.h>
@@ -40,13 +38,13 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/slab.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
-#include <linux/ioctl32.h>
#include <linux/delay.h>
-#include <linux/smp_lock.h>
-#include <asm/semaphore.h>
+#include <linux/kthread.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -58,16 +56,31 @@
#include "aacraid.h"
+#define AAC_DRIVER_VERSION "1.2-0"
+#ifndef AAC_DRIVER_BRANCH
+#define AAC_DRIVER_BRANCH ""
+#endif
+#define AAC_DRIVERNAME "aacraid"
+
+#ifdef AAC_DRIVER_BUILD
+#define _str(x) #x
+#define str(x) _str(x)
+#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
+#else
+#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH
+#endif
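
[Editor's note] AAC_DRIVER_FULL_VERSION relies on the classic two-level stringification idiom: the extra _str() hop forces AAC_DRIVER_BUILD to be macro-expanded before # turns it into a string. A standalone illustration with a made-up build number:

#include <stdio.h>

#define _str(x) #x
#define str(x) _str(x)
#define BUILD 26400			/* illustrative build number */

int main(void)
{
	puts("1.2-0[" str(BUILD) "]");	/* prints 1.2-0[26400] */
	return 0;
}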
MODULE_AUTHOR("Red Hat Inc and Adaptec");
MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
"Adaptec Advanced Raid Products, "
- "and HP NetRAID-4M SCSI driver");
+ "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(AAC_DRIVER_VERSION);
+MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
+static DEFINE_MUTEX(aac_mutex);
static LIST_HEAD(aac_devices);
static int aac_cfg_major = -1;
+char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
/*
* Because of the way Linux names scsi devices, the order in this table has
@@ -75,7 +88,7 @@ static int aac_cfg_major = -1;
*
* Note: The last field is used to index into aac_drivers below.
*/
-static struct pci_device_id aac_pci_tbl[] = {
+static const struct pci_device_id aac_pci_tbl[] = {
{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si */
@@ -102,69 +115,77 @@ static struct pci_device_id aac_pci_tbl[] = {
{ 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
{ 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
{ 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
- { 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024R0 (Lancer) */
- { 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014R0 (Lancer) */
+ { 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024RO (Lancer) */
+ { 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014RO (Lancer) */
{ 0x9005, 0x0286, 0x9005, 0x02a0, 0, 0, 27 }, /* ICP9047MA (Lancer) */
{ 0x9005, 0x0286, 0x9005, 0x02a1, 0, 0, 28 }, /* ICP9087MA (Lancer) */
- { 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5085AU (Hurricane) */
+ { 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5445AU (Hurricane44) */
{ 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
{ 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
- { 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 32 }, /* Themisto Jupiter Platform */
- { 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 32 }, /* Themisto Jupiter Platform */
- { 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 33 }, /* Callisto Jupiter Platform */
- { 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 34 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
- { 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 35 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
- { 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 36 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
- { 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 37 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
- { 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 38 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
- { 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 39 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
- { 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 40 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
- { 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 41 }, /* AAR-2610SA PCI SATA 6ch */
- { 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 42 }, /* ASR-2240S (SabreExpress) */
- { 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 43 }, /* ASR-4005SAS */
- { 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 44 }, /* IBM 8i (AvonPark) */
- { 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 44 }, /* IBM 8i (AvonPark Lite) */
- { 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 45 }, /* ASR-4000SAS (BlackBird) */
- { 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 46 }, /* ASR-4800SAS (Marauder-X) */
- { 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 47 }, /* ASR-4805SAS (Marauder-E) */
- { 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 48 }, /* ASR-4810SAS (Hurricane */
-
- { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 49 }, /* Perc 320/DC*/
- { 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 50 }, /* Adaptec 5400S (Mustang)*/
- { 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 51 }, /* Adaptec 5400S (Mustang)*/
- { 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 52 }, /* Dell PERC2/QC */
- { 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 53 }, /* HP NetRAID-4M */
-
- { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 54 }, /* Dell Catchall */
- { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 55 }, /* Legend Catchall */
- { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 56 }, /* Adaptec Catch All */
- { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 57 }, /* Adaptec Rocket Catch All */
+ { 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
+ { 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
+ { 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
+ { 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
+ { 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
+ { 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
+ { 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
+ { 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
+ { 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
+ { 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
+ { 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
+ { 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
+ { 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
+ { 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005 */
+ { 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
+ { 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
+ { 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
+ { 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
+ { 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000 (BlackBird) */
+ { 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
+ { 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
+ { 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-3800 (Hurricane44) */
+
+ { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
+ { 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
+ { 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
+ { 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
+ { 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
+
+ { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
+ { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
+ { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
+ { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
+ { 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
+ { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
+ { 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
+ { 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
+ { 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
{ 0,}
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
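Aside: the last field of each entry above (the driver_data slot) indexes the aac_drivers[] table below. A minimal sketch of the lookup, assuming the declarations in this file; the body is illustrative only, not part of the patch.

static int example_lookup(const struct pci_device_id *id)
{
	unsigned index = id->driver_data;	/* last field of the matched row */
	struct aac_driver_ident *ident = &aac_drivers[index];

	/* ident->init is aac_rx_init, aac_rkt_init, aac_sa_init, ...;
	 * the Themisto slave entry deliberately carries a NULL init. */
	return ident->init ? 0 : -ENODEV;
}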
/*
- * dmb - For now we add the number of channels to this structure.
+ * dmb - For now we add the number of channels to this structure.
* In the future we should add a fib that reports the number of channels
* for the card. At that time we can remove the channels from here
*/
static struct aac_driver_ident aac_drivers[] = {
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 2/Si (Iguana/PERC2Si) */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Opal/PERC3Di) */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Si (SlimFast/PERC3Si */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Viper/PERC3DiV) */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Lexus/PERC3DiL) */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Dagger/PERC3DiD) */
- { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* PERC 3/Di (Boxster/PERC3DiB) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* catapult */
- { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* tomcat */
- { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan-2m) */
- { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S220 (Legend Crusader) */
- { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend S230 (Legend Vulcan) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
+ { aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
+ { aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
+ { aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */
@@ -175,41 +196,49 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2820SA ", 1 }, /* AAR-2820SA (Intruder) */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2620SA ", 1 }, /* AAR-2620SA (Intruder) */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2420SA ", 1 }, /* AAR-2420SA (Intruder) */
- { aac_rkt_init, "aacraid", "ICP ", "ICP9024R0 ", 2 }, /* ICP9024R0 (Lancer) */
- { aac_rkt_init, "aacraid", "ICP ", "ICP9014R0 ", 1 }, /* ICP9014R0 (Lancer) */
+ { aac_rkt_init, "aacraid", "ICP ", "ICP9024RO ", 2 }, /* ICP9024RO (Lancer) */
+ { aac_rkt_init, "aacraid", "ICP ", "ICP9014RO ", 1 }, /* ICP9014RO (Lancer) */
{ aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */
{ aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */
- { aac_rkt_init, "aacraid", "ICP ", "ICP5085AU ", 1 }, /* ICP5085AU (Hurricane) */
- { aac_rkt_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */
- { aac_rkt_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */
+ { aac_rkt_init, "aacraid", "ICP ", "ICP5445AU ", 1 }, /* ICP5445AU (Hurricane44) */
+ { aac_rx_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */
+ { aac_rx_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */
+ { aac_rkt_init, "aacraid", "ICP ", "ICP9067MA ", 1 }, /* ICP9067MA (Intruder-6) */
{ NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025SA ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 1 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
- { aac_rx_init, "aacraid", "DELL ", "CERC SR2 ", 1 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 1 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA", 1 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
+ { aac_rx_init, "aacraid", "DELL ", "CERC SR2 ", 1, AAC_QUIRK_17SG }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA", 1, AAC_QUIRK_17SG }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2026ZCR ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2610SA ", 1 }, /* SATA 6Ch (Bearcat) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005SAS ", 1 }, /* ASR-4005SAS */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005 ", 1 }, /* ASR-4005 */
{ aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000SAS ", 1 }, /* ASR-4000SAS (BlackBird & AvonPark) */
+ { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
+ { aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000 ", 1 }, /* ASR-4000 (BlackBird & AvonPark) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */
- { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4810SAS ", 1 }, /* ASR-4810SAS (Hurricane) */
+ { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-3800 ", 1 }, /* ASR-3800 (Hurricane44) */
{ aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
{ aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
{ aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
- { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell PERC2/QC */
+ { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
{ aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
- { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */
- { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */
- { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec Catch All */
- { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec Rocket Catch All */
+ { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
+ { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
+ { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
+ { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
+ { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
+ { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */
};
/**
@@ -220,13 +249,28 @@ static struct aac_driver_ident aac_drivers[] = {
* Queues a command for execution by the associated Host Adapter.
*
* TODO: unify with aac_scsi_cmd().
- */
+ */
-static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+static int aac_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
+ struct Scsi_Host *host = cmd->device->host;
+ struct aac_dev *dev = (struct aac_dev *)host->hostdata;
+ u32 count = 0;
cmd->scsi_done = done;
+ for (; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
+ struct fib * fib = &dev->fibs[count];
+ struct scsi_cmnd * command;
+ if (fib->hw_fib_va->header.XferState &&
+ ((command = fib->callback_data)) &&
+ (command == cmd) &&
+ (cmd->SCp.phase == AAC_OWNER_FIRMWARE))
+ return 0; /* Already owned by Adapter */
+ }
+ cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
return (aac_scsi_cmd(cmd) ? FAILED : 0);
-}
+}
+
+static DEF_SCSI_QCMD(aac_queuecommand)
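Aside: a minimal sketch of the ownership test the loop above performs, assuming the fib layout and SCp.phase values from aacraid.h. cmd->SCp.phase records the current owner of a command (AAC_OWNER_LOWLEVEL, AAC_OWNER_FIRMWARE, AAC_OWNER_ERROR_HANDLER), and queuecommand refuses to resubmit one the firmware still holds.

static int example_firmware_owns(struct fib *fib, struct scsi_cmnd *cmd)
{
	return fib->hw_fib_va->header.XferState &&	/* FIB is live */
	       fib->callback_data == cmd &&		/* bound to this command */
	       cmd->SCp.phase == AAC_OWNER_FIRMWARE;	/* adapter owns it */
}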
/**
* aac_info - Returns the host adapter name
@@ -243,9 +287,9 @@ static const char *aac_info(struct Scsi_Host *shost)
/**
* aac_get_driver_ident
- * @devtype: index into lookup table
+ * @devtype: index into lookup table
*
- * Returns a pointer to the entry in the driver lookup table.
+ * Returns a pointer to the entry in the driver lookup table.
*/
struct aac_driver_ident* aac_get_driver_ident(int devtype)
@@ -260,21 +304,21 @@ struct aac_driver_ident* aac_get_driver_ident(int devtype)
* @capacity: the sector capacity of the disk
* @geom: geometry block to fill in
*
- * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
- * The default disk geometry is 64 heads, 32 sectors, and the appropriate
- * number of cylinders so as not to exceed drive capacity. In order for
+ * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
+ * The default disk geometry is 64 heads, 32 sectors, and the appropriate
+ * number of cylinders so as not to exceed drive capacity. In order for
* disks equal to or larger than 1 GB to be addressable by the BIOS
- * without exceeding the BIOS limitation of 1024 cylinders, Extended
- * Translation should be enabled. With Extended Translation enabled,
- * drives between 1 GB inclusive and 2 GB exclusive are given a disk
- * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
- * are given a disk geometry of 255 heads and 63 sectors. However, if
- * the BIOS detects that the Extended Translation setting does not match
- * the geometry in the partition table, then the translation inferred
- * from the partition table will be used by the BIOS, and a warning may
+ * without exceeding the BIOS limitation of 1024 cylinders, Extended
+ * Translation should be enabled. With Extended Translation enabled,
+ * drives between 1 GB inclusive and 2 GB exclusive are given a disk
+ * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
+ * are given a disk geometry of 255 heads and 63 sectors. However, if
+ * the BIOS detects that the Extended Translation setting does not match
+ * the geometry in the partition table, then the translation inferred
+ * from the partition table will be used by the BIOS, and a warning may
* be displayed.
*/
-
+
static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
sector_t capacity, int *geom)
{
@@ -301,13 +345,15 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
- /*
+ /*
* Read the first 1024 bytes from the disk device, if the boot
* sector partition table is valid, search for a partition table
- * entry whose end_head matches one of the standard geometry
+ * entry whose end_head matches one of the standard geometry
* translations ( 64/32, 128/32, 255/63 ).
*/
buf = scsi_bios_ptable(bdev);
+ if (!buf)
+ return 0;
if(*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
struct partition *first = (struct partition * )buf;
struct partition *entry = first;
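Aside: the Extended Translation rules described in the comment above, as a minimal sketch with capacity in 512-byte sectors (1 GB = 2097152 sectors, 2 GB = 4194304 sectors); illustrative, not part of the patch.

static void example_geometry(unsigned long long cap, int *heads, int *sectors)
{
	if (cap >= 4194304ULL) {	/* 2 GB and up: 255 heads, 63 sectors */
		*heads = 255;
		*sectors = 63;
	} else if (cap >= 2097152ULL) {	/* 1 GB to 2 GB: 128 heads, 32 sectors */
		*heads = 128;
		*sectors = 32;
	} else {			/* default geometry: 64 heads, 32 sectors */
		*heads = 64;
		*sectors = 32;
	}
	/* cylinders then follow as cap / (heads * sectors), per cap_to_cyls() */
}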
@@ -367,25 +413,186 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
static int aac_slave_configure(struct scsi_device *sdev)
{
- struct Scsi_Host *host = sdev->host;
+ struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+ if (aac->jbod && (sdev->type == TYPE_DISK))
+ sdev->removable = 1;
+ if ((sdev->type == TYPE_DISK) &&
+ (sdev_channel(sdev) != CONTAINER_CHANNEL) &&
+ (!aac->jbod || sdev->inq_periph_qual) &&
+ (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
+ if (expose_physicals == 0)
+ return -ENXIO;
+ if (expose_physicals < 0)
+ sdev->no_uld_attach = 1;
+ }
+ if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
+ (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) &&
+ !sdev->no_uld_attach) {
+ struct scsi_device * dev;
+ struct Scsi_Host *host = sdev->host;
+ unsigned num_lsu = 0;
+ unsigned num_one = 0;
+ unsigned depth;
+ unsigned cid;
- if (sdev->tagged_supported)
- scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 128);
- else
+ /*
+ * Firmware has an individual device recovery time typically
+ * of 35 seconds, give us a margin.
+ */
+ if (sdev->request_queue->rq_timeout < (45 * HZ))
+ blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+ for (cid = 0; cid < aac->maximum_num_containers; ++cid)
+ if (aac->fsa_dev[cid].valid)
+ ++num_lsu;
+ __shost_for_each_device(dev, host) {
+ if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
+ (!aac->raid_scsi_mode ||
+ (sdev_channel(sdev) != 2)) &&
+ !dev->no_uld_attach) {
+ if ((sdev_channel(dev) != CONTAINER_CHANNEL)
+ || !aac->fsa_dev[sdev_id(dev)].valid)
+ ++num_lsu;
+ } else
+ ++num_one;
+ }
+ if (num_lsu == 0)
+ ++num_lsu;
+ depth = (host->can_queue - num_one) / num_lsu;
+ if (depth > 256)
+ depth = 256;
+ else if (depth < 2)
+ depth = 2;
+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
+ } else
scsi_adjust_queue_depth(sdev, 0, 1);
- if (host->max_sectors < AAC_MAX_32BIT_SGBCOUNT)
- blk_queue_max_segment_size(sdev->request_queue, 65536);
-
return 0;
}
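Aside: the queue-depth arithmetic above in isolation, as a minimal sketch. The host's can_queue, less the untagged devices (num_one), is split evenly over the logical storage units (num_lsu) and clamped to the 2..256 range.

static unsigned example_depth(unsigned can_queue, unsigned num_one,
			      unsigned num_lsu)
{
	unsigned depth;

	if (num_lsu == 0)
		num_lsu = 1;		/* mirror the ++num_lsu guard above */
	depth = (can_queue - num_one) / num_lsu;
	if (depth > 256)
		depth = 256;
	else if (depth < 2)
		depth = 2;
	return depth;
}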
+/**
+ * aac_change_queue_depth - alter queue depths
+ * @sdev: SCSI device we are considering
+ * @depth: desired queue depth
+ *
+ * Alters queue depths for target device based on the host adapter's
+ * total capacity and the queue depth supported by the target device.
+ */
+
+static int aac_change_queue_depth(struct scsi_device *sdev, int depth,
+ int reason)
+{
+ if (reason != SCSI_QDEPTH_DEFAULT)
+ return -EOPNOTSUPP;
+
+ if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
+ (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
+ struct scsi_device * dev;
+ struct Scsi_Host *host = sdev->host;
+ unsigned num = 0;
+
+ __shost_for_each_device(dev, host) {
+ if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
+ (sdev_channel(dev) == CONTAINER_CHANNEL))
+ ++num;
+ ++num;
+ }
+ if (num >= host->can_queue)
+ num = host->can_queue - 1;
+ if (depth > (host->can_queue - num))
+ depth = host->can_queue - num;
+ if (depth > 256)
+ depth = 256;
+ else if (depth < 2)
+ depth = 2;
+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
+ } else
+ scsi_adjust_queue_depth(sdev, 0, 1);
+ return sdev->queue_depth;
+}
+
+static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
+ if (sdev_channel(sdev) != CONTAINER_CHANNEL)
+ return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
+ ? "Hidden\n" :
+ ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : ""));
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ get_container_type(aac->fsa_dev[sdev_id(sdev)].type));
+}
+
+static struct device_attribute aac_raid_level_attr = {
+ .attr = {
+ .name = "level",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_raid_level
+};
+
+static struct device_attribute *aac_dev_attrs[] = {
+ &aac_raid_level_attr,
+ NULL,
+};
+
static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
{
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
return aac_do_ioctl(dev, cmd, arg);
}
+static int aac_eh_abort(struct scsi_cmnd* cmd)
+{
+ struct scsi_device * dev = cmd->device;
+ struct Scsi_Host * host = dev->host;
+ struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+ int count;
+ int ret = FAILED;
+
+ printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%d)\n",
+ AAC_DRIVERNAME,
+ host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
+ switch (cmd->cmnd[0]) {
+ case SERVICE_ACTION_IN:
+ if (!(aac->raw_io_interface) ||
+ !(aac->raw_io_64) ||
+ ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+ break;
+ case INQUIRY:
+ case READ_CAPACITY:
+ /* Mark associated FIB to not complete, eh handler does this */
+ for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
+ struct fib * fib = &aac->fibs[count];
+ if (fib->hw_fib_va->header.XferState &&
+ (fib->flags & FIB_CONTEXT_FLAG) &&
+ (fib->callback_data == cmd)) {
+ fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+ cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ ret = SUCCESS;
+ }
+ }
+ break;
+ case TEST_UNIT_READY:
+ /* Mark associated FIB to not complete, eh handler does this */
+ for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
+ struct scsi_cmnd * command;
+ struct fib * fib = &aac->fibs[count];
+ if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) &&
+ (fib->flags & FIB_CONTEXT_FLAG) &&
+ ((command = fib->callback_data)) &&
+ (command->device == cmd->device)) {
+ fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+ command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ if (command == cmd)
+ ret = SUCCESS;
+ }
+ }
+ }
+ return ret;
+}
+
/*
* aac_eh_reset - Reset command handling
* @scsi_cmd: SCSI command block causing the reset
@@ -397,32 +604,37 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
struct Scsi_Host * host = dev->host;
struct scsi_cmnd * command;
int count;
- struct aac_dev * aac;
+ struct aac_dev * aac = (struct aac_dev *)host->hostdata;
unsigned long flags;
- printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
+ /* Mark the associated FIB to not complete, eh handler does this */
+ for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
+ struct fib * fib = &aac->fibs[count];
+ if (fib->hw_fib_va->header.XferState &&
+ (fib->flags & FIB_CONTEXT_FLAG) &&
+ (fib->callback_data == cmd)) {
+ fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+ cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ }
+ }
+ printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
AAC_DRIVERNAME);
-
- spin_lock_irq(host->host_lock);
-
- aac = (struct aac_dev *)host->hostdata;
- if (aac_adapter_check_health(aac)) {
- printk(KERN_ERR "%s: Host adapter appears dead\n",
- AAC_DRIVERNAME);
- spin_unlock_irq(host->host_lock);
- return -ENODEV;
- }
+ if ((count = aac_check_health(aac)))
+ return count;
/*
* Wait for all commands to complete to this specific
* target (block maximum 60 seconds).
*/
for (count = 60; count; --count) {
- int active = 0;
+ int active = aac->in_reset;
+
+ if (active == 0)
__shost_for_each_device(dev, host) {
spin_lock_irqsave(&dev->list_lock, flags);
list_for_each_entry(command, &dev->cmd_list, list) {
- if (command->serial_number) {
+ if ((command != cmd) &&
+ (command->SCp.phase == AAC_OWNER_FIRMWARE)) {
active++;
break;
}
@@ -437,13 +649,23 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
*/
if (active == 0)
return SUCCESS;
- spin_unlock_irq(host->host_lock);
ssleep(1);
- spin_lock_irq(host->host_lock);
}
- spin_unlock_irq(host->host_lock);
printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
- return -ETIMEDOUT;
+ /*
+ * This adapter needs a blind reset; only do so for adapters that
+ * support a register-based, rather than a commanded, reset.
+ */
+ if (((aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_MU_RESET) ||
+ (aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_DOORBELL_RESET)) &&
+ aac_check_reset &&
+ ((aac_check_reset != 1) ||
+ !(aac->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_IGNORE_RESET)))
+ aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
+ return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
}
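Aside: the blind-reset gate above, restated as a minimal predicate over SupportedOptions2 and the aac_check_reset module parameter; illustrative, not part of the patch.

static int example_may_blind_reset(u32 options2, int check_reset)
{
	int register_reset = options2 & (AAC_OPTION_MU_RESET |
					 AAC_OPTION_DOORBELL_RESET);

	/* check_reset == 1 honours the firmware's IGNORE_RESET request;
	 * any other non-zero value overrides it. */
	return register_reset && check_reset &&
	       (check_reset != 1 ||
		!(options2 & AAC_OPTION_IGNORE_RESET));
}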
/**
@@ -464,6 +686,7 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
unsigned minor_number = iminor(inode);
int err = -ENODEV;
+ mutex_lock(&aac_mutex); /* BKL pushdown: nothing else protects this list */
list_for_each_entry(aac, &aac_devices, entry) {
if (aac->id == minor_number) {
file->private_data = aac;
@@ -471,6 +694,7 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
break;
}
}
+ mutex_unlock(&aac_mutex);
return err;
}
@@ -488,19 +712,26 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
* Bugs: Needs locking against parallel ioctls lower down
* Bugs: Needs to handle hot plugging
*/
-
-static int aac_cfg_ioctl(struct inode *inode, struct file *file,
+
+static long aac_cfg_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- return aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
+ int ret;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ mutex_lock(&aac_mutex);
+ ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
+ mutex_unlock(&aac_mutex);
+
+ return ret;
}
#ifdef CONFIG_COMPAT
static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
{
long ret;
- lock_kernel();
- switch (cmd) {
+ mutex_lock(&aac_mutex);
+ switch (cmd) {
case FSACTL_MINIPORT_REV_CHECK:
case FSACTL_SENDFIB:
case FSACTL_OPEN_GET_ADAPTER_FIB:
@@ -510,211 +741,370 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
case FSACTL_QUERY_DISK:
case FSACTL_DELETE_DISK:
case FSACTL_FORCE_DELETE_DISK:
- case FSACTL_GET_CONTAINERS:
+ case FSACTL_GET_CONTAINERS:
case FSACTL_SEND_LARGE_FIB:
ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
break;
case FSACTL_GET_NEXT_ADAPTER_FIB: {
struct fib_ioctl __user *f;
-
+
f = compat_alloc_user_space(sizeof(*f));
ret = 0;
- if (clear_user(f, sizeof(*f) != sizeof(*f)))
+ if (clear_user(f, sizeof(*f)))
ret = -EFAULT;
if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
ret = -EFAULT;
if (!ret)
- ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
+ ret = aac_do_ioctl(dev, cmd, f);
break;
}
default:
- ret = -ENOIOCTLCMD;
+ ret = -ENOIOCTLCMD;
break;
- }
- unlock_kernel();
+ }
+ mutex_unlock(&aac_mutex);
return ret;
}
static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
}
static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
- return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg);
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ return aac_compat_do_ioctl(file->private_data, cmd, arg);
}
#endif
-static ssize_t aac_show_model(struct class_device *class_dev,
- char *buf)
+static ssize_t aac_show_model(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
int len;
- len = snprintf(buf, PAGE_SIZE, "%s\n",
+ if (dev->supplement_adapter_info.AdapterTypeText[0]) {
+ char * cp = dev->supplement_adapter_info.AdapterTypeText;
+ while (*cp && *cp != ' ')
+ ++cp;
+ while (*cp == ' ')
+ ++cp;
+ len = snprintf(buf, PAGE_SIZE, "%s\n", cp);
+ } else
+ len = snprintf(buf, PAGE_SIZE, "%s\n",
aac_drivers[dev->cardtype].model);
return len;
}
-static ssize_t aac_show_vendor(struct class_device *class_dev,
- char *buf)
+static ssize_t aac_show_vendor(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
int len;
- len = snprintf(buf, PAGE_SIZE, "%s\n",
+ if (dev->supplement_adapter_info.AdapterTypeText[0]) {
+ char * cp = dev->supplement_adapter_info.AdapterTypeText;
+ while (*cp && *cp != ' ')
+ ++cp;
+ len = snprintf(buf, PAGE_SIZE, "%.*s\n",
+ (int)(cp - (char *)dev->supplement_adapter_info.AdapterTypeText),
+ dev->supplement_adapter_info.AdapterTypeText);
+ } else
+ len = snprintf(buf, PAGE_SIZE, "%s\n",
aac_drivers[dev->cardtype].vname);
return len;
}
-static ssize_t aac_show_kernel_version(struct class_device *class_dev,
- char *buf)
+static ssize_t aac_show_flags(struct device *cdev,
+ struct device_attribute *attr, char *buf)
{
- struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
+ int len = 0;
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(cdev)->hostdata;
+
+ if (nblank(dprintk(x)))
+ len = snprintf(buf, PAGE_SIZE, "dprintk\n");
+#ifdef AAC_DETAILED_STATUS_INFO
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "AAC_DETAILED_STATUS_INFO\n");
+#endif
+ if (dev->raw_io_interface && dev->raw_io_64)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "SAI_READ_CAPACITY_16\n");
+ if (dev->jbod)
+ len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
+ if (dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_POWER_MANAGEMENT)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "SUPPORTED_POWER_MANAGEMENT\n");
+ if (dev->msi)
+ len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
+ return len;
+}
+
+static ssize_t aac_show_kernel_version(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
int len, tmp;
tmp = le32_to_cpu(dev->adapter_info.kernelrev);
- len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
+ len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
le32_to_cpu(dev->adapter_info.kernelbuild));
return len;
}
-static ssize_t aac_show_monitor_version(struct class_device *class_dev,
- char *buf)
+static ssize_t aac_show_monitor_version(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
{
- struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
int len, tmp;
tmp = le32_to_cpu(dev->adapter_info.monitorrev);
- len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
+ len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
le32_to_cpu(dev->adapter_info.monitorbuild));
return len;
}
-static ssize_t aac_show_bios_version(struct class_device *class_dev,
- char *buf)
+static ssize_t aac_show_bios_version(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
{
- struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
int len, tmp;
tmp = le32_to_cpu(dev->adapter_info.biosrev);
- len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
+ len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
le32_to_cpu(dev->adapter_info.biosbuild));
return len;
}
-static ssize_t aac_show_serial_number(struct class_device *class_dev,
- char *buf)
+static ssize_t aac_show_serial_number(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
int len = 0;
if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
- len = snprintf(buf, PAGE_SIZE, "%x\n",
+ len = snprintf(buf, 16, "%06X\n",
le32_to_cpu(dev->adapter_info.serial[0]));
- return len;
+ if (len &&
+ !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
+ sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len],
+ buf, len-1))
+ len = snprintf(buf, 16, "%.*s\n",
+ (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
+ dev->supplement_adapter_info.MfgPcbaSerialNo);
+
+ return min(len, 16);
}
+static ssize_t aac_show_max_channel(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ class_to_shost(device)->max_channel);
+}
+
+static ssize_t aac_show_max_id(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ class_to_shost(device)->max_id);
+}
+
+static ssize_t aac_store_reset_adapter(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int retval = -EACCES;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return retval;
+ retval = aac_reset_adapter((struct aac_dev*)class_to_shost(device)->hostdata, buf[0] == '!');
+ if (retval >= 0)
+ retval = count;
+ return retval;
+}
+
+static ssize_t aac_show_reset_adapter(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+ int len, tmp;
-static struct class_device_attribute aac_model = {
+ tmp = aac_adapter_check_health(dev);
+ if ((tmp == 0) && dev->in_reset)
+ tmp = -EBUSY;
+ len = snprintf(buf, PAGE_SIZE, "0x%x\n", tmp);
+ return len;
+}
+
+static struct device_attribute aac_model = {
.attr = {
.name = "model",
.mode = S_IRUGO,
},
.show = aac_show_model,
};
-static struct class_device_attribute aac_vendor = {
+static struct device_attribute aac_vendor = {
.attr = {
.name = "vendor",
.mode = S_IRUGO,
},
.show = aac_show_vendor,
};
-static struct class_device_attribute aac_kernel_version = {
+static struct device_attribute aac_flags = {
+ .attr = {
+ .name = "flags",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_flags,
+};
+static struct device_attribute aac_kernel_version = {
.attr = {
.name = "hba_kernel_version",
.mode = S_IRUGO,
},
.show = aac_show_kernel_version,
};
-static struct class_device_attribute aac_monitor_version = {
+static struct device_attribute aac_monitor_version = {
.attr = {
.name = "hba_monitor_version",
.mode = S_IRUGO,
},
.show = aac_show_monitor_version,
};
-static struct class_device_attribute aac_bios_version = {
+static struct device_attribute aac_bios_version = {
.attr = {
.name = "hba_bios_version",
.mode = S_IRUGO,
},
.show = aac_show_bios_version,
};
-static struct class_device_attribute aac_serial_number = {
+static struct device_attribute aac_serial_number = {
.attr = {
.name = "serial_number",
.mode = S_IRUGO,
},
.show = aac_show_serial_number,
};
+static struct device_attribute aac_max_channel = {
+ .attr = {
+ .name = "max_channel",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_max_channel,
+};
+static struct device_attribute aac_max_id = {
+ .attr = {
+ .name = "max_id",
+ .mode = S_IRUGO,
+ },
+ .show = aac_show_max_id,
+};
+static struct device_attribute aac_reset = {
+ .attr = {
+ .name = "reset_host",
+ .mode = S_IWUSR|S_IRUGO,
+ },
+ .store = aac_store_reset_adapter,
+ .show = aac_show_reset_adapter,
+};
-static struct class_device_attribute *aac_attrs[] = {
+static struct device_attribute *aac_attrs[] = {
&aac_model,
&aac_vendor,
+ &aac_flags,
&aac_kernel_version,
&aac_monitor_version,
&aac_bios_version,
&aac_serial_number,
+ &aac_max_channel,
+ &aac_max_id,
+ &aac_reset,
NULL
};
+ssize_t aac_get_serial_number(struct device *device, char *buf)
+{
+ return aac_show_serial_number(device, &aac_serial_number, buf);
+}
-static struct file_operations aac_cfg_fops = {
+static const struct file_operations aac_cfg_fops = {
.owner = THIS_MODULE,
- .ioctl = aac_cfg_ioctl,
+ .unlocked_ioctl = aac_cfg_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = aac_compat_cfg_ioctl,
#endif
.open = aac_cfg_open,
+ .llseek = noop_llseek,
};
static struct scsi_host_template aac_driver_template = {
.module = THIS_MODULE,
- .name = "AAC",
+ .name = "AAC",
.proc_name = AAC_DRIVERNAME,
- .info = aac_info,
- .ioctl = aac_ioctl,
+ .info = aac_info,
+ .ioctl = aac_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = aac_compat_ioctl,
#endif
- .queuecommand = aac_queuecommand,
- .bios_param = aac_biosparm,
+ .queuecommand = aac_queuecommand,
+ .bios_param = aac_biosparm,
.shost_attrs = aac_attrs,
.slave_configure = aac_slave_configure,
+ .change_queue_depth = aac_change_queue_depth,
+ .sdev_attrs = aac_dev_attrs,
+ .eh_abort_handler = aac_eh_abort,
.eh_host_reset_handler = aac_eh_reset,
- .can_queue = AAC_NUM_IO_FIB,
- .this_id = MAXIMUM_NUM_CONTAINERS,
- .sg_tablesize = 16,
- .max_sectors = 128,
+ .can_queue = AAC_NUM_IO_FIB,
+ .this_id = MAXIMUM_NUM_CONTAINERS,
+ .sg_tablesize = 16,
+ .max_sectors = 128,
#if (AAC_NUM_IO_FIB > 256)
.cmd_per_lun = 256,
-#else
- .cmd_per_lun = AAC_NUM_IO_FIB,
-#endif
+#else
+ .cmd_per_lun = AAC_NUM_IO_FIB,
+#endif
.use_clustering = ENABLE_CLUSTERING,
+ .emulated = 1,
+ .no_write_same = 1,
};
+static void __aac_shutdown(struct aac_dev * aac)
+{
+ if (aac->aif_thread) {
+ int i;
+ /* Clear out events first */
+ for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) {
+ struct fib *fib = &aac->fibs[i];
+ if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
+ (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
+ up(&fib->event_wait);
+ }
+ kthread_stop(aac->thread);
+ }
+ aac_send_shutdown(aac);
+ aac_adapter_disable_int(aac);
+ free_irq(aac->pdev->irq, aac);
+ if (aac->msi)
+ pci_disable_msi(aac->pdev);
+}
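Aside: a minimal sketch of the XferState test __aac_shutdown() uses above to pick the FIBs whose waiters must be woken before kthread_stop(); assumes the XferState flag bits declared in aacraid.h.

static int example_expects_response(__le32 xferstate)
{
	return !(xferstate & cpu_to_le32(NoResponseExpected | Async)) &&
	       (xferstate & cpu_to_le32(ResponseExpected));
}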
-static int __devinit aac_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
unsigned index = id->driver_data;
struct Scsi_Host *shost;
@@ -722,6 +1112,8 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
struct list_head *insert = &aac_devices;
int error = -ENODEV;
int unique_id = 0;
+ u64 dmamask;
+ extern int aac_sync_mode;
list_for_each_entry(aac, &aac_devices, entry) {
if (aac->id > unique_id)
@@ -730,21 +1122,27 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
unique_id++;
}
- if (pci_enable_device(pdev))
- goto out;
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
- if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) ||
- pci_set_consistent_dma_mask(pdev, 0xFFFFFFFFULL))
+ error = pci_enable_device(pdev);
+ if (error)
goto out;
+ error = -ENODEV;
+
/*
* If the quirk31 bit is set, the adapter needs adapter
* to driver communication memory to be allocated below 2gig
*/
- if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
- if (pci_set_dma_mask(pdev, 0x7FFFFFFFULL) ||
- pci_set_consistent_dma_mask(pdev, 0x7FFFFFFFULL))
- goto out;
-
+ if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
+ dmamask = DMA_BIT_MASK(31);
+ else
+ dmamask = DMA_BIT_MASK(32);
+
+ if (pci_set_dma_mask(pdev, dmamask) ||
+ pci_set_consistent_dma_mask(pdev, dmamask))
+ goto out_disable_pdev;
+
pci_set_master(pdev);
shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
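Aside: the mask selection above as a one-liner sketch. DMA_BIT_MASK(31) is 0x7FFFFFFFULL, so AAC_QUIRK_31BIT boards have every coherent allocation placed below 2 GB until probe widens the mask back to 32 bits further down.

static u64 example_dmamask(unsigned quirks)
{
	return (quirks & AAC_QUIRK_31BIT) ? DMA_BIT_MASK(31)	/* < 2 GB */
					  : DMA_BIT_MASK(32);	/* < 4 GB */
}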
@@ -752,24 +1150,55 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
goto out_disable_pdev;
shost->irq = pdev->irq;
- shost->base = pci_resource_start(pdev, 0);
shost->unique_id = unique_id;
+ shost->max_cmd_len = 16;
aac = (struct aac_dev *)shost->hostdata;
- aac->scsi_host_ptr = shost;
+ aac->base_start = pci_resource_start(pdev, 0);
+ aac->scsi_host_ptr = shost;
aac->pdev = pdev;
aac->name = aac_driver_template.name;
aac->id = shost->unique_id;
- aac->cardtype = index;
+ aac->cardtype = index;
INIT_LIST_HEAD(&aac->entry);
- aac->fibs = kmalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
+ aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
if (!aac->fibs)
goto out_free_host;
spin_lock_init(&aac->fib_lock);
+ /*
+ * Map in the registers from the adapter.
+ */
+ aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
if ((*aac_drivers[index].init)(aac))
- goto out_free_fibs;
+ goto out_unmap;
+
+ if (aac->sync_mode) {
+ if (aac_sync_mode)
+ printk(KERN_INFO "%s%d: Sync. mode enforced "
+ "by driver parameter. This will cause "
+ "a significant performance decrease!\n",
+ aac->name,
+ aac->id);
+ else
+ printk(KERN_INFO "%s%d: Async. mode not supported "
+ "by current driver, sync. mode enforced."
+ "\nPlease update driver to get full performance.\n",
+ aac->name,
+ aac->id);
+ }
+
+ /*
+ * Start any kernel threads needed
+ */
+ aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
+ if (IS_ERR(aac->thread)) {
+ printk(KERN_ERR "aacraid: Unable to create command thread.\n");
+ error = PTR_ERR(aac->thread);
+ aac->thread = NULL;
+ goto out_deinit;
+ }
/*
* If we had set a smaller DMA mask earlier, set it to 4gig
@@ -777,41 +1206,54 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
* address space.
*/
if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
- if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL))
- goto out_free_fibs;
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ goto out_deinit;
aac->maximum_num_channels = aac_drivers[index].channels;
- aac_get_adapter_info(aac);
+ error = aac_get_adapter_info(aac);
+ if (error < 0)
+ goto out_deinit;
/*
- * Lets override negotiations and drop the maximum SG limit to 34
- */
- if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
- (aac->scsi_host_ptr->sg_tablesize > 34)) {
- aac->scsi_host_ptr->sg_tablesize = 34;
- aac->scsi_host_ptr->max_sectors
- = (aac->scsi_host_ptr->sg_tablesize * 8) + 112;
- }
+ * Let's override negotiations and drop the maximum SG limit to 34
+ */
+ if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
+ (shost->sg_tablesize > 34)) {
+ shost->sg_tablesize = 34;
+ shost->max_sectors = (shost->sg_tablesize * 8) + 112;
+ }
+
+ if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
+ (shost->sg_tablesize > 17)) {
+ shost->sg_tablesize = 17;
+ shost->max_sectors = (shost->sg_tablesize * 8) + 112;
+ }
+
+ error = pci_set_dma_max_seg_size(pdev,
+ (aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
+ (shost->max_sectors << 9) : 65536);
+ if (error)
+ goto out_deinit;
/*
- * Firware printf works only with older firmware.
+ * Firmware printf works only with older firmware.
*/
- if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
+ if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
aac->printf_enabled = 1;
else
aac->printf_enabled = 0;
-
- /*
+
+ /*
* max channel will be the physical channels plus 1 virtual channel
- * all containers are on the virtual channel 0
+ * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
* physical channels are addressed by their actual physical number+1
*/
- if (aac->nondasd_support == 1)
- shost->max_channel = aac->maximum_num_channels + 1;
+ if (aac->nondasd_support || expose_physicals || aac->jbod)
+ shost->max_channel = aac->maximum_num_channels;
else
- shost->max_channel = 1;
+ shost->max_channel = 0;
- aac_get_config_status(aac);
+ aac_get_config_status(aac, 0);
aac_get_containers(aac);
list_add(&aac->entry, insert);
@@ -838,17 +1280,15 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
return 0;
-out_deinit:
- kill_proc(aac->thread_pid, SIGKILL, 0);
- wait_for_completion(&aac->aif_completion);
-
- aac_send_shutdown(aac);
- fib_map_free(aac);
- pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
+ out_deinit:
+ __aac_shutdown(aac);
+ out_unmap:
+ aac_fib_map_free(aac);
+ if (aac->comm_addr)
+ pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
+ aac->comm_phys);
kfree(aac->queues);
- free_irq(pdev->irq, aac);
- iounmap(aac->regs.sa);
- out_free_fibs:
+ aac_adapter_ioremap(aac, 0);
kfree(aac->fibs);
kfree(aac->fsa_dev);
out_free_host:
@@ -859,61 +1299,72 @@ out_deinit:
return error;
}
-static void __devexit aac_remove_one(struct pci_dev *pdev)
+static void aac_shutdown(struct pci_dev *dev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(dev);
+ scsi_block_requests(shost);
+ __aac_shutdown((struct aac_dev *)shost->hostdata);
+}
+
+static void aac_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
scsi_remove_host(shost);
- kill_proc(aac->thread_pid, SIGKILL, 0);
- wait_for_completion(&aac->aif_completion);
-
- aac_send_shutdown(aac);
- fib_map_free(aac);
+ __aac_shutdown(aac);
+ aac_fib_map_free(aac);
pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
aac->comm_phys);
kfree(aac->queues);
- free_irq(pdev->irq, aac);
- iounmap(aac->regs.sa);
-
+ aac_adapter_ioremap(aac, 0);
+
kfree(aac->fibs);
-
+ kfree(aac->fsa_dev);
+
list_del(&aac->entry);
scsi_host_put(shost);
pci_disable_device(pdev);
+ if (list_empty(&aac_devices)) {
+ unregister_chrdev(aac_cfg_major, "aac");
+ aac_cfg_major = -1;
+ }
}
static struct pci_driver aac_pci_driver = {
.name = AAC_DRIVERNAME,
.id_table = aac_pci_tbl,
.probe = aac_probe_one,
- .remove = __devexit_p(aac_remove_one),
+ .remove = aac_remove_one,
+ .shutdown = aac_shutdown,
};
static int __init aac_init(void)
{
int error;
-
- printk(KERN_INFO "Red Hat/Adaptec aacraid driver (%s %s)\n",
- AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE);
- error = pci_module_init(&aac_pci_driver);
- if (error)
+ printk(KERN_INFO "Adaptec %s driver %s\n",
+ AAC_DRIVERNAME, aac_driver_version);
+
+ error = pci_register_driver(&aac_pci_driver);
+ if (error < 0)
return error;
aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
if (aac_cfg_major < 0) {
printk(KERN_WARNING
- "aacraid: unable to register \"aac\" device.\n");
+ "aacraid: unable to register \"aac\" device.\n");
}
+
return 0;
}
static void __exit aac_exit(void)
{
- unregister_chrdev(aac_cfg_major, "aac");
+ if (aac_cfg_major > -1)
+ unregister_chrdev(aac_cfg_major, "aac");
pci_unregister_driver(&aac_pci_driver);
}
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
new file mode 100644
index 00000000000..6c53b1d8b2b
--- /dev/null
+++ b/drivers/scsi/aacraid/nark.c
@@ -0,0 +1,84 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * nark.c
+ *
+ * Abstract: Hardware Device Interface for NEMER/ARK
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+/**
+ * aac_nark_ioremap
+ * @dev: adapter to remap
+ * @size: mapping resize request
+ */
+static int aac_nark_ioremap(struct aac_dev * dev, u32 size)
+{
+ if (!size) {
+ iounmap(dev->regs.rx);
+ dev->regs.rx = NULL;
+ iounmap(dev->base);
+ dev->base = NULL;
+ return 0;
+ }
+ dev->base_start = pci_resource_start(dev->pdev, 2);
+ dev->regs.rx = ioremap((u64)pci_resource_start(dev->pdev, 0) |
+ ((u64)pci_resource_start(dev->pdev, 1) << 32),
+ sizeof(struct rx_registers) - sizeof(struct rx_inbound));
+ dev->base = NULL;
+ if (dev->regs.rx == NULL)
+ return -1;
+ dev->base = ioremap(dev->base_start, size);
+ if (dev->base == NULL) {
+ iounmap(dev->regs.rx);
+ dev->regs.rx = NULL;
+ return -1;
+ }
+ dev->IndexRegs = &((struct rx_registers __iomem *)dev->base)->IndexRegs;
+ return 0;
+}
+
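Aside: the split-BAR arithmetic used above, as a minimal sketch. NEMER/ARK boards publish the rx register window as two 32-bit BARs forming one 64-bit bus address, with BAR2 carrying the index registers; illustrative, not part of the patch.

static u64 example_split_bar_addr(struct pci_dev *pdev)
{
	return (u64)pci_resource_start(pdev, 0) |		/* low 32 bits */
	       ((u64)pci_resource_start(pdev, 1) << 32);	/* high 32 bits */
}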
+/**
+ * aac_nark_init - initialize an NEMER/ARK Split Bar card
+ * @dev: device to configure
+ *
+ */
+
+int aac_nark_init(struct aac_dev * dev)
+{
+ /*
+ * Fill in the function dispatch table.
+ */
+ dev->a_ops.adapter_ioremap = aac_nark_ioremap;
+ dev->a_ops.adapter_comm = aac_rx_select_comm;
+
+ return _aac_rx_init(dev);
+}
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index 7d68b782513..7d8013feedd 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -1,11 +1,12 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,314 +29,60 @@
*
*/
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <linux/completion.h>
-#include <linux/time.h>
-#include <linux/interrupt.h>
-#include <asm/semaphore.h>
#include <scsi/scsi_host.h>
#include "aacraid.h"
-static irqreturn_t aac_rkt_intr(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct aac_dev *dev = dev_id;
- unsigned long bellbits;
- u8 intstat, mask;
- intstat = rkt_readb(dev, MUnit.OISR);
- /*
- * Read mask and invert because drawbridge is reversed.
- * This allows us to only service interrupts that have
- * been enabled.
- */
- mask = ~(dev->OIMR);
- /* Check to see if this is our interrupt. If it isn't just return */
- if (intstat & mask)
- {
- bellbits = rkt_readl(dev, OutboundDoorbellReg);
- if (bellbits & DoorBellPrintfReady) {
- aac_printf(dev, rkt_readl(dev, IndexRegs.Mailbox[5]));
- rkt_writel(dev, MUnit.ODR,DoorBellPrintfReady);
- rkt_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
- }
- else if (bellbits & DoorBellAdapterNormCmdReady) {
- rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
- aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
- }
- else if (bellbits & DoorBellAdapterNormRespReady) {
- aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
- rkt_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
- }
- else if (bellbits & DoorBellAdapterNormCmdNotFull) {
- rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
- }
- else if (bellbits & DoorBellAdapterNormRespNotFull) {
- rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
- rkt_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
- }
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
-}
+#define AAC_NUM_IO_FIB_RKT (246 - AAC_NUM_MGT_FIB)
/**
- * rkt_sync_cmd - send a command and wait
+ * aac_rkt_select_comm - Select communications method
* @dev: Adapter
- * @command: Command to execute
- * @p1: first parameter
- * @ret: adapter status
- *
- * This routine will send a synchronous command to the adapter and wait
- * for its completion.
+ * @comm: communications method
*/
-static int rkt_sync_cmd(struct aac_dev *dev, u32 command,
- u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
- u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
+static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
{
- unsigned long start;
- int ok;
- /*
- * Write the command into Mailbox 0
- */
- rkt_writel(dev, InboundMailbox0, command);
- /*
- * Write the parameters into Mailboxes 1 - 6
- */
- rkt_writel(dev, InboundMailbox1, p1);
- rkt_writel(dev, InboundMailbox2, p2);
- rkt_writel(dev, InboundMailbox3, p3);
- rkt_writel(dev, InboundMailbox4, p4);
- /*
- * Clear the synch command doorbell to start on a clean slate.
- */
- rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
- /*
- * Disable doorbell interrupts
- */
- rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
- /*
- * Force the completion of the mask register write before issuing
- * the interrupt.
- */
- rkt_readb (dev, MUnit.OIMR);
- /*
- * Signal that there is a new synch command
- */
- rkt_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
-
- ok = 0;
- start = jiffies;
-
- /*
- * Wait up to 30 seconds
- */
- while (time_before(jiffies, start+30*HZ))
- {
- udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
+ int retval;
+ retval = aac_rx_select_comm(dev, comm);
+ if (comm == AAC_COMM_MESSAGE) {
/*
- * Mon960 will set doorbell0 bit when it has completed the command.
+ * FIB Setup has already been done, but we can minimize the
+ * damage by at least ensuring the OS never issues more
+ * commands than we can handle. The Rocket adapters currently
+ * can only handle 246 commands and 8 AIFs at the same time,
+ * and in fact do notify us accordingly if we negotiate the
+ * FIB size. The problem that causes us to add this check is
+ * to ensure that we do not overdo it with the adapter when a
+ * hard coded FIB override is being utilized. This special
+ * case warrants this half baked, but convenient, check here.
*/
- if (rkt_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
- /*
- * Clear the doorbell.
- */
- rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
- ok = 1;
- break;
+ if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) {
+ dev->init->MaxIoCommands =
+ cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB);
+ dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT;
}
- /*
- * Yield the processor in case we are slow
- */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
}
- if (ok != 1) {
- /*
- * Restore interrupt mask even though we timed out
- */
- rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
- return -ETIMEDOUT;
- }
- /*
- * Pull the synch status from Mailbox 0.
- */
- if (status)
- *status = rkt_readl(dev, IndexRegs.Mailbox[0]);
- if (r1)
- *r1 = rkt_readl(dev, IndexRegs.Mailbox[1]);
- if (r2)
- *r2 = rkt_readl(dev, IndexRegs.Mailbox[2]);
- if (r3)
- *r3 = rkt_readl(dev, IndexRegs.Mailbox[3]);
- if (r4)
- *r4 = rkt_readl(dev, IndexRegs.Mailbox[4]);
- /*
- * Clear the synch command doorbell.
- */
- rkt_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
- /*
- * Restore interrupt mask
- */
- rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
- return 0;
-
+ return retval;
}
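Aside: the clamp applied above when the new comm interface is chosen, as a minimal sketch; Rocket firmware can service at most 246 FIBs at once, management FIBs included.

static int example_rkt_can_queue(int requested)
{
	int limit = 246 - AAC_NUM_MGT_FIB;	/* AAC_NUM_IO_FIB_RKT above */

	return requested > limit ? limit : requested;
}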
/**
- * aac_rkt_interrupt_adapter - interrupt adapter
- * @dev: Adapter
+ * aac_rkt_ioremap
+ * @size: mapping resize request
*
- * Send an interrupt to the i960 and breakpoint it.
*/
-
-static void aac_rkt_interrupt_adapter(struct aac_dev *dev)
+static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
{
- rkt_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
- NULL, NULL, NULL, NULL, NULL);
-}
-
-/**
- * aac_rkt_notify_adapter - send an event to the adapter
- * @dev: Adapter
- * @event: Event to send
- *
- * Notify the i960 that something it probably cares about has
- * happened.
- */
-
-static void aac_rkt_notify_adapter(struct aac_dev *dev, u32 event)
-{
- switch (event) {
-
- case AdapNormCmdQue:
- rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
- break;
- case HostNormRespNotFull:
- rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
- break;
- case AdapNormRespQue:
- rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
- break;
- case HostNormCmdNotFull:
- rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
- break;
- case HostShutdown:
-// rkt_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0,
-// NULL, NULL, NULL, NULL, NULL);
- break;
- case FastIo:
- rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
- break;
- case AdapPrintfDone:
- rkt_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
- break;
- default:
- BUG();
- break;
+ if (!size) {
+ iounmap(dev->regs.rkt);
+ return 0;
}
-}
-
-/**
- * aac_rkt_start_adapter - activate adapter
- * @dev: Adapter
- *
- * Start up processing on an i960 based AAC adapter
- */
-
-static void aac_rkt_start_adapter(struct aac_dev *dev)
-{
- struct aac_init *init;
-
- init = dev->init;
- init->HostElapsedSeconds = cpu_to_le32(get_seconds());
- /*
- * First clear out all interrupts. Then enable the one's that we
- * can handle.
- */
- rkt_writeb(dev, MUnit.OIMR, 0xff);
- rkt_writel(dev, MUnit.ODR, 0xffffffff);
-// rkt_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
- rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
-
- // We can only use a 32 bit address here
- rkt_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
- 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
-}
-
-/**
- * aac_rkt_check_health
- * @dev: device to check if healthy
- *
- * Will attempt to determine if the specified adapter is alive and
- * capable of handling requests, returning 0 if alive.
- */
-static int aac_rkt_check_health(struct aac_dev *dev)
-{
- u32 status = rkt_readl(dev, MUnit.OMRx[0]);
-
- /*
- * Check to see if the board failed any self tests.
- */
- if (status & SELF_TEST_FAILED)
+ dev->base = dev->regs.rkt = ioremap(dev->base_start, size);
+ if (dev->base == NULL)
return -1;
- /*
- * Check to see if the board panic'd.
- */
- if (status & KERNEL_PANIC) {
- char * buffer;
- struct POSTSTATUS {
- __le32 Post_Command;
- __le32 Post_Address;
- } * post;
- dma_addr_t paddr, baddr;
- int ret;
-
- if ((status & 0xFF000000L) == 0xBC000000L)
- return (status >> 16) & 0xFF;
- buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
- ret = -2;
- if (buffer == NULL)
- return ret;
- post = pci_alloc_consistent(dev->pdev,
- sizeof(struct POSTSTATUS), &paddr);
- if (post == NULL) {
- pci_free_consistent(dev->pdev, 512, buffer, baddr);
- return ret;
- }
- memset(buffer, 0, 512);
- post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
- post->Post_Address = cpu_to_le32(baddr);
- rkt_writel(dev, MUnit.IMRx[0], paddr);
- rkt_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
- NULL, NULL, NULL, NULL, NULL);
- pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
- post, paddr);
- if ((buffer[0] == '0') && (buffer[1] == 'x')) {
- ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
- ret <<= 4;
- ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
- }
- pci_free_consistent(dev->pdev, 512, buffer, baddr);
- return ret;
- }
- /*
- * Wait for the adapter to be up and running.
- */
- if (!(status & KERNEL_UP_AND_RUNNING))
- return -3;
- /*
- * Everything is OK
- */
+ dev->IndexRegs = &dev->regs.rkt->IndexRegs;
return 0;
}
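
The adapter_ioremap callback above doubles as the unmapper: a size of
zero tears the mapping down, anything else (re)maps the register window
at base_start. A user-space model of that contract, with malloc/free
standing in for ioremap/iounmap:

#include <stdlib.h>

static void *mapping;	/* models dev->base / dev->regs.rkt */

/* size == 0 unmaps; otherwise (re)map, returning -1 on failure */
static int toy_ioremap(unsigned long base, unsigned int size)
{
	(void)base;	/* the real callback maps this physical base */
	if (!size) {
		free(mapping);		/* kernel: iounmap() */
		mapping = NULL;
		return 0;
	}
	mapping = malloc(size);		/* kernel: ioremap(base, size) */
	return mapping ? 0 : -1;
}

int main(void)
{
	if (toy_ioremap(0xfe000000UL, 8192))	/* map 8 KiB window */
		return 1;
	return toy_ioremap(0, 0);		/* size 0 == unmap */
}
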
@@ -350,98 +97,11 @@ static int aac_rkt_check_health(struct aac_dev *dev)
int aac_rkt_init(struct aac_dev *dev)
{
- unsigned long start;
- unsigned long status;
- int instance;
- const char * name;
-
- instance = dev->id;
- name = dev->name;
-
- /*
- * Map in the registers from the adapter.
- */
- if((dev->regs.rkt = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
- {
- printk(KERN_WARNING "aacraid: unable to map i960.\n" );
- goto error_iounmap;
- }
- /*
- * Check to see if the board failed any self tests.
- */
- if (rkt_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) {
- printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
- goto error_iounmap;
- }
- /*
- * Check to see if the monitor panic'd while booting.
- */
- if (rkt_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) {
- printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
- goto error_iounmap;
- }
- /*
- * Check to see if the board panic'd while booting.
- */
- if (rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
- printk(KERN_ERR "%s%d: adapter kernel panic'd.\n", dev->name, instance);
- goto error_iounmap;
- }
- start = jiffies;
- /*
- * Wait for the adapter to be up and running. Wait up to 3 minutes
- */
- while (!(rkt_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING))
- {
- if(time_after(jiffies, start+180*HZ))
- {
- status = rkt_readl(dev, MUnit.OMRx[0]);
- printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
- dev->name, instance, status);
- goto error_iounmap;
- }
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
- }
- if (request_irq(dev->scsi_host_ptr->irq, aac_rkt_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0)
- {
- printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
- goto error_iounmap;
- }
/*
* Fill in the function dispatch table.
*/
- dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter;
- dev->a_ops.adapter_notify = aac_rkt_notify_adapter;
- dev->a_ops.adapter_sync_cmd = rkt_sync_cmd;
- dev->a_ops.adapter_check_health = aac_rkt_check_health;
-
- if (aac_init_adapter(dev) == NULL)
- goto error_irq;
- /*
- * Start any kernel threads needed
- */
- dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
- if(dev->thread_pid < 0)
- {
- printk(KERN_ERR "aacraid: Unable to create rkt thread.\n");
- goto error_kfree;
- }
- /*
- * Tell the adapter that all is configured, and it can start
- * accepting requests
- */
- aac_rkt_start_adapter(dev);
- return 0;
-
-error_kfree:
- kfree(dev->queues);
-
-error_irq:
- free_irq(dev->scsi_host_ptr->irq, (void *)dev);
-
-error_iounmap:
- iounmap(dev->regs.rkt);
+ dev->a_ops.adapter_ioremap = aac_rkt_ioremap;
+ dev->a_ops.adapter_comm = aac_rkt_select_comm;
- return -1;
+ return _aac_rx_init(dev);
}
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 1ff25f49fad..5c6a8703f53 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -1,11 +1,12 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,54 +32,49 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
-#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
-#include <asm/semaphore.h>
#include <scsi/scsi_host.h>
#include "aacraid.h"
-static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
{
struct aac_dev *dev = dev_id;
unsigned long bellbits;
- u8 intstat, mask;
- intstat = rx_readb(dev, MUnit.OISR);
+ u8 intstat = rx_readb(dev, MUnit.OISR);
+
/*
* Read mask and invert because drawbridge is reversed.
- * This allows us to only service interrupts that have
+ * This allows us to only service interrupts that have
* been enabled.
+ * Check to see if this is our interrupt. If it isn't just return
*/
- mask = ~(dev->OIMR);
- /* Check to see if this is our interrupt. If it isn't just return */
- if (intstat & mask)
- {
+ if (likely(intstat & ~(dev->OIMR))) {
bellbits = rx_readl(dev, OutboundDoorbellReg);
- if (bellbits & DoorBellPrintfReady) {
- aac_printf(dev, rx_readl(dev, IndexRegs.Mailbox[5]));
+ if (unlikely(bellbits & DoorBellPrintfReady)) {
+ aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
}
- else if (bellbits & DoorBellAdapterNormCmdReady) {
+ else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
}
- else if (bellbits & DoorBellAdapterNormRespReady) {
- aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+ else if (likely(bellbits & DoorBellAdapterNormRespReady)) {
rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
+ aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
}
- else if (bellbits & DoorBellAdapterNormCmdNotFull) {
+ else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
}
- else if (bellbits & DoorBellAdapterNormRespNotFull) {
+ else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) {
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
}
@@ -87,6 +83,75 @@ static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_NONE;
}
+static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
+{
+ int isAif, isFastResponse, isSpecial;
+ struct aac_dev *dev = dev_id;
+ u32 Index = rx_readl(dev, MUnit.OutboundQueue);
+ if (unlikely(Index == 0xFFFFFFFFL))
+ Index = rx_readl(dev, MUnit.OutboundQueue);
+ if (likely(Index != 0xFFFFFFFFL)) {
+ do {
+ isAif = isFastResponse = isSpecial = 0;
+ if (Index & 0x00000002L) {
+ isAif = 1;
+ if (Index == 0xFFFFFFFEL)
+ isSpecial = 1;
+ Index &= ~0x00000002L;
+ } else {
+ if (Index & 0x00000001L)
+ isFastResponse = 1;
+ Index >>= 2;
+ }
+ if (!isSpecial) {
+ if (unlikely(aac_intr_normal(dev,
+ Index, isAif,
+ isFastResponse, NULL))) {
+ rx_writel(dev,
+ MUnit.OutboundQueue,
+ Index);
+ rx_writel(dev,
+ MUnit.ODR,
+ DoorBellAdapterNormRespReady);
+ }
+ }
+ Index = rx_readl(dev, MUnit.OutboundQueue);
+ } while (Index != 0xFFFFFFFFL);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
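
The word popped from MUnit.OutboundQueue above is a packed descriptor:
bit 1 marks an AIF (with 0xFFFFFFFE as a special AIF that carries no
index), bit 0 marks a fast response, and otherwise the value shifted
right by two is the FIB index; 0xFFFFFFFF means the queue is empty. A
stand-alone decoder mirroring those tests (names are descriptive
stand-ins):

#include <stdint.h>
#include <stdio.h>

struct rx_entry { int is_aif, is_fast, is_special; uint32_t index; };

static struct rx_entry decode(uint32_t word)
{
	struct rx_entry e = { 0, 0, 0, 0 };

	if (word & 0x00000002UL) {
		e.is_aif = 1;
		if (word == 0xFFFFFFFEUL)	/* special AIF: no index */
			e.is_special = 1;
		e.index = word & ~0x00000002UL;
	} else {
		e.is_fast = word & 0x00000001UL;
		e.index = word >> 2;
	}
	return e;
}

int main(void)
{
	struct rx_entry e = decode(0x41);	/* fast response, index 16 */

	printf("aif=%d fast=%d index=%u\n", e.is_aif, e.is_fast, e.index);
	return 0;
}
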
+
+/**
+ * aac_rx_disable_interrupt - Disable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_rx_disable_interrupt(struct aac_dev *dev)
+{
+ rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
+}
+
+/**
+ * aac_rx_enable_interrupt_producer - Enable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
+{
+ rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+}
+
+/**
+ * aac_rx_enable_interrupt_message - Enable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
+{
+ rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
+}
+
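
Each communications mode leaves a different source unmasked in OIMR:
0xff masks everything, 0xfb clears bit 2 for the producer doorbell path,
and 0xf7 clears bit 3 for the message path. The bit assignments below
are inferred from the constants in this patch, not from a datasheet:

/* OIMR values used by the three rx interrupt helpers above */
#define RX_OIMR_ALL_MASKED	0xff	/* aac_rx_disable_interrupt */
#define RX_OIMR_PRODUCER	0xfb	/* ~(1 << 2): doorbell unmasked */
#define RX_OIMR_MESSAGE		0xf7	/* ~(1 << 3): message unmasked */
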
/**
* rx_sync_cmd - send a command and wait
* @dev: Adapter
@@ -107,14 +172,14 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Write the command into Mailbox 0
*/
- rx_writel(dev, InboundMailbox0, command);
+ writel(command, &dev->IndexRegs->Mailbox[0]);
/*
* Write the parameters into Mailboxes 1 - 6
*/
- rx_writel(dev, InboundMailbox1, p1);
- rx_writel(dev, InboundMailbox2, p2);
- rx_writel(dev, InboundMailbox3, p3);
- rx_writel(dev, InboundMailbox4, p4);
+ writel(p1, &dev->IndexRegs->Mailbox[1]);
+ writel(p2, &dev->IndexRegs->Mailbox[2]);
+ writel(p3, &dev->IndexRegs->Mailbox[3]);
+ writel(p4, &dev->IndexRegs->Mailbox[4]);
/*
* Clear the synch command doorbell to start on a clean slate.
*/
@@ -156,29 +221,28 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Yield the processor in case we are slow
*/
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
+ msleep(1);
}
- if (ok != 1) {
+ if (unlikely(ok != 1)) {
/*
* Restore interrupt mask even though we timed out
*/
- rx_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
+ aac_adapter_enable_int(dev);
return -ETIMEDOUT;
}
/*
* Pull the synch status from Mailbox 0.
*/
if (status)
- *status = rx_readl(dev, IndexRegs.Mailbox[0]);
+ *status = readl(&dev->IndexRegs->Mailbox[0]);
if (r1)
- *r1 = rx_readl(dev, IndexRegs.Mailbox[1]);
+ *r1 = readl(&dev->IndexRegs->Mailbox[1]);
if (r2)
- *r2 = rx_readl(dev, IndexRegs.Mailbox[2]);
+ *r2 = readl(&dev->IndexRegs->Mailbox[2]);
if (r3)
- *r3 = rx_readl(dev, IndexRegs.Mailbox[3]);
+ *r3 = readl(&dev->IndexRegs->Mailbox[3]);
if (r4)
- *r4 = rx_readl(dev, IndexRegs.Mailbox[4]);
+ *r4 = readl(&dev->IndexRegs->Mailbox[4]);
/*
* Clear the synch command doorbell.
*/
@@ -186,7 +250,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
/*
* Restore interrupt mask
*/
- rx_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
+ aac_adapter_enable_int(dev);
return 0;
}
@@ -229,8 +293,6 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
break;
case HostShutdown:
-// rx_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0,
-// NULL, NULL, NULL, NULL, NULL);
break;
case FastIo:
rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
@@ -257,15 +319,6 @@ static void aac_rx_start_adapter(struct aac_dev *dev)
init = dev->init;
init->HostElapsedSeconds = cpu_to_le32(get_seconds());
- /*
- * First clear out all interrupts. Then enable the one's that we
- * can handle.
- */
- rx_writeb(dev, MUnit.OIMR, 0xff);
- rx_writel(dev, MUnit.ODR, 0xffffffff);
-// rx_writeb(dev, MUnit.OIMR, ~(u8)OUTBOUND_DOORBELL_INTERRUPT_MASK);
- rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
-
// We can only use a 32 bit address here
rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
@@ -285,12 +338,12 @@ static int aac_rx_check_health(struct aac_dev *dev)
/*
* Check to see if the board failed any self tests.
*/
- if (status & SELF_TEST_FAILED)
+ if (unlikely(status & SELF_TEST_FAILED))
return -1;
/*
* Check to see if the board panic'd.
*/
- if (status & KERNEL_PANIC) {
+ if (unlikely(status & KERNEL_PANIC)) {
char * buffer;
struct POSTSTATUS {
__le32 Post_Command;
@@ -299,15 +352,15 @@ static int aac_rx_check_health(struct aac_dev *dev)
dma_addr_t paddr, baddr;
int ret;
- if ((status & 0xFF000000L) == 0xBC000000L)
+ if (likely((status & 0xFF000000L) == 0xBC000000L))
return (status >> 16) & 0xFF;
buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
ret = -2;
- if (buffer == NULL)
+ if (unlikely(buffer == NULL))
return ret;
post = pci_alloc_consistent(dev->pdev,
sizeof(struct POSTSTATUS), &paddr);
- if (post == NULL) {
+ if (unlikely(post == NULL)) {
pci_free_consistent(dev->pdev, 512, buffer, baddr);
return ret;
}
@@ -319,10 +372,9 @@ static int aac_rx_check_health(struct aac_dev *dev)
NULL, NULL, NULL, NULL, NULL);
pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
post, paddr);
- if ((buffer[0] == '0') && (buffer[1] == 'x')) {
- ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
- ret <<= 4;
- ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
+ if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
+ ret = (hex_to_bin(buffer[2]) << 4) +
+ hex_to_bin(buffer[3]);
}
pci_free_consistent(dev->pdev, 512, buffer, baddr);
return ret;
@@ -330,7 +382,7 @@ static int aac_rx_check_health(struct aac_dev *dev)
/*
* Wait for the adapter to be up and running.
*/
- if (!(status & KERNEL_UP_AND_RUNNING))
+ if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
return -3;
/*
* Everything is OK
@@ -339,6 +391,156 @@ static int aac_rx_check_health(struct aac_dev *dev)
}
/**
+ * aac_rx_deliver_producer
+ * @fib: fib to issue
+ *
+ * Will send a fib, returning 0 if successful.
+ */
+int aac_rx_deliver_producer(struct fib * fib)
+{
+ struct aac_dev *dev = fib->dev;
+ struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
+ unsigned long qflags;
+ u32 Index;
+ unsigned long nointr = 0;
+
+ spin_lock_irqsave(q->lock, qflags);
+ aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);
+
+ q->numpending++;
+ *(q->headers.producer) = cpu_to_le32(Index + 1);
+ spin_unlock_irqrestore(q->lock, qflags);
+ if (!(nointr & aac_config.irq_mod))
+ aac_adapter_notify(dev, AdapNormCmdQueue);
+
+ return 0;
+}
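
Producer delivery hands work over through a shared index: queue the
entry, advance the producer index past it, then ring the doorbell unless
interrupt moderation (aac_config.irq_mod) suppressed the notification. A
reduced model of that handoff; the struct and the notify callback are
stand-ins for the kernel types:

#include <stdint.h>

struct toy_queue {
	volatile uint32_t *producer;	/* index shared with the adapter */
	unsigned int numpending;
};

static void publish(struct toy_queue *q, uint32_t index,
		    int nointr, void (*notify)(void))
{
	q->numpending++;
	*q->producer = index + 1;	/* kernel wraps this in cpu_to_le32 */
	if (!nointr)
		notify();	/* aac_adapter_notify(dev, AdapNormCmdQueue) */
}

static uint32_t shared;
static void ding(void) { }

int main(void)
{
	struct toy_queue q = { &shared, 0 };

	publish(&q, 7, 0, ding);
	return shared == 8 ? 0 : 1;
}
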
+
+/**
+ * aac_rx_deliver_message
+ * @fib: fib to issue
+ *
+ * Will send a fib, returning 0 if successful.
+ */
+static int aac_rx_deliver_message(struct fib * fib)
+{
+ struct aac_dev *dev = fib->dev;
+ struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
+ unsigned long qflags;
+ u32 Index;
+ u64 addr;
+ volatile void __iomem *device;
+
+ unsigned long count = 10000000L; /* 50 seconds */
+ spin_lock_irqsave(q->lock, qflags);
+ q->numpending++;
+ spin_unlock_irqrestore(q->lock, qflags);
+ for(;;) {
+ Index = rx_readl(dev, MUnit.InboundQueue);
+ if (unlikely(Index == 0xFFFFFFFFL))
+ Index = rx_readl(dev, MUnit.InboundQueue);
+ if (likely(Index != 0xFFFFFFFFL))
+ break;
+ if (--count == 0) {
+ spin_lock_irqsave(q->lock, qflags);
+ q->numpending--;
+ spin_unlock_irqrestore(q->lock, qflags);
+ return -ETIMEDOUT;
+ }
+ udelay(5);
+ }
+ device = dev->base + Index;
+ addr = fib->hw_fib_pa;
+ writel((u32)(addr & 0xffffffff), device);
+ device += sizeof(u32);
+ writel((u32)(addr >> 32), device);
+ device += sizeof(u32);
+ writel(le16_to_cpu(fib->hw_fib_va->header.Size), device);
+ rx_writel(dev, MUnit.InboundQueue, Index);
+ return 0;
+}
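
Message-mode delivery instead pops a hardware slot offset from
MUnit.InboundQueue, retrying once on the 0xFFFFFFFF empty sentinel and
bounding the wait at 10,000,000 polls of 5 us each, which is where the
"50 seconds" in the comment comes from. The shape of that loop, with
hypothetical read_slot/pause5us callbacks standing in for the register
read and udelay(5):

#include <stdint.h>
#include <errno.h>

static int pop_slot(uint32_t (*read_slot)(void), void (*pause5us)(void),
		    uint32_t *out)
{
	unsigned long budget = 10000000UL;	/* x 5 us = 50 seconds */

	for (;;) {
		uint32_t idx = read_slot();

		if (idx == 0xFFFFFFFFUL)	/* empty: read once more */
			idx = read_slot();
		if (idx != 0xFFFFFFFFUL) {
			*out = idx;
			return 0;
		}
		if (--budget == 0)
			return -ETIMEDOUT;
		pause5us();
	}
}

static uint32_t fake_read(void) { return 3; }
static void no_pause(void) { }

int main(void)
{
	uint32_t slot;

	return pop_slot(fake_read, no_pause, &slot) == 0 && slot == 3 ? 0 : 1;
}
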
+
+/**
+ * aac_rx_ioremap
+ * @dev: Adapter
+ * @size: mapping resize request
+ *
+ */
+static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
+{
+ if (!size) {
+ iounmap(dev->regs.rx);
+ return 0;
+ }
+ dev->base = dev->regs.rx = ioremap(dev->base_start, size);
+ if (dev->base == NULL)
+ return -1;
+ dev->IndexRegs = &dev->regs.rx->IndexRegs;
+ return 0;
+}
+
+static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
+{
+ u32 var = 0;
+
+ if (!(dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
+ if (bled)
+ printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
+ dev->name, dev->id, bled);
+ else {
+ bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
+ 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
+ if (!bled && (var != 0x00000001) && (var != 0x3803000F))
+ bled = -EINVAL;
+ }
+ if (bled && (bled != -ETIMEDOUT))
+ bled = aac_adapter_sync_cmd(dev, IOP_RESET,
+ 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
+
+ if (bled && (bled != -ETIMEDOUT))
+ return -EINVAL;
+ }
+ if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */
+ rx_writel(dev, MUnit.reserved2, 3);
+ msleep(5000); /* Delay 5 seconds */
+ var = 0x00000001;
+ }
+ if (bled && (var != 0x00000001))
+ return -EINVAL;
+ ssleep(5);
+ if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
+ return -ENODEV;
+ if (startup_timeout < 300)
+ startup_timeout = 300;
+ return 0;
+}
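
The restart ladder above is easier to follow as a step list; the magic
status words are the ones the code tests for (0x00000001 for a reset
that was accepted, 0x3803000F for firmware requesting the register-poke
method):

  1. Issue IOP_RESET_ALWAYS and read back the status word.
  2. If that failed (other than by timing out), retry with plain
     IOP_RESET.
  3. On 0x3803000F ("use other method"), write 3 to MUnit.reserved2 and
     sleep 5 seconds.
  4. Sleep a further 5 seconds, fail with -ENODEV if KERNEL_PANIC is
     still set, and stretch startup_timeout to at least 300 seconds.
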
+
+/**
+ * aac_rx_select_comm - Select communications method
+ * @dev: Adapter
+ * @comm: communications method
+ */
+
+int aac_rx_select_comm(struct aac_dev *dev, int comm)
+{
+ switch (comm) {
+ case AAC_COMM_PRODUCER:
+ dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
+ dev->a_ops.adapter_intr = aac_rx_intr_producer;
+ dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
+ break;
+ case AAC_COMM_MESSAGE:
+ dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
+ dev->a_ops.adapter_intr = aac_rx_intr_message;
+ dev->a_ops.adapter_deliver = aac_rx_deliver_message;
+ break;
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+/**
* aac_rx_init - initialize an i960 based AAC card
* @dev: device to configure
*
@@ -347,42 +549,49 @@ static int aac_rx_check_health(struct aac_dev *dev)
* to the comm region.
*/
-int aac_rx_init(struct aac_dev *dev)
+int _aac_rx_init(struct aac_dev *dev)
{
unsigned long start;
unsigned long status;
- int instance;
- const char * name;
+ int restart = 0;
+ int instance = dev->id;
+ const char * name = dev->name;
- instance = dev->id;
- name = dev->name;
+ if (aac_adapter_ioremap(dev, dev->base_size)) {
+ printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+ goto error_iounmap;
+ }
+ /* Failure to reset here is an option ... */
+ dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
+ dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
+ dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
+ if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
+ !aac_rx_restart_adapter(dev, 0))
+ /* Make sure the Hardware FIFO is empty */
+ while ((++restart < 512) &&
+ (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
/*
- * Map in the registers from the adapter.
+ * Check to see if the board panic'd while booting.
*/
- if((dev->regs.rx = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
- {
- printk(KERN_WARNING "aacraid: unable to map i960.\n" );
- return -1;
+ status = rx_readl(dev, MUnit.OMRx[0]);
+ if (status & KERNEL_PANIC) {
+ if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
+ goto error_iounmap;
+ ++restart;
}
/*
* Check to see if the board failed any self tests.
*/
- if (rx_readl(dev, MUnit.OMRx[0]) & SELF_TEST_FAILED) {
+ status = rx_readl(dev, MUnit.OMRx[0]);
+ if (status & SELF_TEST_FAILED) {
printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
goto error_iounmap;
}
/*
- * Check to see if the board panic'd while booting.
- */
- if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC) {
- printk(KERN_ERR "%s%d: adapter kernel panic.\n", dev->name, instance);
- goto error_iounmap;
- }
- /*
* Check to see if the monitor panic'd while booting.
*/
- if (rx_readl(dev, MUnit.OMRx[0]) & MONITOR_PANIC) {
+ if (status & MONITOR_PANIC) {
printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
goto error_iounmap;
}
@@ -390,58 +599,86 @@ int aac_rx_init(struct aac_dev *dev)
/*
* Wait for the adapter to be up and running. Wait up to 3 minutes
*/
- while ((!(rx_readl(dev, IndexRegs.Mailbox[7]) & KERNEL_UP_AND_RUNNING))
- || (!(rx_readl(dev, MUnit.OMRx[0]) & KERNEL_UP_AND_RUNNING)))
+ while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
{
- if(time_after(jiffies, start+180*HZ))
- {
- status = rx_readl(dev, IndexRegs.Mailbox[7]);
+ if ((restart &&
+ (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+ time_after(jiffies, start+HZ*startup_timeout)) {
printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
dev->name, instance, status);
goto error_iounmap;
}
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
- }
- if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0)
- {
- printk(KERN_ERR "%s%d: Interrupt unavailable.\n", name, instance);
- goto error_iounmap;
+ if (!restart &&
+ ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+ time_after(jiffies, start + HZ *
+ ((startup_timeout > 60)
+ ? (startup_timeout - 60)
+ : (startup_timeout / 2))))) {
+ if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev))))
+ start = jiffies;
+ ++restart;
+ }
+ msleep(1);
}
+ if (restart && aac_commit)
+ aac_commit = 1;
/*
- * Fill in the function dispatch table.
+ * Fill in the common function dispatch table.
*/
dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
dev->a_ops.adapter_notify = aac_rx_notify_adapter;
dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
dev->a_ops.adapter_check_health = aac_rx_check_health;
+ dev->a_ops.adapter_restart = aac_rx_restart_adapter;
- if (aac_init_adapter(dev) == NULL)
- goto error_irq;
/*
- * Start any kernel threads needed
+	 * First clear out all interrupts. Then enable the ones that we
+ * can handle.
*/
- dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
- if(dev->thread_pid < 0)
- {
- printk(KERN_ERR "aacraid: Unable to create rx thread.\n");
- goto error_kfree;
+ aac_adapter_comm(dev, AAC_COMM_PRODUCER);
+ aac_adapter_disable_int(dev);
+ rx_writel(dev, MUnit.ODR, 0xffffffff);
+ aac_adapter_enable_int(dev);
+
+ if (aac_init_adapter(dev) == NULL)
+ goto error_iounmap;
+ aac_adapter_comm(dev, dev->comm_interface);
+ dev->sync_mode = 0; /* sync. mode not supported */
+ dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+ IRQF_SHARED, "aacraid", dev) < 0) {
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+ printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+ name, instance);
+ goto error_iounmap;
}
+ dev->dbg_base = dev->base_start;
+ dev->dbg_base_mapped = dev->base;
+ dev->dbg_size = dev->base_size;
+
+ aac_adapter_enable_int(dev);
/*
- * Tell the adapter that all is configured, and it can start
- * accepting requests
+ * Tell the adapter that all is configured, and it can
+ * start accepting requests
*/
aac_rx_start_adapter(dev);
- return 0;
-error_kfree:
- kfree(dev->queues);
-
-error_irq:
- free_irq(dev->scsi_host_ptr->irq, (void *)dev);
+ return 0;
error_iounmap:
- iounmap(dev->regs.rx);
return -1;
}
+
+int aac_rx_init(struct aac_dev *dev)
+{
+ /*
+ * Fill in the function dispatch table.
+ */
+ dev->a_ops.adapter_ioremap = aac_rx_ioremap;
+ dev->a_ops.adapter_comm = aac_rx_select_comm;
+
+ return _aac_rx_init(dev);
+}
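
This wrapper split is what lets rkt.c shed most of its code: each board
family installs only the callbacks that differ (adapter_ioremap and
adapter_comm) and then delegates to the shared _aac_rx_init bring-up.
Reduced to a compilable skeleton with stand-in names:

struct toy_ops {
	int (*map)(unsigned int size);
	int (*select_comm)(int comm);
};

struct toy_dev { struct toy_ops ops; };

/* shared bring-up: uses whatever callbacks the family installed */
static int common_init(struct toy_dev *d)
{
	if (d->ops.map(4096))
		return -1;
	return d->ops.select_comm(0);
}

/* a family init is now just table setup plus delegation */
static int family_init(struct toy_dev *d,
		       int (*map)(unsigned int),
		       int (*select_comm)(int))
{
	d->ops.map = map;
	d->ops.select_comm = select_comm;
	return common_init(d);
}

static int toy_map(unsigned int size) { return size ? 0 : -1; }
static int toy_comm(int comm) { return comm ? 1 : 0; }

int main(void)
{
	struct toy_dev d;

	return family_init(&d, toy_map, toy_comm);
}
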
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 0680249ab86..e66477c9824 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -1,11 +1,12 @@
/*
* Adaptec AAC series RAID controller driver
- * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc.
*
* based on the old aacraid driver that is..
* Adaptec aacraid device driver for Linux.
*
- * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,22 +32,19 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
-#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
-#include <asm/semaphore.h>
#include <scsi/scsi_host.h>
#include "aacraid.h"
-static irqreturn_t aac_sa_intr(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t aac_sa_intr(int irq, void *dev_id)
{
struct aac_dev *dev = dev_id;
unsigned short intstat, mask;
@@ -66,11 +64,11 @@ static irqreturn_t aac_sa_intr(int irq, void *dev_id, struct pt_regs *regs)
sa_writew(dev, DoorbellClrReg_p, PrintfReady); /* clear PrintfReady */
sa_writew(dev, DoorbellReg_s, PrintfDone);
} else if (intstat & DOORBELL_1) { // dev -> Host Normal Command Ready
- aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
sa_writew(dev, DoorbellClrReg_p, DOORBELL_1);
+ aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
} else if (intstat & DOORBELL_2) { // dev -> Host Normal Response Ready
- aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
sa_writew(dev, DoorbellClrReg_p, DOORBELL_2);
+ aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
} else if (intstat & DOORBELL_3) { // dev -> Host Normal Command Not Full
sa_writew(dev, DoorbellClrReg_p, DOORBELL_3);
} else if (intstat & DOORBELL_4) { // dev -> Host Normal Response Not Full
@@ -82,6 +80,27 @@ static irqreturn_t aac_sa_intr(int irq, void *dev_id, struct pt_regs *regs)
}
/**
+ * aac_sa_disable_interrupt - disable interrupt
+ * @dev: Which adapter to disable.
+ */
+
+static void aac_sa_disable_interrupt (struct aac_dev *dev)
+{
+ sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
+}
+
+/**
+ * aac_sa_enable_interrupt - enable interrupt
+ * @dev: Which adapter to enable.
+ */
+
+static void aac_sa_enable_interrupt (struct aac_dev *dev)
+{
+ sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 |
+ DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
+}
+
+/**
* aac_sa_notify_adapter - handle adapter notification
* @dev: Adapter that notification is for
  * @event: Event to notify
@@ -179,8 +198,7 @@ static int sa_sync_cmd(struct aac_dev *dev, u32 command,
ok = 1;
break;
}
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
+ msleep(1);
}
if (ok != 1)
@@ -214,9 +232,8 @@ static int sa_sync_cmd(struct aac_dev *dev, u32 command,
static void aac_sa_interrupt_adapter (struct aac_dev *dev)
{
- u32 ret;
sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
- &ret, NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, NULL);
}
/**
@@ -228,29 +245,21 @@ static void aac_sa_interrupt_adapter (struct aac_dev *dev)
static void aac_sa_start_adapter(struct aac_dev *dev)
{
- u32 ret;
struct aac_init *init;
/*
* Fill in the remaining pieces of the init.
*/
init = dev->init;
init->HostElapsedSeconds = cpu_to_le32(get_seconds());
-
- /*
- * Tell the adapter we are back and up and running so it will scan its command
- * queues and enable our interrupts
- */
- dev->irq_mask = (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4);
- /*
- * First clear out all interrupts. Then enable the one's that
- * we can handle.
- */
- sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
- sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 | DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
/* We can only use a 32 bit address here */
sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
- &ret, NULL, NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL, NULL);
+}
+
+static int aac_sa_restart_adapter(struct aac_dev *dev, int bled)
+{
+ return -EINVAL;
}
/**
@@ -286,6 +295,21 @@ static int aac_sa_check_health(struct aac_dev *dev)
}
/**
+ * aac_sa_ioremap
+ * @dev: Adapter
+ * @size: mapping resize request
+ *
+ */
+static int aac_sa_ioremap(struct aac_dev * dev, u32 size)
+{
+ if (!size) {
+ iounmap(dev->regs.sa);
+ return 0;
+ }
+ dev->base = dev->regs.sa = ioremap(dev->base_start, size);
+ return (dev->base == NULL) ? -1 : 0;
+}
+
+/**
* aac_sa_init - initialize an ARM based AAC card
* @dev: device to configure
*
@@ -304,15 +328,11 @@ int aac_sa_init(struct aac_dev *dev)
instance = dev->id;
name = dev->name;
- /*
- * Map in the registers from the adapter.
- */
-
- if((dev->regs.sa = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
- {
- printk(KERN_WARNING "aacraid: unable to map ARM.\n" );
+ if (aac_sa_ioremap(dev, dev->base_size)) {
+ printk(KERN_WARNING "%s: unable to map adapter.\n", name);
goto error_iounmap;
}
+
/*
* Check to see if the board failed any self tests.
*/
@@ -332,19 +352,13 @@ int aac_sa_init(struct aac_dev *dev)
* Wait for the adapter to be up and running. Wait up to 3 minutes.
*/
while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
- if (time_after(jiffies, start+180*HZ)) {
+ if (time_after(jiffies, start+startup_timeout*HZ)) {
status = sa_readl(dev, Mailbox7);
printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %lx.\n",
name, instance, status);
goto error_iounmap;
}
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
- }
-
- if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev ) < 0) {
- printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", name, instance);
- goto error_iounmap;
+ msleep(1);
}
/*
@@ -352,22 +366,37 @@ int aac_sa_init(struct aac_dev *dev)
*/
dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
+ dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
dev->a_ops.adapter_notify = aac_sa_notify_adapter;
dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
dev->a_ops.adapter_check_health = aac_sa_check_health;
+ dev->a_ops.adapter_restart = aac_sa_restart_adapter;
+ dev->a_ops.adapter_intr = aac_sa_intr;
+ dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
+ dev->a_ops.adapter_ioremap = aac_sa_ioremap;
+ /*
+	 * First clear out all interrupts. Then enable the ones that
+ * we can handle.
+ */
+ aac_adapter_disable_int(dev);
+ aac_adapter_enable_int(dev);
if(aac_init_adapter(dev) == NULL)
goto error_irq;
-
- /*
- * Start any kernel threads needed
- */
- dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, dev, 0);
- if (dev->thread_pid < 0) {
- printk(KERN_ERR "aacraid: Unable to create command thread.\n");
- goto error_kfree;
+ dev->sync_mode = 0; /* sync. mode not supported */
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+ IRQF_SHARED, "aacraid", (void *)dev) < 0) {
+ printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
+ name, instance);
+ goto error_iounmap;
}
+ dev->dbg_base = dev->base_start;
+ dev->dbg_base_mapped = dev->base;
+ dev->dbg_size = dev->base_size;
+
+ aac_adapter_enable_int(dev);
/*
  * Tell the adapter that all is configured, and it can start
@@ -376,15 +405,11 @@ int aac_sa_init(struct aac_dev *dev)
aac_sa_start_adapter(dev);
return 0;
-
-error_kfree:
- kfree(dev->queues);
-
error_irq:
- free_irq(dev->scsi_host_ptr->irq, (void *)dev);
+ aac_sa_disable_interrupt(dev);
+ free_irq(dev->pdev->irq, (void *)dev);
error_iounmap:
- iounmap(dev->regs.sa);
return -1;
}
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
new file mode 100644
index 00000000000..9c65aed2621
--- /dev/null
+++ b/drivers/scsi/aacraid/src.c
@@ -0,0 +1,833 @@
+/*
+ * Adaptec AAC series RAID controller driver
+ * (c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ * src.c
+ *
+ * Abstract: Hardware Device Interface for PMC SRC based controllers
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
+{
+ struct aac_dev *dev = dev_id;
+ unsigned long bellbits, bellbits_shifted;
+ int our_interrupt = 0;
+ int isFastResponse;
+ u32 index, handle;
+
+ bellbits = src_readl(dev, MUnit.ODR_R);
+ if (bellbits & PmDoorBellResponseSent) {
+ bellbits = PmDoorBellResponseSent;
+ /* handle async. status */
+ src_writel(dev, MUnit.ODR_C, bellbits);
+ src_readl(dev, MUnit.ODR_C);
+ our_interrupt = 1;
+ index = dev->host_rrq_idx;
+ for (;;) {
+ isFastResponse = 0;
+ /* remove toggle bit (31) */
+ handle = le32_to_cpu(dev->host_rrq[index]) & 0x7fffffff;
+ /* check fast response bit (30) */
+ if (handle & 0x40000000)
+ isFastResponse = 1;
+ handle &= 0x0000ffff;
+ if (handle == 0)
+ break;
+
+ aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
+
+ dev->host_rrq[index++] = 0;
+ if (index == dev->scsi_host_ptr->can_queue +
+ AAC_NUM_MGT_FIB)
+ index = 0;
+ dev->host_rrq_idx = index;
+ }
+ } else {
+ bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
+ if (bellbits_shifted & DoorBellAifPending) {
+ src_writel(dev, MUnit.ODR_C, bellbits);
+ src_readl(dev, MUnit.ODR_C);
+ our_interrupt = 1;
+ /* handle AIF */
+ aac_intr_normal(dev, 0, 2, 0, NULL);
+ } else if (bellbits_shifted & OUTBOUNDDOORBELL_0) {
+ unsigned long sflags;
+ struct list_head *entry;
+ int send_it = 0;
+ extern int aac_sync_mode;
+
+ src_writel(dev, MUnit.ODR_C, bellbits);
+ src_readl(dev, MUnit.ODR_C);
+
+ if (!aac_sync_mode) {
+ src_writel(dev, MUnit.ODR_C, bellbits);
+ src_readl(dev, MUnit.ODR_C);
+ our_interrupt = 1;
+ }
+
+ if (dev->sync_fib) {
+ our_interrupt = 1;
+ if (dev->sync_fib->callback)
+ dev->sync_fib->callback(dev->sync_fib->callback_data,
+ dev->sync_fib);
+ spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
+ if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
+ dev->management_fib_count--;
+ up(&dev->sync_fib->event_wait);
+ }
+ spin_unlock_irqrestore(&dev->sync_fib->event_lock, sflags);
+ spin_lock_irqsave(&dev->sync_lock, sflags);
+ if (!list_empty(&dev->sync_fib_list)) {
+ entry = dev->sync_fib_list.next;
+ dev->sync_fib = list_entry(entry, struct fib, fiblink);
+ list_del(entry);
+ send_it = 1;
+ } else {
+ dev->sync_fib = NULL;
+ }
+ spin_unlock_irqrestore(&dev->sync_lock, sflags);
+ if (send_it) {
+ aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+ (u32)dev->sync_fib->hw_fib_pa, 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+ }
+ }
+ }
+ }
+
+ if (our_interrupt) {
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
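
The host_rrq entries consumed above use yet another packing: bit 31 is
a toggle bit, bit 30 flags a fast response, and the low 16 bits carry
handle = index + 1, with zero meaning the ring is drained. A stand-alone
decoder mirroring those masks:

#include <stdint.h>

static int decode_rrq(uint32_t raw, uint32_t *index, int *fast)
{
	uint32_t handle = raw & 0x7fffffffUL;	/* strip the toggle bit */

	*fast = !!(handle & 0x40000000UL);	/* fast-response flag */
	handle &= 0x0000ffffUL;
	if (!handle)
		return 0;			/* nothing more queued */
	*index = handle - 1;			/* FIB index */
	return 1;
}

int main(void)
{
	uint32_t idx;
	int fast;

	/* toggle and fast bits set, handle 5 -> index 4 */
	return decode_rrq(0xC0000005UL, &idx, &fast) && idx == 4 && fast
		? 0 : 1;
}
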
+
+/**
+ * aac_src_disable_interrupt - Disable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_src_disable_interrupt(struct aac_dev *dev)
+{
+ src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
+}
+
+/**
+ * aac_src_enable_interrupt_message - Enable interrupts
+ * @dev: Adapter
+ */
+
+static void aac_src_enable_interrupt_message(struct aac_dev *dev)
+{
+ src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8);
+}
+
+/**
+ * src_sync_cmd - send a command and wait
+ * @dev: Adapter
+ * @command: Command to execute
+ * @p1: first parameter
+ * @ret: adapter status
+ *
+ * This routine will send a synchronous command to the adapter and wait
+ * for its completion.
+ */
+
+static int src_sync_cmd(struct aac_dev *dev, u32 command,
+ u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
+ u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
+{
+ unsigned long start;
+ int ok;
+
+ /*
+ * Write the command into Mailbox 0
+ */
+ writel(command, &dev->IndexRegs->Mailbox[0]);
+ /*
+ * Write the parameters into Mailboxes 1 - 6
+ */
+ writel(p1, &dev->IndexRegs->Mailbox[1]);
+ writel(p2, &dev->IndexRegs->Mailbox[2]);
+ writel(p3, &dev->IndexRegs->Mailbox[3]);
+ writel(p4, &dev->IndexRegs->Mailbox[4]);
+
+ /*
+ * Clear the synch command doorbell to start on a clean slate.
+ */
+ src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+
+ /*
+ * Disable doorbell interrupts
+ */
+ src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
+
+ /*
+ * Force the completion of the mask register write before issuing
+ * the interrupt.
+ */
+ src_readl(dev, MUnit.OIMR);
+
+ /*
+ * Signal that there is a new synch command
+ */
+ src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);
+
+ if (!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) {
+ ok = 0;
+ start = jiffies;
+
+ /*
+ * Wait up to 5 minutes
+ */
+ while (time_before(jiffies, start+300*HZ)) {
+ udelay(5); /* Delay 5 microseconds to let Mon960 get info. */
+ /*
+ * Mon960 will set doorbell0 bit when it has completed the command.
+ */
+ if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
+ /*
+ * Clear the doorbell.
+ */
+ src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+ ok = 1;
+ break;
+ }
+ /*
+ * Yield the processor in case we are slow
+ */
+ msleep(1);
+ }
+ if (unlikely(ok != 1)) {
+ /*
+ * Restore interrupt mask even though we timed out
+ */
+ aac_adapter_enable_int(dev);
+ return -ETIMEDOUT;
+ }
+ /*
+ * Pull the synch status from Mailbox 0.
+ */
+ if (status)
+ *status = readl(&dev->IndexRegs->Mailbox[0]);
+ if (r1)
+ *r1 = readl(&dev->IndexRegs->Mailbox[1]);
+ if (r2)
+ *r2 = readl(&dev->IndexRegs->Mailbox[2]);
+ if (r3)
+ *r3 = readl(&dev->IndexRegs->Mailbox[3]);
+ if (r4)
+ *r4 = readl(&dev->IndexRegs->Mailbox[4]);
+
+ /*
+ * Clear the synch command doorbell.
+ */
+ src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+ }
+
+ /*
+ * Restore interrupt mask
+ */
+ aac_adapter_enable_int(dev);
+ return 0;
+}
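
Stripped of the register names, src_sync_cmd is the same mailbox
handshake the rx and rkt variants use, with a longer five-minute budget
and one twist: in sync mode a SEND_SYNCHRONOUS_FIB skips the polling
entirely, because completion arrives through the interrupt path instead.
The sequence it implements:

  1. command -> Mailbox[0], parameters -> Mailbox[1..4]
  2. clear the outbound doorbell, mask all interrupts, flush the mask
     write, then ring the inbound doorbell
  3. poll the outbound doorbell for up to 300 seconds (5 us delay plus
     a 1 ms sleep per pass)
  4. status/results <- Mailbox[0..4], then clear the doorbell
  5. restore the interrupt mask (also done on the timeout path)
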
+
+/**
+ * aac_src_interrupt_adapter - interrupt adapter
+ * @dev: Adapter
+ *
+ * Send an interrupt to the i960 and breakpoint it.
+ */
+
+static void aac_src_interrupt_adapter(struct aac_dev *dev)
+{
+ src_sync_cmd(dev, BREAKPOINT_REQUEST,
+ 0, 0, 0, 0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ * aac_src_notify_adapter - send an event to the adapter
+ * @dev: Adapter
+ * @event: Event to send
+ *
+ * Notify the i960 that something it probably cares about has
+ * happened.
+ */
+
+static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
+{
+ switch (event) {
+
+ case AdapNormCmdQue:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
+ break;
+ case HostNormRespNotFull:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
+ break;
+ case AdapNormRespQue:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
+ break;
+ case HostNormCmdNotFull:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
+ break;
+ case FastIo:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
+ break;
+ case AdapPrintfDone:
+ src_writel(dev, MUnit.ODR_C,
+ INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+/**
+ * aac_src_start_adapter - activate adapter
+ * @dev: Adapter
+ *
+ * Start up processing on an i960 based AAC adapter
+ */
+
+static void aac_src_start_adapter(struct aac_dev *dev)
+{
+ struct aac_init *init;
+
+ /* reset host_rrq_idx first */
+ dev->host_rrq_idx = 0;
+
+ init = dev->init;
+ init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+
+ /* We can only use a 32 bit address here */
+ src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
+ 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ * aac_src_check_health
+ * @dev: device to check if healthy
+ *
+ * Will attempt to determine if the specified adapter is alive and
+ * capable of handling requests, returning 0 if alive.
+ */
+static int aac_src_check_health(struct aac_dev *dev)
+{
+ u32 status = src_readl(dev, MUnit.OMR);
+
+ /*
+ * Check to see if the board failed any self tests.
+ */
+ if (unlikely(status & SELF_TEST_FAILED))
+ return -1;
+
+ /*
+ * Check to see if the board panic'd.
+ */
+ if (unlikely(status & KERNEL_PANIC))
+ return (status >> 16) & 0xFF;
+ /*
+ * Wait for the adapter to be up and running.
+ */
+ if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
+ return -3;
+ /*
+ * Everything is OK
+ */
+ return 0;
+}
+
+/**
+ * aac_src_deliver_message
+ * @fib: fib to issue
+ *
+ * Will send a fib, returning 0 if successful.
+ */
+static int aac_src_deliver_message(struct fib *fib)
+{
+ struct aac_dev *dev = fib->dev;
+ struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
+ unsigned long qflags;
+ u32 fibsize;
+ dma_addr_t address;
+ struct aac_fib_xporthdr *pFibX;
+ u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
+
+ spin_lock_irqsave(q->lock, qflags);
+ q->numpending++;
+ spin_unlock_irqrestore(q->lock, qflags);
+
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+		/* Compute the fibsize field: size in 128-byte blocks, minus one */
+ fibsize = (hdr_size + 127) / 128 - 1;
+ if (fibsize > (ALIGN32 - 1))
+ return -EMSGSIZE;
+ /* New FIB header, 32-bit */
+ address = fib->hw_fib_pa;
+ fib->hw_fib_va->header.StructType = FIB_MAGIC2;
+ fib->hw_fib_va->header.SenderFibAddress = (u32)address;
+ fib->hw_fib_va->header.u.TimeStamp = 0;
+ BUG_ON(upper_32_bits(address) != 0L);
+ address |= fibsize;
+ } else {
+		/* Compute the fibsize field: size in 128-byte blocks, minus one */
+ fibsize = (sizeof(struct aac_fib_xporthdr) + hdr_size + 127) / 128 - 1;
+ if (fibsize > (ALIGN32 - 1))
+ return -EMSGSIZE;
+
+ /* Fill XPORT header */
+ pFibX = (void *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr);
+ pFibX->Handle = cpu_to_le32(fib->hw_fib_va->header.Handle);
+ pFibX->HostAddress = cpu_to_le64(fib->hw_fib_pa);
+ pFibX->Size = cpu_to_le32(hdr_size);
+
+ /*
+ * The xport header has been 32-byte aligned for us so that fibsize
+ * can be masked out of this address by hardware. -- BenC
+ */
+ address = fib->hw_fib_pa - sizeof(struct aac_fib_xporthdr);
+ if (address & (ALIGN32 - 1))
+ return -EINVAL;
+ address |= fibsize;
+ }
+
+ src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
+ src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
+
+ return 0;
+}
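
Delivery on SRC packs the FIB size into the low bits of the DMA address:
the size field is the header size in 128-byte blocks minus one, and it
fits because the address is 32-byte aligned (ALIGN32 is taken to be 32
here; the real value comes from aacraid.h). A sketch of the packing for
the FIB_MAGIC2 path:

#include <stdint.h>

#define ALIGN32 32	/* assumed value; defined in aacraid.h */

static uint64_t pack_iq_entry(uint64_t dma, uint16_t hdr_size)
{
	uint32_t fibsize = (hdr_size + 127) / 128 - 1;	/* 128B blocks - 1 */

	if (fibsize > ALIGN32 - 1 || (dma & (ALIGN32 - 1)))
		return 0;	/* caller must reject the fib instead */
	return dma | fibsize;	/* split into the IQ_H / IQ_L writes */
}

int main(void)
{
	/* 512-byte header at an aligned address -> low bits encode 3 */
	return pack_iq_entry(0x1000, 512) == 0x1003 ? 0 : 1;
}
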
+
+/**
+ * aac_src_ioremap
+ * @dev: Adapter
+ * @size: mapping resize request
+ *
+ */
+static int aac_src_ioremap(struct aac_dev *dev, u32 size)
+{
+ if (!size) {
+ iounmap(dev->regs.src.bar1);
+ dev->regs.src.bar1 = NULL;
+ iounmap(dev->regs.src.bar0);
+ dev->base = dev->regs.src.bar0 = NULL;
+ return 0;
+ }
+ dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
+ AAC_MIN_SRC_BAR1_SIZE);
+ dev->base = NULL;
+ if (dev->regs.src.bar1 == NULL)
+ return -1;
+ dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
+ if (dev->base == NULL) {
+ iounmap(dev->regs.src.bar1);
+ dev->regs.src.bar1 = NULL;
+ return -1;
+ }
+ dev->IndexRegs = &((struct src_registers __iomem *)
+ dev->base)->u.tupelo.IndexRegs;
+ return 0;
+}
+
+/**
+ * aac_srcv_ioremap
+ * @dev: Adapter
+ * @size: mapping resize request
+ *
+ */
+static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
+{
+ if (!size) {
+ iounmap(dev->regs.src.bar0);
+ dev->base = dev->regs.src.bar0 = NULL;
+ return 0;
+ }
+ dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
+ if (dev->base == NULL)
+ return -1;
+ dev->IndexRegs = &((struct src_registers __iomem *)
+ dev->base)->u.denali.IndexRegs;
+ return 0;
+}
+
+static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
+{
+ u32 var, reset_mask;
+
+ if (bled >= 0) {
+ if (bled)
+ printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
+ dev->name, dev->id, bled);
+ bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
+ 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
+ if (bled || (var != 0x00000001))
+ return -EINVAL;
+ if (dev->supplement_adapter_info.SupportedOptions2 &
+ AAC_OPTION_DOORBELL_RESET) {
+ src_writel(dev, MUnit.IDR, reset_mask);
+ msleep(5000); /* Delay 5 seconds */
+ }
+ }
+
+ if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
+ return -ENODEV;
+
+ if (startup_timeout < 300)
+ startup_timeout = 300;
+
+ return 0;
+}
+
+/**
+ * aac_src_select_comm - Select communications method
+ * @dev: Adapter
+ * @comm: communications method
+ */
+int aac_src_select_comm(struct aac_dev *dev, int comm)
+{
+ switch (comm) {
+ case AAC_COMM_MESSAGE:
+ dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
+ dev->a_ops.adapter_intr = aac_src_intr_message;
+ dev->a_ops.adapter_deliver = aac_src_deliver_message;
+ break;
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * aac_src_init - initialize a Cardinal Frey Bar card
+ * @dev: device to configure
+ *
+ */
+
+int aac_src_init(struct aac_dev *dev)
+{
+ unsigned long start;
+ unsigned long status;
+ int restart = 0;
+ int instance = dev->id;
+ const char *name = dev->name;
+
+ dev->a_ops.adapter_ioremap = aac_src_ioremap;
+ dev->a_ops.adapter_comm = aac_src_select_comm;
+
+ dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
+ if (aac_adapter_ioremap(dev, dev->base_size)) {
+ printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+ goto error_iounmap;
+ }
+
+ /* Failure to reset here is an option ... */
+ dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+ if ((aac_reset_devices || reset_devices) &&
+ !aac_src_restart_adapter(dev, 0))
+ ++restart;
+ /*
+ * Check to see if the board panic'd while booting.
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & KERNEL_PANIC) {
+ if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
+ goto error_iounmap;
+ ++restart;
+ }
+ /*
+ * Check to see if the board failed any self tests.
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & SELF_TEST_FAILED) {
+ printk(KERN_ERR "%s%d: adapter self-test failed.\n",
+ dev->name, instance);
+ goto error_iounmap;
+ }
+ /*
+ * Check to see if the monitor panic'd while booting.
+ */
+ if (status & MONITOR_PANIC) {
+ printk(KERN_ERR "%s%d: adapter monitor panic.\n",
+ dev->name, instance);
+ goto error_iounmap;
+ }
+ start = jiffies;
+ /*
+ * Wait for the adapter to be up and running. Wait up to 3 minutes
+ */
+ while (!((status = src_readl(dev, MUnit.OMR)) &
+ KERNEL_UP_AND_RUNNING)) {
+ if ((restart &&
+ (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+ time_after(jiffies, start+HZ*startup_timeout)) {
+ printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
+ dev->name, instance, status);
+ goto error_iounmap;
+ }
+ if (!restart &&
+ ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+ time_after(jiffies, start + HZ *
+ ((startup_timeout > 60)
+ ? (startup_timeout - 60)
+ : (startup_timeout / 2))))) {
+ if (likely(!aac_src_restart_adapter(dev,
+ aac_src_check_health(dev))))
+ start = jiffies;
+ ++restart;
+ }
+ msleep(1);
+ }
+ if (restart && aac_commit)
+ aac_commit = 1;
+ /*
+ * Fill in the common function dispatch table.
+ */
+ dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+ dev->a_ops.adapter_notify = aac_src_notify_adapter;
+ dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+ dev->a_ops.adapter_check_health = aac_src_check_health;
+ dev->a_ops.adapter_restart = aac_src_restart_adapter;
+
+ /*
+	 * First clear out all interrupts. Then enable the ones that we
+ * can handle.
+ */
+ aac_adapter_comm(dev, AAC_COMM_MESSAGE);
+ aac_adapter_disable_int(dev);
+ src_writel(dev, MUnit.ODR_C, 0xffffffff);
+ aac_adapter_enable_int(dev);
+
+ if (aac_init_adapter(dev) == NULL)
+ goto error_iounmap;
+ if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
+ goto error_iounmap;
+
+ dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+ IRQF_SHARED, "aacraid", dev) < 0) {
+
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+
+ printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+ name, instance);
+ goto error_iounmap;
+ }
+ dev->dbg_base = pci_resource_start(dev->pdev, 2);
+ dev->dbg_base_mapped = dev->regs.src.bar1;
+ dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
+
+ aac_adapter_enable_int(dev);
+
+ if (!dev->sync_mode) {
+ /*
+ * Tell the adapter that all is configured, and it can
+ * start accepting requests
+ */
+ aac_src_start_adapter(dev);
+ }
+ return 0;
+
+error_iounmap:
+
+ return -1;
+}
+
+/**
+ * aac_srcv_init - initialize an SRCv card
+ * @dev: device to configure
+ *
+ */
+
+int aac_srcv_init(struct aac_dev *dev)
+{
+ unsigned long start;
+ unsigned long status;
+ int restart = 0;
+ int instance = dev->id;
+ const char *name = dev->name;
+
+ dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
+ dev->a_ops.adapter_comm = aac_src_select_comm;
+
+ dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
+ if (aac_adapter_ioremap(dev, dev->base_size)) {
+ printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+ goto error_iounmap;
+ }
+
+ /* Failure to reset here is an option ... */
+ dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+ dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+ if ((aac_reset_devices || reset_devices) &&
+ !aac_src_restart_adapter(dev, 0))
+ ++restart;
+ /*
+ * Check to see if flash update is running.
+ * Wait for the adapter to be up and running. Wait up to 5 minutes
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & FLASH_UPD_PENDING) {
+ start = jiffies;
+ do {
+ status = src_readl(dev, MUnit.OMR);
+ if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
+ printk(KERN_ERR "%s%d: adapter flash update failed.\n",
+ dev->name, instance);
+ goto error_iounmap;
+ }
+ } while (!(status & FLASH_UPD_SUCCESS) &&
+ !(status & FLASH_UPD_FAILED));
+ /* Delay 10 seconds.
+ * Because right now FW is doing a soft reset,
+ * do not read scratch pad register at this time
+ */
+ ssleep(10);
+ }
+ /*
+ * Check to see if the board panic'd while booting.
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & KERNEL_PANIC) {
+ if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
+ goto error_iounmap;
+ ++restart;
+ }
+ /*
+ * Check to see if the board failed any self tests.
+ */
+ status = src_readl(dev, MUnit.OMR);
+ if (status & SELF_TEST_FAILED) {
+ printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
+ goto error_iounmap;
+ }
+ /*
+ * Check to see if the monitor panic'd while booting.
+ */
+ if (status & MONITOR_PANIC) {
+ printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
+ goto error_iounmap;
+ }
+ start = jiffies;
+ /*
+ * Wait for the adapter to be up and running. Wait up to 3 minutes
+ */
+ while (!((status = src_readl(dev, MUnit.OMR)) &
+ KERNEL_UP_AND_RUNNING) ||
+ status == 0xffffffff) {
+ if ((restart &&
+ (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+ time_after(jiffies, start+HZ*startup_timeout)) {
+ printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
+ dev->name, instance, status);
+ goto error_iounmap;
+ }
+ if (!restart &&
+ ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+ time_after(jiffies, start + HZ *
+ ((startup_timeout > 60)
+ ? (startup_timeout - 60)
+ : (startup_timeout / 2))))) {
+ if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev))))
+ start = jiffies;
+ ++restart;
+ }
+ msleep(1);
+ }
+ if (restart && aac_commit)
+ aac_commit = 1;
+ /*
+ * Fill in the common function dispatch table.
+ */
+ dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+ dev->a_ops.adapter_notify = aac_src_notify_adapter;
+ dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+ dev->a_ops.adapter_check_health = aac_src_check_health;
+ dev->a_ops.adapter_restart = aac_src_restart_adapter;
+
+ /*
+	 * First clear out all interrupts. Then enable the ones that we
+ * can handle.
+ */
+ aac_adapter_comm(dev, AAC_COMM_MESSAGE);
+ aac_adapter_disable_int(dev);
+ src_writel(dev, MUnit.ODR_C, 0xffffffff);
+ aac_adapter_enable_int(dev);
+
+ if (aac_init_adapter(dev) == NULL)
+ goto error_iounmap;
+ if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2)
+ goto error_iounmap;
+ dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+ if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+ IRQF_SHARED, "aacraid", dev) < 0) {
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+ printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+ name, instance);
+ goto error_iounmap;
+ }
+ dev->dbg_base = dev->base_start;
+ dev->dbg_base_mapped = dev->base;
+ dev->dbg_size = dev->base_size;
+
+ aac_adapter_enable_int(dev);
+
+ if (!dev->sync_mode) {
+ /*
+ * Tell the adapter that all is configured, and it can
+ * start accepting requests
+ */
+ aac_src_start_adapter(dev);
+ }
+ return 0;
+
+error_iounmap:
+
+ return -1;
+}
+